| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
| stringlengths 1-99 | stringlengths 13-215 | stringlengths 12-59.2M | int64 12-59.2M | float64 3.82-1.48M | int64 12-2.51M | stringclasses 1 value |
---|---|---|---|---|---|---|
models | models-master/research/object_detection/builders/model_builder_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_builder under TensorFlow 2.X."""
import os
import unittest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import model_builder
from object_detection.builders import model_builder_test
from object_detection.core import losses
from object_detection.meta_architectures import deepmac_meta_arch
from object_detection.models import center_net_hourglass_feature_extractor
from object_detection.models.keras_models import hourglass_network
from object_detection.protos import center_net_pb2
from object_detection.protos import model_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ModelBuilderTF2Test(
model_builder_test.ModelBuilderTest, parameterized.TestCase):
def default_ssd_feature_extractor(self):
return 'ssd_resnet50_v1_fpn_keras'
def default_faster_rcnn_feature_extractor(self):
return 'faster_rcnn_resnet101_keras'
def ssd_feature_extractors(self):
return model_builder.SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
def get_override_base_feature_extractor_hyperparams(self, extractor_type):
return extractor_type in {}
def faster_rcnn_feature_extractors(self):
return model_builder.FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
def get_fake_label_map_file_path(self):
keypoint_spec_text = """
item {
name: "/m/01g317"
id: 1
display_name: "person"
keypoints {
id: 0
label: 'nose'
}
keypoints {
id: 1
label: 'left_shoulder'
}
keypoints {
id: 2
label: 'right_shoulder'
}
keypoints {
id: 3
label: 'hip'
}
}
"""
keypoint_label_map_path = os.path.join(
self.get_temp_dir(), 'keypoint_label_map')
with tf.gfile.Open(keypoint_label_map_path, 'wb') as f:
f.write(keypoint_spec_text)
return keypoint_label_map_path
def get_fake_keypoint_proto(self, customize_head_params=False):
task_proto_txt = """
task_name: "human_pose"
task_loss_weight: 0.9
keypoint_regression_loss_weight: 1.0
keypoint_heatmap_loss_weight: 0.1
keypoint_offset_loss_weight: 0.5
heatmap_bias_init: 2.14
keypoint_class_name: "/m/01g317"
loss {
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
localization_loss {
l1_localization_loss {
}
}
}
keypoint_label_to_std {
key: "nose"
value: 0.3
}
keypoint_label_to_std {
key: "hip"
value: 0.0
}
keypoint_candidate_score_threshold: 0.3
num_candidates_per_keypoint: 12
peak_max_pool_kernel_size: 5
unmatched_keypoint_score: 0.05
box_scale: 1.7
candidate_search_scale: 0.2
candidate_ranking_mode: "score_distance_ratio"
offset_peak_radius: 3
per_keypoint_offset: true
predict_depth: true
per_keypoint_depth: true
keypoint_depth_loss_weight: 0.3
score_distance_multiplier: 11.0
std_dev_multiplier: 2.8
rescoring_threshold: 0.5
gaussian_denom_ratio: 0.3
argmax_postprocessing: True
"""
if customize_head_params:
task_proto_txt += """
heatmap_head_params {
num_filters: 64
num_filters: 32
kernel_sizes: 5
kernel_sizes: 3
}
offset_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
config = text_format.Merge(task_proto_txt,
center_net_pb2.CenterNet.KeypointEstimation())
return config
def get_fake_object_center_proto(self, customize_head_params=False):
proto_txt = """
object_center_loss_weight: 0.5
heatmap_bias_init: 3.14
min_box_overlap_iou: 0.2
max_box_predictions: 15
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
peak_max_pool_kernel_size: 5
"""
if customize_head_params:
proto_txt += """
center_head_params {
num_filters: 64
num_filters: 32
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectCenterParams())
def get_fake_object_center_from_keypoints_proto(self):
proto_txt = """
object_center_loss_weight: 0.5
heatmap_bias_init: 3.14
min_box_overlap_iou: 0.2
max_box_predictions: 15
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 3.0
beta: 4.0
}
}
keypoint_weights_for_center: 1.0
keypoint_weights_for_center: 0.0
keypoint_weights_for_center: 1.0
keypoint_weights_for_center: 0.0
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectCenterParams())
def get_fake_object_detection_proto(self, customize_head_params=False):
proto_txt = """
task_loss_weight: 0.5
offset_loss_weight: 0.1
scale_loss_weight: 0.2
localization_loss {
l1_localization_loss {
}
}
"""
if customize_head_params:
proto_txt += """
scale_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.ObjectDetection())
def get_fake_mask_proto(self, customize_head_params=False):
proto_txt = """
task_loss_weight: 0.7
classification_loss {
weighted_softmax {}
}
mask_height: 8
mask_width: 8
score_threshold: 0.7
heatmap_bias_init: -2.0
"""
if customize_head_params:
proto_txt += """
mask_head_params {
num_filters: 128
num_filters: 64
kernel_sizes: 5
kernel_sizes: 3
}
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.MaskEstimation())
def get_fake_densepose_proto(self):
proto_txt = """
task_loss_weight: 0.5
class_id: 0
loss {
classification_loss {
weighted_softmax {}
}
localization_loss {
l1_localization_loss {
}
}
}
num_parts: 24
part_loss_weight: 1.0
coordinate_loss_weight: 2.0
upsample_to_input_res: true
heatmap_bias_init: -2.0
"""
return text_format.Merge(proto_txt,
center_net_pb2.CenterNet.DensePoseEstimation())
@parameterized.parameters(
{'customize_head_params': True},
{'customize_head_params': False}
)
def test_create_center_net_model(self, customize_head_params):
"""Test building a CenterNet model from proto txt."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "hourglass_52"
channel_stds: [4, 5, 6]
bgr_ordering: true
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Merge(proto_txt, model_pb2.DetectionModel())
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_proto(
customize_head_params=customize_head_params))
config.center_net.object_detection_task.CopyFrom(
self.get_fake_object_detection_proto(
customize_head_params=customize_head_params))
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto(
customize_head_params=customize_head_params))
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
config.center_net.mask_estimation_task.CopyFrom(
self.get_fake_mask_proto(
customize_head_params=customize_head_params))
config.center_net.densepose_estimation_task.CopyFrom(
self.get_fake_densepose_proto())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
# Check object center related parameters.
self.assertEqual(model._num_classes, 10)
self.assertIsInstance(model._center_params.classification_loss,
losses.PenaltyReducedLogisticFocalLoss)
self.assertEqual(model._center_params.classification_loss._alpha, 3.0)
self.assertEqual(model._center_params.classification_loss._beta, 4.0)
self.assertAlmostEqual(model._center_params.min_box_overlap_iou, 0.2)
self.assertAlmostEqual(
model._center_params.heatmap_bias_init, 3.14, places=4)
self.assertEqual(model._center_params.max_box_predictions, 15)
if customize_head_params:
self.assertEqual(model._center_params.center_head_num_filters, [64, 32])
self.assertEqual(model._center_params.center_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._center_params.center_head_num_filters, [256])
self.assertEqual(model._center_params.center_head_kernel_sizes, [3])
self.assertEqual(model._center_params.peak_max_pool_kernel_size, 5)
# Check object detection related parameters.
self.assertAlmostEqual(model._od_params.offset_loss_weight, 0.1)
self.assertAlmostEqual(model._od_params.scale_loss_weight, 0.2)
self.assertAlmostEqual(model._od_params.task_loss_weight, 0.5)
self.assertIsInstance(model._od_params.localization_loss,
losses.L1LocalizationLoss)
self.assertEqual(model._od_params.offset_head_num_filters, [256])
self.assertEqual(model._od_params.offset_head_kernel_sizes, [3])
if customize_head_params:
self.assertEqual(model._od_params.scale_head_num_filters, [128, 64])
self.assertEqual(model._od_params.scale_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._od_params.scale_head_num_filters, [256])
self.assertEqual(model._od_params.scale_head_kernel_sizes, [3])
# Check keypoint estimation related parameters.
kp_params = model._kp_params_dict['human_pose']
self.assertAlmostEqual(kp_params.task_loss_weight, 0.9)
self.assertAlmostEqual(kp_params.keypoint_regression_loss_weight, 1.0)
self.assertAlmostEqual(kp_params.keypoint_offset_loss_weight, 0.5)
self.assertAlmostEqual(kp_params.heatmap_bias_init, 2.14, places=4)
self.assertEqual(kp_params.classification_loss._alpha, 3.0)
self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3])
self.assertEqual(kp_params.keypoint_labels,
['nose', 'left_shoulder', 'right_shoulder', 'hip'])
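    # keypoint_label_to_std in the fake proto sets 'nose' to 0.3 and 'hip' to
    # 0.0; the unlisted shoulder keypoints fall back to the default of 1.0.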
self.assertAllClose(kp_params.keypoint_std_dev, [0.3, 1.0, 1.0, 0.0])
self.assertEqual(kp_params.classification_loss._beta, 4.0)
self.assertIsInstance(kp_params.localization_loss,
losses.L1LocalizationLoss)
self.assertAlmostEqual(kp_params.keypoint_candidate_score_threshold, 0.3)
self.assertEqual(kp_params.num_candidates_per_keypoint, 12)
self.assertEqual(kp_params.peak_max_pool_kernel_size, 5)
self.assertAlmostEqual(kp_params.unmatched_keypoint_score, 0.05)
self.assertAlmostEqual(kp_params.box_scale, 1.7)
self.assertAlmostEqual(kp_params.candidate_search_scale, 0.2)
self.assertEqual(kp_params.candidate_ranking_mode, 'score_distance_ratio')
self.assertEqual(kp_params.offset_peak_radius, 3)
self.assertEqual(kp_params.per_keypoint_offset, True)
self.assertEqual(kp_params.predict_depth, True)
self.assertEqual(kp_params.per_keypoint_depth, True)
self.assertAlmostEqual(kp_params.keypoint_depth_loss_weight, 0.3)
self.assertAlmostEqual(kp_params.score_distance_multiplier, 11.0)
self.assertAlmostEqual(kp_params.std_dev_multiplier, 2.8)
self.assertAlmostEqual(kp_params.rescoring_threshold, 0.5)
if customize_head_params:
# Set by the config.
self.assertEqual(kp_params.heatmap_head_num_filters, [64, 32])
self.assertEqual(kp_params.heatmap_head_kernel_sizes, [5, 3])
self.assertEqual(kp_params.offset_head_num_filters, [128, 64])
self.assertEqual(kp_params.offset_head_kernel_sizes, [5, 3])
else:
# Default values:
self.assertEqual(kp_params.heatmap_head_num_filters, [256])
self.assertEqual(kp_params.heatmap_head_kernel_sizes, [3])
self.assertEqual(kp_params.offset_head_num_filters, [256])
self.assertEqual(kp_params.offset_head_kernel_sizes, [3])
self.assertAlmostEqual(kp_params.gaussian_denom_ratio, 0.3)
self.assertEqual(kp_params.argmax_postprocessing, True)
# Check mask related parameters.
self.assertAlmostEqual(model._mask_params.task_loss_weight, 0.7)
self.assertIsInstance(model._mask_params.classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertEqual(model._mask_params.mask_height, 8)
self.assertEqual(model._mask_params.mask_width, 8)
self.assertAlmostEqual(model._mask_params.score_threshold, 0.7)
self.assertAlmostEqual(
model._mask_params.heatmap_bias_init, -2.0, places=4)
if customize_head_params:
self.assertEqual(model._mask_params.mask_head_num_filters, [128, 64])
self.assertEqual(model._mask_params.mask_head_kernel_sizes, [5, 3])
else:
self.assertEqual(model._mask_params.mask_head_num_filters, [256])
self.assertEqual(model._mask_params.mask_head_kernel_sizes, [3])
# Check DensePose related parameters.
self.assertEqual(model._densepose_params.class_id, 0)
self.assertIsInstance(model._densepose_params.classification_loss,
losses.WeightedSoftmaxClassificationLoss)
self.assertIsInstance(model._densepose_params.localization_loss,
losses.L1LocalizationLoss)
self.assertAlmostEqual(model._densepose_params.part_loss_weight, 1.0)
self.assertAlmostEqual(model._densepose_params.coordinate_loss_weight, 2.0)
self.assertEqual(model._densepose_params.num_parts, 24)
self.assertAlmostEqual(model._densepose_params.task_loss_weight, 0.5)
self.assertTrue(model._densepose_params.upsample_to_input_res)
self.assertEqual(model._densepose_params.upsample_method, 'bilinear')
self.assertAlmostEqual(
model._densepose_params.heatmap_bias_init, -2.0, places=4)
# Check feature extractor parameters.
self.assertIsInstance(
model._feature_extractor, center_net_hourglass_feature_extractor
.CenterNetHourglassFeatureExtractor)
self.assertAllClose(model._feature_extractor._channel_means, [0, 0, 0])
self.assertAllClose(model._feature_extractor._channel_stds, [4, 5, 6])
self.assertTrue(model._feature_extractor._bgr_ordering)
backbone = model._feature_extractor._network
self.assertIsInstance(backbone, hourglass_network.HourglassNetwork)
    self.assertEqual(backbone.num_hourglasses, 1)
def test_create_center_net_model_from_keypoints(self):
"""Test building a CenterNet model from proto txt."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "hourglass_52"
channel_stds: [4, 5, 6]
bgr_ordering: true
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Only add object center and keypoint estimation configs here.
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_from_keypoints_proto())
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto())
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
# Check object center related parameters.
self.assertEqual(model._num_classes, 10)
self.assertEqual(model._center_params.keypoint_weights_for_center,
[1.0, 0.0, 1.0, 0.0])
# Check keypoint estimation related parameters.
kp_params = model._kp_params_dict['human_pose']
self.assertAlmostEqual(kp_params.task_loss_weight, 0.9)
self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3])
self.assertEqual(kp_params.keypoint_labels,
['nose', 'left_shoulder', 'right_shoulder', 'hip'])
def test_create_center_net_model_mobilenet(self):
"""Test building a CenterNet model using bilinear interpolation."""
proto_txt = """
center_net {
num_classes: 10
feature_extractor {
type: "mobilenet_v2_fpn"
depth_multiplier: 2.0
use_separable_conv: true
upsampling_interpolation: "bilinear"
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Only add object center and keypoint estimation configs here.
config.center_net.object_center_params.CopyFrom(
self.get_fake_object_center_from_keypoints_proto())
config.center_net.keypoint_estimation_task.append(
self.get_fake_keypoint_proto())
config.center_net.keypoint_label_map_path = (
self.get_fake_label_map_file_path())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
feature_extractor = model._feature_extractor
# Verify the upsampling layers in the FPN use 'bilinear' interpolation.
fpn = feature_extractor.get_layer('model_1')
num_up_sampling2d_layers = 0
for layer in fpn.layers:
if 'up_sampling2d' in layer.name:
num_up_sampling2d_layers += 1
self.assertEqual('bilinear', layer.interpolation)
# Verify that there are up_sampling2d layers.
self.assertGreater(num_up_sampling2d_layers, 0)
    # Verify that the FPN ops use separable convolutions.
for layer in fpn.layers:
# Convolution layers with kernel size not equal to (1, 1) should be
# separable 2D convolutions.
if 'conv' in layer.name and layer.kernel_size != (1, 1):
self.assertIsInstance(layer, tf.keras.layers.SeparableConv2D)
    # Verify that the backbone indeed doubles the number of channels according
    # to the depth multiplier.
backbone = feature_extractor.get_layer('model')
first_conv = backbone.get_layer('Conv1')
# Note that the first layer typically has 32 filters, but this model has
# a depth multiplier of 2.
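    # 32 base filters * depth_multiplier 2.0 = 64 filters.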
self.assertEqual(64, first_conv.filters)
def test_create_center_net_deepmac(self):
"""Test building a CenterNet DeepMAC model."""
proto_txt = """
center_net {
num_classes: 90
feature_extractor {
type: "hourglass_52"
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 512
max_dimension: 512
pad_to_max_dimension: true
}
}
object_detection_task {
task_loss_weight: 1.0
offset_loss_weight: 1.0
scale_loss_weight: 0.1
localization_loss {
l1_localization_loss {
}
}
}
object_center_params {
object_center_loss_weight: 1.0
min_box_overlap_iou: 0.7
max_box_predictions: 100
classification_loss {
penalty_reduced_logistic_focal_loss {
alpha: 2.0
beta: 4.0
}
}
}
deepmac_mask_estimation {
classification_loss {
weighted_sigmoid {}
}
}
}
"""
# Set up the configuration proto.
config = text_format.Parse(proto_txt, model_pb2.DetectionModel())
# Build the model from the configuration.
model = model_builder.build(config, is_training=True)
self.assertIsInstance(model, deepmac_meta_arch.DeepMACMetaArch)
if __name__ == '__main__':
tf.test.main()
| 21,312 | 35.494863 | 80 | py |
models | models-master/research/object_detection/builders/hyperparams_builder_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import tf_version
def _get_scope_key(op):
return getattr(op, '_key_op', str(op))
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests.')
class HyperparamsBuilderTest(tf.test.TestCase):
def test_default_arg_scope_has_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.conv2d), scope)
def test_default_arg_scope_has_separable_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.separable_conv2d), scope)
def test_default_arg_scope_has_conv2d_transpose_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.conv2d_transpose), scope)
def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
conv_hyperparams_text_proto = """
op: FC
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertIn(_get_scope_key(slim.fully_connected), scope)
def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
kwargs_1, kwargs_2, kwargs_3 = scope.values()
self.assertDictEqual(kwargs_1, kwargs_2)
self.assertDictEqual(kwargs_1, kwargs_3)
def test_return_l1_regularized_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = list(scope.values())[0]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularizer_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_non_default_batch_norm_params_with_train_during_train(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertTrue(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_during_eval(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=False)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_when_train_is_false(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: false
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_do_not_use_batch_norm_if_default(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], None)
def test_use_none_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], None)
def test_use_relu_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu)
def test_use_relu_6_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6)
def test_use_swish_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: SWISH
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.swish)
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
var = tf.get_variable(
name='test',
shape=shape,
dtype=tf.float32,
initializer=initializer)
sess.run(tf.global_variables_initializer())
values = sess.run(var)
self.assertAllClose(np.var(values), variance, tol, tol)
def test_variance_in_range_with_variance_scaling_initializer_fan_in(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only tests.')
class KerasHyperparamsBuilderTest(tf.test.TestCase):
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
var = tf.Variable(initializer(shape=shape, dtype=tf.float32))
self.assertAllClose(np.var(var.numpy()), variance, tol, tol)
def test_return_l1_regularized_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
result = regularizer(tf.constant(weights)).numpy()
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularized_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
result = regularizer(tf.constant(weights)).numpy()
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_l1_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
self.assertIsInstance(regularizer_weight, float)
self.assertAlmostEqual(regularizer_weight, 0.5)
def test_return_l2_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
self.assertIsInstance(regularizer_weight, float)
self.assertAlmostEqual(regularizer_weight, 0.25)
def test_return_undefined_regularizer_weight_keras(self):
conv_hyperparams_text_proto = """
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer_weight = keras_config.get_regularizer_weight()
self.assertIsNone(regularizer_weight)
def test_return_non_default_batch_norm_params_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertAlmostEqual(batch_norm_params['momentum'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
batch_norm_layer = keras_config.build_batch_norm()
self.assertIsInstance(batch_norm_layer,
freezable_batch_norm.FreezableBatchNorm)
def test_return_non_default_batch_norm_params_keras_override(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params(momentum=0.4)
self.assertAlmostEqual(batch_norm_params['momentum'], 0.4)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
def test_do_not_use_batch_norm_if_default_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertFalse(keras_config.use_batch_norm())
self.assertEqual(keras_config.batch_norm_params(), {})
# The batch norm builder should build an identity Lambda layer
identity_layer = keras_config.build_batch_norm()
self.assertIsInstance(identity_layer,
tf.keras.layers.Lambda)
def test_do_not_use_bias_if_batch_norm_center_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: true
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertTrue(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
hyperparams = keras_config.params()
self.assertFalse(hyperparams['use_bias'])
def test_force_use_bias_if_batch_norm_center_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: true
scale: true
epsilon: 0.03
train: true
}
force_use_bias: true
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertTrue(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
hyperparams = keras_config.params()
self.assertTrue(hyperparams['use_bias'])
def test_use_none_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertIsNone(
keras_config.params(include_activation=True)['activation'])
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.identity)
def test_use_relu_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.relu)
def test_use_relu_6_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu6)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.relu6)
def test_use_swish_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: SWISH
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertIsNone(keras_config.params()['activation'])
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.swish)
activation_layer = keras_config.build_activation_layer()
self.assertIsInstance(activation_layer, tf.keras.layers.Lambda)
self.assertEqual(activation_layer.function, tf.nn.swish)
def test_override_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
new_params = keras_config.params(activation=tf.nn.relu)
self.assertEqual(new_params['activation'], tf.nn.relu)
def test_variance_in_range_with_variance_scaling_initializer_fan_in_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
def test_keras_initializer_by_name(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
keras_initializer_by_name: "glorot_uniform"
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer_arg = keras_config.params()['kernel_initializer']
conv_layer = tf.keras.layers.Conv2D(
filters=16, kernel_size=3, **keras_config.params())
self.assertEqual(initializer_arg, 'glorot_uniform')
self.assertIsInstance(conv_layer.kernel_initializer,
type(tf.keras.initializers.get('glorot_uniform')))
if __name__ == '__main__':
tf.test.main()
| 35,961 | 33.087204 | 80 | py |
models | models-master/research/object_detection/builders/hyperparams_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import context_manager
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.core import freezable_sync_batch_norm
# pylint: enable=g-import-not-at-top
class KerasLayerHyperparams(object):
"""
A hyperparameter configuration object for Keras layers used in
Object Detection models.
"""
def __init__(self, hyperparams_config):
"""Builds keras hyperparameter config for layers based on the proto config.
It automatically converts from Slim layer hyperparameter configs to
Keras layer hyperparameters. Namely, it:
- Builds Keras initializers/regularizers instead of Slim ones
- sets weights_regularizer/initializer to kernel_regularizer/initializer
- converts batchnorm decay to momentum
- converts Slim l2 regularizer weights to the equivalent Keras l2 weights
Contains a hyperparameter configuration for ops that specifies kernel
initializer, kernel regularizer, activation. Also contains parameters for
batch norm operators based on the configuration.
Note that if the batch_norm parameters are not specified in the config
(i.e. left to default) then batch norm is excluded from the config.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
self._batch_norm_params = None
self._use_sync_batch_norm = False
if hyperparams_config.HasField('batch_norm'):
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.batch_norm)
elif hyperparams_config.HasField('sync_batch_norm'):
self._use_sync_batch_norm = True
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.sync_batch_norm)
self._force_use_bias = hyperparams_config.force_use_bias
self._activation_fn = _build_activation_fn(hyperparams_config.activation)
# TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv
# (Those might use depthwise_* instead of kernel_*)
# We should probably switch to using build_conv2d_layer and
# build_depthwise_conv2d_layer methods instead.
self._op_params = {
'kernel_regularizer': _build_keras_regularizer(
hyperparams_config.regularizer),
'kernel_initializer': _build_initializer(
hyperparams_config.initializer, build_for_keras=True),
'activation': _build_activation_fn(hyperparams_config.activation)
}
def use_batch_norm(self):
return self._batch_norm_params is not None
def use_sync_batch_norm(self):
return self._use_sync_batch_norm
def force_use_bias(self):
return self._force_use_bias
def use_bias(self):
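    # A separate bias is redundant when batch norm is applied with
    # center=True, since the batch norm beta term already provides a learned
    # offset; force_use_bias overrides this behavior.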
return (self._force_use_bias or not
(self.use_batch_norm() and self.batch_norm_params()['center']))
def batch_norm_params(self, **overrides):
"""Returns a dict containing batchnorm layer construction hyperparameters.
Optionally overrides values in the batchnorm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
**overrides: keyword arguments to override in the hyperparams dictionary
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
if self._batch_norm_params is None:
new_batch_norm_params = dict()
else:
new_batch_norm_params = self._batch_norm_params.copy()
new_batch_norm_params.update(overrides)
return new_batch_norm_params
def build_batch_norm(self, training=None, **overrides):
"""Returns a Batch Normalization layer with the appropriate hyperparams.
If the hyperparams are configured to not use batch normalization,
this will return a Keras Lambda layer that only applies tf.Identity,
without doing any normalization.
Optionally overrides values in the batch_norm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
training: if True, the normalization layer will normalize using the batch
statistics. If False, the normalization layer will be frozen and will
act as if it is being used for inference. If None, the layer
will look up the Keras learning phase at `call` time to decide what to
do.
**overrides: batch normalization construction args to override from the
batch_norm hyperparams dictionary.
Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True),
or a Keras Lambda layer that applies the identity (if use_batch_norm()
is False)
"""
if self.use_batch_norm():
if self._use_sync_batch_norm:
return freezable_sync_batch_norm.FreezableSyncBatchNorm(
training=training, **self.batch_norm_params(**overrides))
else:
return freezable_batch_norm.FreezableBatchNorm(
training=training, **self.batch_norm_params(**overrides))
else:
return tf.keras.layers.Lambda(tf.identity)
def build_activation_layer(self, name='activation'):
"""Returns a Keras layer that applies the desired activation function.
Args:
name: The name to assign the Keras layer.
Returns: A Keras lambda layer that applies the activation function
specified in the hyperparam config, or applies the identity if the
activation function is None.
"""
if self._activation_fn:
return tf.keras.layers.Lambda(self._activation_fn, name=name)
else:
return tf.keras.layers.Lambda(tf.identity, name=name)
def get_regularizer_weight(self):
"""Returns the l1 or l2 regularizer weight.
Returns: A float value corresponding to the l1 or l2 regularization weight,
      or None if neither l1 nor l2 regularization is defined.
"""
regularizer = self._op_params['kernel_regularizer']
if hasattr(regularizer, 'l1'):
return float(regularizer.l1)
elif hasattr(regularizer, 'l2'):
return float(regularizer.l2)
else:
return None
def params(self, include_activation=False, **overrides):
"""Returns a dict containing the layer construction hyperparameters to use.
Optionally overrides values in the returned dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
include_activation: If False, activation in the returned dictionary will
be set to `None`, and the activation must be applied via a separate
layer created by `build_activation_layer`. If True, `activation` in the
output param dictionary will be set to the activation function
specified in the hyperparams config.
**overrides: keyword arguments to override in the hyperparams dictionary.
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
new_params = self._op_params.copy()
new_params['activation'] = None
if include_activation:
new_params['activation'] = self._activation_fn
new_params['use_bias'] = self.use_bias()
new_params.update(**overrides)
return new_params
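# A minimal usage sketch for KerasLayerHyperparams, mirroring
# hyperparams_builder_test.py (the proto text and layer arguments below are
# illustrative only; text_format refers to google.protobuf.text_format):
#
#   proto = hyperparams_pb2.Hyperparams()
#   text_format.Parse(
#       'regularizer { l2_regularizer { weight: 0.42 } } '
#       'initializer { truncated_normal_initializer {} }', proto)
#   hyperparams = KerasLayerHyperparams(proto)
#   conv = tf.keras.layers.Conv2D(
#       filters=16, kernel_size=3, **hyperparams.params())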
def build(hyperparams_config, is_training):
"""Builds tf-slim arg_scope for convolution ops based on the config.
Returns an arg_scope to use for convolution ops containing weights
initializer, weights regularizer, activation function, batch norm function
and batch norm parameters based on the configuration.
Note that if no normalization parameters are specified in the config,
(i.e. left to default) then both batch norm and group norm are excluded
from the arg_scope.
  The batch norm parameters are set for updates based on the `is_training`
  argument and the conv_hyperparams_config.batch_norm.train parameter. During
  training, they are updated only if the batch_norm.train parameter is true.
  However, during eval, no updates are made to the batch norm variables. In
  both cases, their current values are used during the forward pass.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
is_training: Whether the network is in training mode.
Returns:
arg_scope_fn: A function to construct tf-slim arg_scope containing
hyperparameters for ops.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
if hyperparams_config.force_use_bias:
raise ValueError('Hyperparams force_use_bias only supported by '
'KerasLayerHyperparams.')
if hyperparams_config.HasField('sync_batch_norm'):
raise ValueError('Hyperparams sync_batch_norm only supported by '
'KerasLayerHyperparams.')
normalizer_fn = None
batch_norm_params = None
if hyperparams_config.HasField('batch_norm'):
normalizer_fn = slim.batch_norm
batch_norm_params = _build_batch_norm_params(
hyperparams_config.batch_norm, is_training)
if hyperparams_config.HasField('group_norm'):
normalizer_fn = slim.group_norm
affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
if hyperparams_config.HasField('op') and (
hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
affected_ops = [slim.fully_connected]
def scope_fn():
with (slim.arg_scope([slim.batch_norm], **batch_norm_params)
if batch_norm_params is not None else
context_manager.IdentityContextManager()):
with slim.arg_scope(
affected_ops,
weights_regularizer=_build_slim_regularizer(
hyperparams_config.regularizer),
weights_initializer=_build_initializer(
hyperparams_config.initializer),
activation_fn=_build_activation_fn(hyperparams_config.activation),
normalizer_fn=normalizer_fn) as sc:
return sc
return scope_fn
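# Illustrative sketch (hypothetical usage, not part of this module's API): the
# returned `scope_fn` is meant to be called inside `slim.arg_scope` so that
# slim conv ops pick up the configured initializer, regularizer, activation and
# normalizer. All variable names below are made up.
def _example_use_scope_fn(hyperparams_config, is_training, images):
  scope_fn = build(hyperparams_config, is_training)
  with slim.arg_scope(scope_fn()):
    # This conv now uses the hyperparameters from the config.
    return slim.conv2d(images, 32, [3, 3], scope='example_conv')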
def _build_activation_fn(activation_fn):
"""Builds a callable activation from config.
Args:
activation_fn: hyperparams_pb2.Hyperparams.activation
Returns:
Callable activation function.
Raises:
ValueError: On unknown activation function.
"""
if activation_fn == hyperparams_pb2.Hyperparams.NONE:
return None
if activation_fn == hyperparams_pb2.Hyperparams.RELU:
return tf.nn.relu
if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
return tf.nn.relu6
if activation_fn == hyperparams_pb2.Hyperparams.SWISH:
return tf.nn.swish
raise ValueError('Unknown activation function: {}'.format(activation_fn))
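# Illustrative sketch (not used elsewhere in this module): the enum-to-callable
# mapping implemented above.
def _example_activation_fn_mapping():
  assert _build_activation_fn(hyperparams_pb2.Hyperparams.RELU_6) is tf.nn.relu6
  # NONE maps to Python None so callers can skip applying an activation.
  assert _build_activation_fn(hyperparams_pb2.Hyperparams.NONE) is None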
def _build_slim_regularizer(regularizer):
"""Builds a tf-slim regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
tf-slim regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
def _build_keras_regularizer(regularizer):
"""Builds a keras regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
Keras regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
# The Keras L2 regularizer weight differs from the Slim L2 regularizer
# weight by a factor of 2
return tf.keras.regularizers.l2(
float(regularizer.l2_regularizer.weight * 0.5))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
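# Illustrative sketch (not used elsewhere in this module): why the Keras L2
# weight above is halved. slim.l2_regularizer(w) evaluates to w * sum(x**2) / 2
# (it uses tf.nn.l2_loss), while tf.keras.regularizers.l2(w) evaluates to
# w * sum(x**2), so a Keras weight of w / 2 reproduces the slim penalty.
def _example_keras_slim_l2_equivalence():
  weights = tf.constant([1.0, 2.0, 3.0])
  slim_weight = 0.1
  slim_penalty = slim_weight * tf.nn.l2_loss(weights)              # 0.1 * 14 / 2
  keras_penalty = tf.keras.regularizers.l2(slim_weight * 0.5)(weights)
  return slim_penalty, keras_penalty  # Both evaluate to 0.7.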
def _build_initializer(initializer, build_for_keras=False):
"""Build a tf initializer from config.
Args:
    initializer: hyperparams_pb2.Hyperparams.initializer proto.
    build_for_keras: Whether the initializers should be built for Keras
      operators. If False, builds for Slim.
Returns:
tf initializer or string corresponding to the tf keras initializer name.
Raises:
ValueError: On unknown initializer.
"""
initializer_oneof = initializer.WhichOneof('initializer_oneof')
if initializer_oneof == 'truncated_normal_initializer':
return tf.truncated_normal_initializer(
mean=initializer.truncated_normal_initializer.mean,
stddev=initializer.truncated_normal_initializer.stddev)
if initializer_oneof == 'random_normal_initializer':
return tf.random_normal_initializer(
mean=initializer.random_normal_initializer.mean,
stddev=initializer.random_normal_initializer.stddev)
if initializer_oneof == 'variance_scaling_initializer':
enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
DESCRIPTOR.enum_types_by_name['Mode'])
mode = enum_descriptor.values_by_number[initializer.
variance_scaling_initializer.
mode].name
if build_for_keras:
if initializer.variance_scaling_initializer.uniform:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='uniform')
else:
# In TF 1.9 release and earlier, the truncated_normal distribution was
# not supported correctly. So, in these earlier versions of tensorflow,
# the ValueError will be raised, and we manually truncate the
# distribution scale.
#
# It is insufficient to just set distribution to `normal` from the
# start, because the `normal` distribution in newer Tensorflow versions
# creates a truncated distribution, whereas it created untruncated
# distributions in older versions.
try:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='truncated_normal')
except ValueError:
truncate_constant = 0.87962566103423978
truncated_scale = initializer.variance_scaling_initializer.factor / (
truncate_constant * truncate_constant
)
return tf.variance_scaling_initializer(
scale=truncated_scale,
mode=mode.lower(),
distribution='normal')
else:
return slim.variance_scaling_initializer(
factor=initializer.variance_scaling_initializer.factor,
mode=mode,
uniform=initializer.variance_scaling_initializer.uniform)
if initializer_oneof == 'keras_initializer_by_name':
if build_for_keras:
return initializer.keras_initializer_by_name
else:
raise ValueError(
'Unsupported non-Keras usage of keras_initializer_by_name: {}'.format(
initializer.keras_initializer_by_name))
if initializer_oneof is None:
return None
raise ValueError('Unknown initializer function: {}'.format(
initializer_oneof))
def _build_batch_norm_params(batch_norm, is_training):
"""Build a dictionary of batch_norm params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
    is_training: Whether the model is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params = {
'decay': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
# Remove is_training parameter from here and deprecate it in the proto
# once we refactor Faster RCNN models to set is_training through an outer
# arg_scope in the meta architecture.
'is_training': is_training and batch_norm.train,
}
return batch_norm_params
def _build_keras_batch_norm_params(batch_norm):
"""Build a dictionary of Keras BatchNormalization params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
Returns:
A dictionary containing Keras BatchNormalization parameters.
"""
# Note: Although decay is defined to be 1 - momentum in batch_norm,
# decay in the slim batch_norm layers was erroneously defined and is
# actually the same as momentum in the Keras batch_norm layers.
# For context, see: github.com/keras-team/keras/issues/6839
batch_norm_params = {
'momentum': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
}
return batch_norm_params
| 18,424 | 37.871308 | 80 | py |
models | models-master/research/delf/delf/python/training/global_features_utils.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for the global model training."""
import os
from absl import logging
import numpy as np
import tensorflow as tf
from delf.python.datasets.revisited_op import dataset as revisited_dataset
class AverageMeter():
"""Computes and stores the average and current value of loss."""
def __init__(self):
"""Initialization of the AverageMeter."""
self.reset()
def reset(self):
"""Resets all the values."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""Updates values in the AverageMeter.
Args:
val: Float, loss value.
n: Integer, number of instances.
"""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
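# Illustrative usage sketch (not called anywhere in this module): tracking a
# weighted running average of the loss during training.
def _example_average_meter_usage():
  meter = AverageMeter()
  meter.update(0.5, n=2)  # A batch of two instances with mean loss 0.5.
  meter.update(1.0)       # A single instance with loss 1.0.
  # meter.avg == (0.5 * 2 + 1.0 * 1) / 3
  return meter.avg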
def compute_metrics_and_print(dataset_name,
sorted_index_ids,
ground_truth,
desired_pr_ranks=None,
log=True):
"""Computes and logs ground-truth metrics for Revisited datasets.
Args:
dataset_name: String, name of the dataset.
sorted_index_ids: Integer NumPy array of shape [#queries, #index_images].
For each query, contains an array denoting the most relevant index images,
sorted from most to least relevant.
ground_truth: List containing ground-truth information for dataset. Each
entry is a dict corresponding to the ground-truth information for a query.
The dict has keys 'ok' and 'junk', mapping to a NumPy array of integers.
desired_pr_ranks: List of integers containing the desired precision/recall
ranks to be reported. E.g., if precision@1/recall@1 and
precision@10/recall@10 are desired, this should be set to [1, 10]. The
largest item should be <= #sorted_index_ids. Default: [1, 5, 10].
log: Whether to log results using logging.info().
Returns:
mAP: (metricsE, metricsM, metricsH) Tuple of the metrics for different
levels of complexity. Each metrics is a list containing:
mean_average_precision (float), mean_precisions (NumPy array of
floats, with shape [len(desired_pr_ranks)]), mean_recalls (NumPy array
of floats, with shape [len(desired_pr_ranks)]), average_precisions
(NumPy array of floats, with shape [#queries]), precisions (NumPy array of
floats, with shape [#queries, len(desired_pr_ranks)]), recalls (NumPy
array of floats, with shape [#queries, len(desired_pr_ranks)]).
Raises:
ValueError: If an unknown dataset name is provided as an argument.
"""
if dataset_name not in revisited_dataset.DATASET_NAMES:
    raise ValueError('Unknown dataset: {}!'.format(dataset_name))
if desired_pr_ranks is None:
desired_pr_ranks = [1, 5, 10]
(easy_ground_truth, medium_ground_truth,
hard_ground_truth) = revisited_dataset.ParseEasyMediumHardGroundTruth(
ground_truth)
metrics_easy = revisited_dataset.ComputeMetrics(sorted_index_ids,
easy_ground_truth,
desired_pr_ranks)
metrics_medium = revisited_dataset.ComputeMetrics(sorted_index_ids,
medium_ground_truth,
desired_pr_ranks)
metrics_hard = revisited_dataset.ComputeMetrics(sorted_index_ids,
hard_ground_truth,
desired_pr_ranks)
debug_and_log(
'>> {}: mAP E: {}, M: {}, H: {}'.format(
dataset_name, np.around(metrics_easy[0] * 100, decimals=2),
np.around(metrics_medium[0] * 100, decimals=2),
np.around(metrics_hard[0] * 100, decimals=2)),
log=log)
debug_and_log(
'>> {}: mP@k{} E: {}, M: {}, H: {}'.format(
dataset_name, desired_pr_ranks,
np.around(metrics_easy[1] * 100, decimals=2),
np.around(metrics_medium[1] * 100, decimals=2),
np.around(metrics_hard[1] * 100, decimals=2)),
log=log)
return metrics_easy, metrics_medium, metrics_hard
def htime(time_difference):
"""Time formatting function.
  Depending on the value of `time_difference`, outputs the time in an
  appropriate format.
Args:
time_difference: Float, time difference between the two events.
Returns:
time: String representing time in an appropriate time format.
"""
time_difference = round(time_difference)
days = time_difference // 86400
hours = time_difference // 3600 % 24
minutes = time_difference // 60 % 60
seconds = time_difference % 60
if days > 0:
return '{:d}d {:d}h {:d}m {:d}s'.format(days, hours, minutes, seconds)
if hours > 0:
return '{:d}h {:d}m {:d}s'.format(hours, minutes, seconds)
if minutes > 0:
return '{:d}m {:d}s'.format(minutes, seconds)
return '{:d}s'.format(seconds)
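# Illustrative sketch (not used elsewhere in this module): expected formatting
# for a few example durations, in seconds.
def _example_htime_usage():
  assert htime(42) == '42s'
  assert htime(3723) == '1h 2m 3s'
  assert htime(90061) == '1d 1h 1m 1s'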
def debug_and_log(msg, debug=True, log=True, debug_on_the_same_line=False):
"""Outputs `msg` to both stdout (if in the debug mode) and the log file.
Args:
msg: String, message to be logged.
debug: Bool, if True, will print `msg` to stdout.
log: Bool, if True, will redirect `msg` to the logfile.
debug_on_the_same_line: Bool, if True, will print `msg` to stdout without a
new line. When using this mode, logging to a logfile is disabled.
"""
if debug_on_the_same_line:
print(msg, end='')
return
if debug:
print(msg)
if log:
logging.info(msg)
def get_standard_keras_models():
"""Gets the standard keras model names.
Returns:
model_names: List, names of the standard keras models.
"""
model_names = sorted(
name for name in tf.keras.applications.__dict__
if not name.startswith('__') and
callable(tf.keras.applications.__dict__[name]))
return model_names
def create_model_directory(training_dataset, arch, pool, whitening, pretrained,
loss, loss_margin, optimizer, lr, weight_decay,
neg_num, query_size, pool_size, batch_size,
update_every, image_size, directory):
"""Based on the model parameters, creates the model directory.
If the model directory does not exist, the directory is created.
Args:
training_dataset: String, training dataset name.
arch: String, model architecture.
pool: String, pooling option.
whitening: Bool, whether the model is trained with global whitening.
pretrained: Bool, whether the model is initialized with the precomputed
weights.
loss: String, training loss type.
loss_margin: Float, loss margin.
    optimizer: String, name of the optimizer used.
lr: Float, initial learning rate.
weight_decay: Float, weight decay.
neg_num: Integer, Number of negative images per train/val tuple.
query_size: Integer, number of queries per one training epoch.
pool_size: Integer, size of the pool for hard negative mining.
batch_size: Integer, batch size.
update_every: Integer, frequency of the model weights update.
image_size: Integer, maximum size of longer image side used for training.
directory: String, destination where trained network should be saved.
Returns:
folder: String, path to the model folder.
"""
folder = '{}_{}_{}'.format(training_dataset, arch, pool)
if whitening:
folder += '_whiten'
if not pretrained:
folder += '_notpretrained'
folder += ('_{}_m{:.2f}_{}_lr{:.1e}_wd{:.1e}_nnum{}_qsize{}_psize{}_bsize{}'
'_uevery{}_imsize{}').format(loss, loss_margin, optimizer, lr,
weight_decay, neg_num, query_size,
pool_size, batch_size, update_every,
image_size)
folder = os.path.join(directory, folder)
debug_and_log(
      '>> Creating directory if it does not exist:\n>> \'{}\''.format(folder))
if not os.path.exists(folder):
os.makedirs(folder)
return folder
| 8,790 | 38.599099 | 80 | py |
models | models-master/research/delf/delf/python/training/train.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for DELF/G on Google Landmarks Dataset.
Uses classification loss, with MirroredStrategy, to support running on multiple
GPUs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_probability as tfp
# Placeholder for internal import. Do not remove this line.
from delf.python.datasets.google_landmarks_dataset import googlelandmarks as gld
from delf.python.training.model import delf_model
from delf.python.training.model import delg_model
FLAGS = flags.FLAGS
flags.DEFINE_boolean('debug', False, 'Debug mode.')
flags.DEFINE_string('logdir', '/tmp/delf', 'TensorBoard logdir.')
flags.DEFINE_string('train_file_pattern', '/tmp/data/train*',
'File pattern of training dataset files.')
flags.DEFINE_string('validation_file_pattern', '/tmp/data/validation*',
'File pattern of validation dataset files.')
flags.DEFINE_enum(
'dataset_version', 'gld_v1', ['gld_v1', 'gld_v2', 'gld_v2_clean'],
'Google Landmarks dataset version, used to determine the number of '
'classes.')
flags.DEFINE_integer('seed', 0, 'Seed to training dataset.')
flags.DEFINE_float('initial_lr', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('batch_size', 32, 'Global batch size.')
flags.DEFINE_integer('max_iters', 500000, 'Maximum iterations.')
flags.DEFINE_boolean('block3_strides', True, 'Whether to use block3_strides.')
flags.DEFINE_boolean('use_augmentation', True,
'Whether to use ImageNet style augmentation.')
flags.DEFINE_string(
'imagenet_checkpoint', None,
'ImageNet checkpoint for ResNet backbone. If None, no checkpoint is used.')
flags.DEFINE_float(
'attention_loss_weight', 1.0,
'Weight to apply to the attention loss when calculating the '
'total loss of the model.')
flags.DEFINE_boolean('delg_global_features', False,
'Whether to train a DELG model.')
flags.DEFINE_float(
'delg_gem_power', 3.0, 'Power for Generalized Mean pooling. Used only if '
'delg_global_features=True.')
flags.DEFINE_integer(
'delg_embedding_layer_dim', 2048,
    'Size of the FC whitening layer (embedding layer). Used only if '
    'delg_global_features=True.')
flags.DEFINE_float(
'delg_scale_factor_init', 45.25,
'Initial value of the scaling factor of the cosine logits. The default '
'value is sqrt(2048). Used only if delg_global_features=True.')
flags.DEFINE_float('delg_arcface_margin', 0.1,
'ArcFace margin. Used only if delg_global_features=True.')
flags.DEFINE_integer('image_size', 321, 'Size of each image side to use.')
flags.DEFINE_boolean('use_autoencoder', True,
'Whether to train an autoencoder.')
flags.DEFINE_float(
'reconstruction_loss_weight', 10.0,
    'Weight to apply to the reconstruction loss from the autoencoder when '
    'calculating total loss of the model. Used only if use_autoencoder=True.')
flags.DEFINE_float(
'autoencoder_dimensions', 128,
    'Number of dimensions of the autoencoder. Used only if '
    'use_autoencoder=True.')
flags.DEFINE_float(
'local_feature_map_channels', 1024,
'Number of channels at backbone layer used for local feature extraction. '
    'Default value 1024 is the number of channels of block3. Used only if '
    'use_autoencoder=True.')
def _record_accuracy(metric, logits, labels):
"""Record accuracy given predicted logits and ground-truth labels."""
softmax_probabilities = tf.keras.layers.Softmax()(logits)
metric.update_state(labels, softmax_probabilities)
def _attention_summaries(scores, global_step):
"""Record statistics of the attention score."""
tf.summary.image(
'batch_attention',
scores / tf.reduce_max(scores + 1e-3),
step=global_step)
tf.summary.scalar('attention/max', tf.reduce_max(scores), step=global_step)
tf.summary.scalar('attention/min', tf.reduce_min(scores), step=global_step)
tf.summary.scalar('attention/mean', tf.reduce_mean(scores), step=global_step)
tf.summary.scalar(
'attention/percent_25',
tfp.stats.percentile(scores, 25.0),
step=global_step)
tf.summary.scalar(
'attention/percent_50',
tfp.stats.percentile(scores, 50.0),
step=global_step)
tf.summary.scalar(
'attention/percent_75',
tfp.stats.percentile(scores, 75.0),
step=global_step)
def create_model(num_classes):
"""Define DELF model, and initialize classifiers."""
if FLAGS.delg_global_features:
model = delg_model.Delg(
block3_strides=FLAGS.block3_strides,
name='DELG',
gem_power=FLAGS.delg_gem_power,
embedding_layer_dim=FLAGS.delg_embedding_layer_dim,
scale_factor_init=FLAGS.delg_scale_factor_init,
arcface_margin=FLAGS.delg_arcface_margin,
use_dim_reduction=FLAGS.use_autoencoder,
reduced_dimension=FLAGS.autoencoder_dimensions,
dim_expand_channels=FLAGS.local_feature_map_channels)
else:
model = delf_model.Delf(
block3_strides=FLAGS.block3_strides,
name='DELF',
use_dim_reduction=FLAGS.use_autoencoder,
reduced_dimension=FLAGS.autoencoder_dimensions,
dim_expand_channels=FLAGS.local_feature_map_channels)
model.init_classifiers(num_classes)
return model
def _learning_rate_schedule(global_step_value, max_iters, initial_lr):
"""Calculates learning_rate with linear decay.
Args:
global_step_value: int, global step.
max_iters: int, maximum iterations.
initial_lr: float, initial learning rate.
Returns:
lr: float, learning rate.
"""
lr = initial_lr * (1.0 - global_step_value / max_iters)
return lr
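# Illustrative sketch (not used elsewhere in this script): the schedule decays
# linearly from `initial_lr` at step 0 down to 0 at `max_iters`.
def _example_learning_rate_schedule_values():
  assert _learning_rate_schedule(0, 500000, 0.01) == 0.01
  assert abs(_learning_rate_schedule(250000, 500000, 0.01) - 0.005) < 1e-12
  assert _learning_rate_schedule(500000, 500000, 0.01) == 0.0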
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
#-------------------------------------------------------------
# Log flags used.
logging.info('Running training script with\n')
logging.info('logdir= %s', FLAGS.logdir)
logging.info('initial_lr= %f', FLAGS.initial_lr)
logging.info('block3_strides= %s', str(FLAGS.block3_strides))
# ------------------------------------------------------------
# Create the strategy.
strategy = tf.distribute.MirroredStrategy()
logging.info('Number of devices: %d', strategy.num_replicas_in_sync)
if FLAGS.debug:
print('Number of devices:', strategy.num_replicas_in_sync)
max_iters = FLAGS.max_iters
global_batch_size = FLAGS.batch_size
image_size = FLAGS.image_size
num_eval_batches = int(50000 / global_batch_size)
report_interval = 100
eval_interval = 1000
save_interval = 1000
initial_lr = FLAGS.initial_lr
clip_val = tf.constant(10.0)
if FLAGS.debug:
tf.config.run_functions_eagerly(True)
global_batch_size = 4
max_iters = 100
num_eval_batches = 1
save_interval = 1
report_interval = 10
# Determine the number of classes based on the version of the dataset.
gld_info = gld.GoogleLandmarksInfo()
num_classes = gld_info.num_classes[FLAGS.dataset_version]
# ------------------------------------------------------------
# Create the distributed train/validation sets.
train_dataset = gld.CreateDataset(
file_pattern=FLAGS.train_file_pattern,
batch_size=global_batch_size,
image_size=image_size,
augmentation=FLAGS.use_augmentation,
seed=FLAGS.seed)
validation_dataset = gld.CreateDataset(
file_pattern=FLAGS.validation_file_pattern,
batch_size=global_batch_size,
image_size=image_size,
augmentation=False,
seed=FLAGS.seed)
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
validation_dist_dataset = strategy.experimental_distribute_dataset(
validation_dataset)
train_iter = iter(train_dist_dataset)
validation_iter = iter(validation_dist_dataset)
# Create a checkpoint directory to store the checkpoints.
checkpoint_prefix = os.path.join(FLAGS.logdir, 'delf_tf2-ckpt')
# ------------------------------------------------------------
# Finally, we do everything in distributed scope.
with strategy.scope():
# Compute loss.
# Set reduction to `none` so we can do the reduction afterwards and divide
# by global batch size.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def compute_loss(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(
per_example_loss, global_batch_size=global_batch_size)
# Set up metrics.
desc_validation_loss = tf.keras.metrics.Mean(name='desc_validation_loss')
attn_validation_loss = tf.keras.metrics.Mean(name='attn_validation_loss')
desc_train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='desc_train_accuracy')
attn_train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='attn_train_accuracy')
desc_validation_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='desc_validation_accuracy')
attn_validation_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='attn_validation_accuracy')
# ------------------------------------------------------------
# Setup DELF model and optimizer.
model = create_model(num_classes)
logging.info('Model, datasets loaded.\nnum_classes= %d', num_classes)
optimizer = tf.keras.optimizers.SGD(learning_rate=initial_lr, momentum=0.9)
# Setup summary writer.
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.logdir, 'train_logs'), flush_millis=10000)
# Setup checkpoint directory.
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
checkpoint,
checkpoint_prefix,
max_to_keep=10,
keep_checkpoint_every_n_hours=3)
# Restores the checkpoint, if existing.
checkpoint.restore(manager.latest_checkpoint)
# ------------------------------------------------------------
# Train step to run on one GPU.
def train_step(inputs):
"""Train one batch."""
images, labels = inputs
# Temporary workaround to avoid some corrupted labels.
labels = tf.clip_by_value(labels, 0, model.num_classes)
def _backprop_loss(tape, loss, weights):
"""Backpropogate losses using clipped gradients.
Args:
tape: gradient tape.
loss: scalar Tensor, loss value.
weights: keras model weights.
"""
gradients = tape.gradient(loss, weights)
clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val)
optimizer.apply_gradients(zip(clipped, weights))
# Record gradients and loss through backbone.
with tf.GradientTape() as gradient_tape:
# Make a forward pass to calculate prelogits.
(desc_prelogits, attn_prelogits, attn_scores, backbone_blocks,
dim_expanded_features, _) = model.global_and_local_forward_pass(images)
# Calculate global loss by applying the descriptor classifier.
if FLAGS.delg_global_features:
desc_logits = model.desc_classification(desc_prelogits, labels)
else:
desc_logits = model.desc_classification(desc_prelogits)
desc_loss = compute_loss(labels, desc_logits)
# Calculate attention loss by applying the attention block classifier.
attn_logits = model.attn_classification(attn_prelogits)
attn_loss = compute_loss(labels, attn_logits)
# Calculate reconstruction loss between the attention prelogits and the
# backbone.
if FLAGS.use_autoencoder:
block3 = tf.stop_gradient(backbone_blocks['block3'])
reconstruction_loss = tf.math.reduce_mean(
tf.keras.losses.MSE(block3, dim_expanded_features))
else:
reconstruction_loss = 0
# Cumulate global loss, attention loss and reconstruction loss.
total_loss = (
desc_loss + FLAGS.attention_loss_weight * attn_loss +
FLAGS.reconstruction_loss_weight * reconstruction_loss)
# Perform backpropagation through the descriptor and attention layers
# together. Note that this will increment the number of iterations of
# "optimizer".
_backprop_loss(gradient_tape, total_loss, model.trainable_weights)
# Step number, for summary purposes.
global_step = optimizer.iterations
# Input image-related summaries.
tf.summary.image('batch_images', (images + 1.0) / 2.0, step=global_step)
tf.summary.scalar(
'image_range/max', tf.reduce_max(images), step=global_step)
tf.summary.scalar(
'image_range/min', tf.reduce_min(images), step=global_step)
# Attention and sparsity summaries.
_attention_summaries(attn_scores, global_step)
activations_zero_fractions = {
'sparsity/%s' % k: tf.nn.zero_fraction(v)
for k, v in backbone_blocks.items()
}
for k, v in activations_zero_fractions.items():
tf.summary.scalar(k, v, step=global_step)
# Scaling factor summary for cosine logits for a DELG model.
if FLAGS.delg_global_features:
tf.summary.scalar(
'desc/scale_factor', model.scale_factor, step=global_step)
# Record train accuracies.
_record_accuracy(desc_train_accuracy, desc_logits, labels)
_record_accuracy(attn_train_accuracy, attn_logits, labels)
return desc_loss, attn_loss, reconstruction_loss
# ------------------------------------------------------------
def validation_step(inputs):
"""Validate one batch."""
images, labels = inputs
labels = tf.clip_by_value(labels, 0, model.num_classes)
# Get descriptor predictions.
blocks = {}
prelogits = model.backbone(
images, intermediates_dict=blocks, training=False)
if FLAGS.delg_global_features:
logits = model.desc_classification(prelogits, labels, training=False)
else:
logits = model.desc_classification(prelogits, training=False)
softmax_probabilities = tf.keras.layers.Softmax()(logits)
validation_loss = loss_object(labels, logits)
desc_validation_loss.update_state(validation_loss)
desc_validation_accuracy.update_state(labels, softmax_probabilities)
# Get attention predictions.
block3 = blocks['block3'] # pytype: disable=key-error
prelogits, _, _ = model.attention(block3, training=False)
logits = model.attn_classification(prelogits, training=False)
softmax_probabilities = tf.keras.layers.Softmax()(logits)
validation_loss = loss_object(labels, logits)
attn_validation_loss.update_state(validation_loss)
attn_validation_accuracy.update_state(labels, softmax_probabilities)
return desc_validation_accuracy.result(), attn_validation_accuracy.result(
)
# `run` replicates the provided computation and runs it
# with the distributed input.
@tf.function
def distributed_train_step(dataset_inputs):
"""Get the actual losses."""
      # Run one train step per replica; each replica returns its descriptor,
      # attention and reconstruction losses.
desc_per_replica_loss, attn_per_replica_loss, recon_per_replica_loss = (
strategy.run(train_step, args=(dataset_inputs,)))
# Reduce over the replicas.
desc_global_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, desc_per_replica_loss, axis=None)
attn_global_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, attn_per_replica_loss, axis=None)
recon_global_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, recon_per_replica_loss, axis=None)
return desc_global_loss, attn_global_loss, recon_global_loss
@tf.function
def distributed_validation_step(dataset_inputs):
return strategy.run(validation_step, args=(dataset_inputs,))
# ------------------------------------------------------------
# *** TRAIN LOOP ***
with summary_writer.as_default():
record_cond = lambda: tf.equal(optimizer.iterations % report_interval, 0)
with tf.summary.record_if(record_cond):
global_step_value = optimizer.iterations.numpy()
# TODO(dananghel): try to load pretrained weights at backbone creation.
# Load pretrained weights for ResNet50 trained on ImageNet.
if (FLAGS.imagenet_checkpoint is not None) and (not global_step_value):
logging.info('Attempting to load ImageNet pretrained weights.')
input_batch = next(train_iter)
_, _, _ = distributed_train_step(input_batch)
model.backbone.restore_weights(FLAGS.imagenet_checkpoint)
logging.info('Done.')
else:
logging.info('Skip loading ImageNet pretrained weights.')
if FLAGS.debug:
model.backbone.log_weights()
last_summary_step_value = None
last_summary_time = None
while global_step_value < max_iters:
# input_batch : images(b, h, w, c), labels(b,).
try:
input_batch = next(train_iter)
except tf.errors.OutOfRangeError:
# Break if we run out of data in the dataset.
logging.info('Stopping training at global step %d, no more data',
global_step_value)
break
# Set learning rate and run the training step over num_gpu gpus.
optimizer.learning_rate = _learning_rate_schedule(
optimizer.iterations.numpy(), max_iters, initial_lr)
desc_dist_loss, attn_dist_loss, recon_dist_loss = (
distributed_train_step(input_batch))
# Step number, to be used for summary/logging.
global_step = optimizer.iterations
global_step_value = global_step.numpy()
# LR, losses and accuracies summaries.
tf.summary.scalar(
'learning_rate', optimizer.learning_rate, step=global_step)
tf.summary.scalar(
'loss/desc/crossentropy', desc_dist_loss, step=global_step)
tf.summary.scalar(
'loss/attn/crossentropy', attn_dist_loss, step=global_step)
if FLAGS.use_autoencoder:
tf.summary.scalar(
'loss/recon/mse', recon_dist_loss, step=global_step)
tf.summary.scalar(
'train_accuracy/desc',
desc_train_accuracy.result(),
step=global_step)
tf.summary.scalar(
'train_accuracy/attn',
attn_train_accuracy.result(),
step=global_step)
# Summary for number of global steps taken per second.
current_time = time.time()
if (last_summary_step_value is not None and
last_summary_time is not None):
tf.summary.scalar(
'global_steps_per_sec',
(global_step_value - last_summary_step_value) /
(current_time - last_summary_time),
step=global_step)
if tf.summary.should_record_summaries().numpy():
last_summary_step_value = global_step_value
last_summary_time = current_time
# Print to console if running locally.
if FLAGS.debug:
if global_step_value % report_interval == 0:
print(global_step.numpy())
print('desc:', desc_dist_loss.numpy())
print('attn:', attn_dist_loss.numpy())
# Validate once in {eval_interval*n, n \in N} steps.
if global_step_value % eval_interval == 0:
for i in range(num_eval_batches):
try:
validation_batch = next(validation_iter)
desc_validation_result, attn_validation_result = (
distributed_validation_step(validation_batch))
except tf.errors.OutOfRangeError:
logging.info('Stopping eval at batch %d, no more data', i)
break
# Log validation results to tensorboard.
tf.summary.scalar(
'validation/desc', desc_validation_result, step=global_step)
tf.summary.scalar(
'validation/attn', attn_validation_result, step=global_step)
logging.info('\nValidation(%f)\n', global_step_value)
logging.info(': desc: %f\n', desc_validation_result.numpy())
logging.info(': attn: %f\n', attn_validation_result.numpy())
# Print to console.
if FLAGS.debug:
print('Validation: desc:', desc_validation_result.numpy())
print(' : attn:', attn_validation_result.numpy())
# Save checkpoint once (each save_interval*n, n \in N) steps, or if
# this is the last iteration.
# TODO(andrearaujo): save only in one of the two ways. They are
# identical, the only difference is that the manager adds some extra
# prefixes and variables (eg, optimizer variables).
if (global_step_value % save_interval
== 0) or (global_step_value >= max_iters):
save_path = manager.save(checkpoint_number=global_step_value)
logging.info('Saved (%d) at %s', global_step_value, save_path)
file_path = '%s/delf_weights' % FLAGS.logdir
model.save_weights(file_path, save_format='tf')
logging.info('Saved weights (%d) at %s', global_step_value,
file_path)
# Reset metrics for next step.
desc_train_accuracy.reset_states()
attn_train_accuracy.reset_states()
desc_validation_loss.reset_states()
attn_validation_loss.reset_states()
desc_validation_accuracy.reset_states()
attn_validation_accuracy.reset_states()
logging.info('Finished training for %d steps.', max_iters)
if __name__ == '__main__':
app.run(main)
| 22,754 | 39.706619 | 80 | py |
models | models-master/research/delf/delf/python/training/global_features/train_utils.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training utilities for Global Features model."""
import os
import pickle
import time
import numpy as np
import tensorflow as tf
from delf.python import whiten
from delf.python.datasets.revisited_op import dataset as test_dataset
from delf.python.datasets.sfm120k import sfm120k
from delf.python.training import global_features_utils
from delf.python.training.model import global_model
def _compute_loss_and_gradient(criterion, model, input, target, neg_num=5):
"""Records gradients and loss through the network.
Args:
criterion: Loss function.
model: Network for the gradient computation.
input: Tuple of query, positive and negative images.
target: List of indexes to specify queries (-1), positives(1), negatives(0).
    neg_num: Integer, number of negatives per tuple.
Returns:
loss: Loss for the training step.
gradients: Computed gradients for the network trainable variables.
"""
# Record gradients and loss through the network.
with tf.GradientTape() as tape:
descriptors = tf.zeros(shape=(0, model.meta['outputdim']), dtype=tf.float32)
for img in input:
# Compute descriptor vector for each image.
o = model(tf.expand_dims(img, axis=0), training=True)
descriptors = tf.concat([descriptors, o], 0)
queries = descriptors[target == -1]
positives = descriptors[target == 1]
negatives = descriptors[target == 0]
negatives = tf.reshape(negatives, [tf.shape(queries)[0], neg_num,
model.meta['outputdim']])
# Loss calculation.
loss = criterion(queries, positives, negatives)
return loss, tape.gradient(loss, model.trainable_variables)
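# Illustrative sketch (not used elsewhere in this module): layout of one training
# tuple as consumed above. With neg_num=2 the images are ordered
# (query, positive, negative_1, negative_2) and `target` marks them with
# -1, 1, 0, 0 respectively.
def _example_tuple_target_layout(neg_num=2):
  return tf.constant([-1, 1] + [0] * neg_num)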
def train_val_one_epoch(
loader, model, criterion, optimizer, epoch, train=True, batch_size=5,
query_size=2000, neg_num=5, update_every=1, debug=False):
"""Executes either training or validation step based on `train` value.
Args:
loader: Training/validation iterable dataset.
model: Network to train/validate.
criterion: Loss function.
optimizer: Network optimizer.
epoch: Integer, epoch number.
train: Bool, specifies training or validation phase.
batch_size: Integer, number of (q,p,n1,...,nN) tuples in a mini-batch.
query_size: Integer, number of queries randomly drawn per one training
epoch.
    neg_num: Integer, number of negatives per tuple.
    update_every: Integer, update model weights every N batches, used to
      handle relatively large batches; batch_size effectively becomes
      update_every x batch_size.
debug: Bool, whether debug mode is used.
Returns:
average_epoch_loss: Average epoch loss.
"""
batch_time = global_features_utils.AverageMeter()
data_time = global_features_utils.AverageMeter()
losses = global_features_utils.AverageMeter()
# Retrieve all trainable variables we defined in the graph.
tvs = model.trainable_variables
accum_grads = [tf.zeros_like(tv.read_value()) for tv in tvs]
end = time.time()
batch_num = 0
print_frequency = 10
all_batch_num = query_size // batch_size
state = 'Train' if train else 'Val'
global_features_utils.debug_and_log('>> {} step:'.format(state))
# For every batch in the dataset; Stops when all batches in the dataset have
# been processed.
while True:
data_time.update(time.time() - end)
if train:
try:
# Train on one batch.
# Each image in the batch is loaded into memory consecutively.
for _ in range(batch_size):
# Because the images are not necessarily of the same size, we can't
# set the batch size with .batch().
batch = loader.get_next()
input_tuple = batch[0:-1]
target_tuple = batch[-1]
loss_value, grads = _compute_loss_and_gradient(
criterion, model, input_tuple, target_tuple, neg_num)
losses.update(loss_value)
# Accumulate gradients.
accum_grads += grads
# Perform weight update if required.
if (batch_num + 1) % update_every == 0 or (
batch_num + 1) == all_batch_num:
# Do one step for multiple batches. Accumulated gradients are
# used.
optimizer.apply_gradients(
zip(accum_grads, model.trainable_variables))
accum_grads = [tf.zeros_like(tv.read_value()) for tv in tvs]
# We break when we run out of range, i.e., we exhausted all dataset
# images.
except tf.errors.OutOfRangeError:
break
else:
# Validate one batch.
# We load full batch into memory.
input = []
target = []
try:
for _ in range(batch_size):
# Because the images are not necessarily of the same size, we can't
# set the batch size with .batch().
batch = loader.get_next()
input.append(batch[0:-1])
target.append(batch[-1])
# We break when we run out of range, i.e., we exhausted all dataset
# images.
except tf.errors.OutOfRangeError:
break
descriptors = tf.zeros(shape=(0, model.meta['outputdim']),
dtype=tf.float32)
for input_tuple in input:
for img in input_tuple:
# Compute the global descriptor vector.
model_out = model(tf.expand_dims(img, axis=0), training=False)
descriptors = tf.concat([descriptors, model_out], 0)
# No need to reduce memory consumption (no backward pass):
# Compute loss for the full batch.
queries = descriptors[target == -1]
positives = descriptors[target == 1]
negatives = descriptors[target == 0]
negatives = tf.reshape(negatives, [tf.shape(queries)[0], neg_num,
model.meta['outputdim']])
loss = criterion(queries, positives, negatives)
# Record loss.
losses.update(loss / batch_size, batch_size)
# Measure elapsed time.
batch_time.update(time.time() - end)
end = time.time()
# Record immediate loss and elapsed time.
if debug and ((batch_num + 1) % print_frequency == 0 or
batch_num == 0 or (batch_num + 1) == all_batch_num):
global_features_utils.debug_and_log(
'>> {0}: [{1} epoch][{2}/{3} batch]\t Time val: {'
'batch_time.val:.3f} '
'(Batch Time avg: {batch_time.avg:.3f})\t Data {'
'data_time.val:.3f} ('
'Time avg: {data_time.avg:.3f})\t Immediate loss value: {'
'loss.val:.4f} '
'(Loss avg: {loss.avg:.4f})'.format(
state, epoch, batch_num + 1, all_batch_num,
batch_time=batch_time,
data_time=data_time, loss=losses), debug=True, log=False)
batch_num += 1
return losses.avg
def test_retrieval(datasets, net, epoch, writer=None, model_directory=None,
precompute_whitening=None, data_root='data', multiscale=[1.],
test_image_size=1024):
"""Testing step.
Evaluates the network on the provided test datasets by computing single-scale
mAP for easy/medium/hard cases. If `writer` is specified, saves the mAP
values in a tensorboard supported format.
Args:
datasets: List of dataset names for model testing (from
`_TEST_DATASET_NAMES`).
net: Network to evaluate.
epoch: Integer, epoch number.
writer: Tensorboard writer.
model_directory: String, path to the model directory.
precompute_whitening: Dataset used to learn whitening. If no
precomputation required, then `None`. Only 'retrieval-SfM-30k' and
'retrieval-SfM-120k' datasets are supported for whitening pre-computation.
data_root: Absolute path to the data folder.
multiscale: List of scales for multiscale testing.
test_image_size: Integer, maximum size of the test images.
"""
global_features_utils.debug_and_log(">> Testing step:")
global_features_utils.debug_and_log(
'>> Evaluating network on test datasets...')
# Precompute whitening.
if precompute_whitening is not None:
# If whitening already precomputed, load it and skip the computations.
filename = os.path.join(
model_directory, 'learned_whitening_mP_{}_epoch.pkl'.format(epoch))
filename_layer = os.path.join(
model_directory,
'learned_whitening_layer_config_{}_epoch.pkl'.format(
epoch))
if tf.io.gfile.exists(filename):
global_features_utils.debug_and_log(
'>> {}: Whitening for this epoch is already precomputed. '
'Loading...'.format(precompute_whitening))
with tf.io.gfile.GFile(filename, 'rb') as learned_whitening_file:
learned_whitening = pickle.load(learned_whitening_file)
else:
start = time.time()
global_features_utils.debug_and_log(
'>> {}: Learning whitening...'.format(precompute_whitening))
# Loading db.
db_root = os.path.join(data_root, 'train', precompute_whitening)
ims_root = os.path.join(db_root, 'ims')
db_filename = os.path.join(db_root,
'{}-whiten.pkl'.format(precompute_whitening))
with tf.io.gfile.GFile(db_filename, 'rb') as f:
db = pickle.load(f)
images = [sfm120k.id2filename(db['cids'][i], ims_root) for i in
range(len(db['cids']))]
# Extract whitening vectors.
global_features_utils.debug_and_log(
'>> {}: Extracting...'.format(precompute_whitening))
wvecs = global_model.extract_global_descriptors_from_list(net, images,
test_image_size)
# Learning whitening.
global_features_utils.debug_and_log(
'>> {}: Learning...'.format(precompute_whitening))
wvecs = wvecs.numpy()
mean_vector, projection_matrix = whiten.whitenlearn(wvecs, db['qidxs'],
db['pidxs'])
learned_whitening = {'m': mean_vector, 'P': projection_matrix}
global_features_utils.debug_and_log(
'>> {}: Elapsed time: {}'.format(precompute_whitening,
global_features_utils.htime(
time.time() - start)))
# Save learned_whitening parameters for a later use.
with tf.io.gfile.GFile(filename, 'wb') as learned_whitening_file:
pickle.dump(learned_whitening, learned_whitening_file)
# Saving whitening as a layer.
bias = -np.dot(mean_vector.T, projection_matrix.T)
whitening_layer = tf.keras.layers.Dense(
net.meta['outputdim'],
activation=None,
use_bias=True,
kernel_initializer=tf.keras.initializers.Constant(
projection_matrix.T),
bias_initializer=tf.keras.initializers.Constant(bias)
)
with tf.io.gfile.GFile(filename_layer, 'wb') as learned_whitening_file:
pickle.dump(whitening_layer.get_config(), learned_whitening_file)
else:
learned_whitening = None
# Evaluate on test datasets.
for dataset in datasets:
start = time.time()
# Prepare config structure for the test dataset.
cfg = test_dataset.CreateConfigForTestDataset(dataset,
os.path.join(data_root))
images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
bounding_boxes = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
# Extract database and query vectors.
global_features_utils.debug_and_log(
'>> {}: Extracting database images...'.format(dataset))
vecs = global_model.extract_global_descriptors_from_list(
net, images, test_image_size, scales=multiscale)
global_features_utils.debug_and_log(
'>> {}: Extracting query images...'.format(dataset))
qvecs = global_model.extract_global_descriptors_from_list(
net, qimages, test_image_size, bounding_boxes,
scales=multiscale)
global_features_utils.debug_and_log('>> {}: Evaluating...'.format(dataset))
# Convert the obtained descriptors to numpy.
vecs = vecs.numpy()
qvecs = qvecs.numpy()
# Search, rank and print test set metrics.
_calculate_metrics_and_export_to_tensorboard(vecs, qvecs, dataset, cfg,
writer, epoch, whiten=False)
if learned_whitening is not None:
# Whiten the vectors.
mean_vector = learned_whitening['m']
projection_matrix = learned_whitening['P']
vecs_lw = whiten.whitenapply(vecs, mean_vector, projection_matrix)
qvecs_lw = whiten.whitenapply(qvecs, mean_vector, projection_matrix)
# Search, rank, and print.
_calculate_metrics_and_export_to_tensorboard(
vecs_lw, qvecs_lw, dataset, cfg, writer, epoch, whiten=True)
global_features_utils.debug_and_log(
'>> {}: Elapsed time: {}'.format(
dataset, global_features_utils.htime(time.time() - start)))
def _calculate_metrics_and_export_to_tensorboard(vecs, qvecs, dataset, cfg,
writer, epoch, whiten=False):
"""
Calculates metrics and exports them to tensorboard.
Args:
vecs: Numpy array dataset global descriptors.
qvecs: Numpy array query global descriptors.
dataset: String, one of `_TEST_DATASET_NAMES`.
cfg: Dataset configuration.
writer: Tensorboard writer.
epoch: Integer, epoch number.
    whiten: Boolean, whether the metrics are computed on whitened descriptors
      (i.e. whitening applied as a post-processing step). Affects the name of
      the exported TensorBoard metrics.
"""
# Search, rank and print test set metrics.
scores = np.dot(vecs.T, qvecs)
ranks = np.transpose(np.argsort(-scores, axis=0))
metrics = global_features_utils.compute_metrics_and_print(dataset, ranks,
cfg['gnd'])
# Save calculated metrics in a tensorboard format.
if writer:
if whiten:
metric_names = ['test_accuracy_whiten_{}_E'.format(dataset),
'test_accuracy_whiten_{}_M'.format(dataset),
'test_accuracy_whiten_{}_H'.format(dataset)]
else:
metric_names = ['test_accuracy_{}_E'.format(dataset),
'test_accuracy_{}_M'.format(dataset),
'test_accuracy_{}_H'.format(dataset)]
tf.summary.scalar(metric_names[0], metrics[0][0], step=epoch)
tf.summary.scalar(metric_names[1], metrics[1][0], step=epoch)
tf.summary.scalar(metric_names[2], metrics[2][0], step=epoch)
writer.flush()
return None
| 15,531 | 39.553525 | 80 | py |
models | models-master/research/delf/delf/python/training/global_features/train.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for Global Features model."""
import math
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from delf.python.datasets.sfm120k import dataset_download
from delf.python.datasets.sfm120k import sfm120k
from delf.python.training import global_features_utils
from delf.python.training import tensorboard_utils
from delf.python.training.global_features import train_utils
from delf.python.training.losses import ranking_losses
from delf.python.training.model import global_model
_LOSS_NAMES = ['contrastive', 'triplet']
_MODEL_NAMES = global_features_utils.get_standard_keras_models()
_OPTIMIZER_NAMES = ['sgd', 'adam']
_POOL_NAMES = ['mac', 'spoc', 'gem']
_PRECOMPUTE_WHITEN_NAMES = ['retrieval-SfM-30k', 'retrieval-SfM-120k']
_TEST_DATASET_NAMES = ['roxford5k', 'rparis6k']
_TRAINING_DATASET_NAMES = ['retrieval-SfM-120k']
_VALIDATION_TYPES = ['standard', 'eccv2020']
FLAGS = flags.FLAGS
flags.DEFINE_boolean('debug', False, 'Debug mode.')
# Export directory, training and val datasets, test datasets.
flags.DEFINE_string('data_root', "data",
'Absolute path to the folder containing training data.')
flags.DEFINE_string('directory', "data",
'Destination where trained network should be saved.')
flags.DEFINE_enum('training_dataset', 'retrieval-SfM-120k',
_TRAINING_DATASET_NAMES, 'Training dataset: ' +
' | '.join(_TRAINING_DATASET_NAMES) + '.')
flags.DEFINE_enum('validation_type', None, _VALIDATION_TYPES,
'Type of the evaluation to use. Either `None`, `standard` '
'or `eccv2020`.')
flags.DEFINE_list('test_datasets', 'roxford5k,rparis6k',
'Comma separated list of test datasets: ' +
' | '.join(_TEST_DATASET_NAMES) + '.')
flags.DEFINE_enum('precompute_whitening', None, _PRECOMPUTE_WHITEN_NAMES,
'Dataset used to learn whitening: ' +
' | '.join(_PRECOMPUTE_WHITEN_NAMES) + '.')
flags.DEFINE_integer('test_freq', 5,
'Run test evaluation every N epochs.')
flags.DEFINE_list('multiscale', [1.],
                  'Use multiscale vectors for testing, ' +
                  'examples: 1 | 1,1/2**(1/2),1/2 | 1,2**(1/2),1/2**(1/2). '
'Pass as a string of comma separated values.')
# Network architecture and initialization options.
flags.DEFINE_enum('arch', 'ResNet101', _MODEL_NAMES,
'Model architecture: ' + ' | '.join(_MODEL_NAMES) + '.')
flags.DEFINE_enum('pool', 'gem', _POOL_NAMES,
'Pooling options: ' + ' | '.join(_POOL_NAMES) + '.')
flags.DEFINE_bool('whitening', False,
'Whether to train model with learnable whitening ('
'linear layer) after the pooling.')
flags.DEFINE_bool('pretrained', True,
'Whether to initialize model with random weights ('
'default: pretrained on imagenet).')
flags.DEFINE_enum('loss', 'contrastive', _LOSS_NAMES,
'Training loss options: ' + ' | '.join(_LOSS_NAMES) + '.')
flags.DEFINE_float('loss_margin', 0.7, 'Loss margin.')
# train/val options specific for image retrieval learning.
flags.DEFINE_integer('image_size', 1024,
'Maximum size of longer image side used for training.')
flags.DEFINE_integer('neg_num', 5, 'Number of negative images per train/val '
'tuple.')
flags.DEFINE_integer('query_size', 2000,
'Number of queries randomly drawn per one training epoch.')
flags.DEFINE_integer('pool_size', 20000,
'Size of the pool for hard negative mining.')
# Standard training/validation options.
flags.DEFINE_string('gpu_id', '0', 'GPU id used for training.')
flags.DEFINE_integer('epochs', 100, 'Number of total epochs to run.')
flags.DEFINE_integer('batch_size', 5,
'Number of (q,p,n1,...,nN) tuples in a mini-batch.')
flags.DEFINE_integer('update_every', 1,
'Update model weights every N batches, used to handle '
                     'relatively large batches; batch_size effectively '
'becomes update_every `x` batch_size.')
flags.DEFINE_enum('optimizer', 'adam', _OPTIMIZER_NAMES,
'Optimizer options: ' + ' | '.join(_OPTIMIZER_NAMES) + '.')
flags.DEFINE_float('lr', 1e-6, 'Initial learning rate.')
flags.DEFINE_float('momentum', 0.9, 'Momentum.')
flags.DEFINE_float('weight_decay', 1e-6, 'Weight decay.')
flags.DEFINE_bool('resume', False,
'Whether to start from the latest checkpoint in the logdir.')
flags.DEFINE_bool('launch_tensorboard', False, 'Whether to launch tensorboard.')
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Manually check if there are unknown test datasets and if the dataset
# ground truth files are downloaded.
for dataset in FLAGS.test_datasets:
if dataset not in _TEST_DATASET_NAMES:
raise ValueError('Unsupported or unknown test dataset: {}.'.format(
dataset))
test_data_config = os.path.join(FLAGS.data_root,
'gnd_{}.pkl'.format(dataset))
if not tf.io.gfile.exists(test_data_config):
raise ValueError(
'{} ground truth file at {} not found. Please download it '
'according to '
'the DELG instructions.'.format(dataset, FLAGS.data_root))
# Check if train dataset is downloaded and download it if not found.
dataset_download.download_train(FLAGS.data_root)
# Creating model export directory if it does not exist.
model_directory = global_features_utils.create_model_directory(
FLAGS.training_dataset, FLAGS.arch, FLAGS.pool, FLAGS.whitening,
FLAGS.pretrained, FLAGS.loss, FLAGS.loss_margin, FLAGS.optimizer,
FLAGS.lr, FLAGS.weight_decay, FLAGS.neg_num, FLAGS.query_size,
FLAGS.pool_size, FLAGS.batch_size, FLAGS.update_every,
FLAGS.image_size, FLAGS.directory)
# Setting up logging directory, same as where the model is stored.
logging.get_absl_handler().use_absl_log_file('absl_logging', model_directory)
# Set cuda visible device.
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_id
global_features_utils.debug_and_log('>> Num GPUs Available: {}'.format(
len(tf.config.experimental.list_physical_devices('GPU'))),
FLAGS.debug)
# Set random seeds.
tf.random.set_seed(0)
np.random.seed(0)
# Initialize the model.
if FLAGS.pretrained:
global_features_utils.debug_and_log(
'>> Using pre-trained model \'{}\''.format(FLAGS.arch))
else:
global_features_utils.debug_and_log(
'>> Using model from scratch (random weights) \'{}\'.'.format(
FLAGS.arch))
model_params = {'architecture': FLAGS.arch, 'pooling': FLAGS.pool,
'whitening': FLAGS.whitening, 'pretrained': FLAGS.pretrained,
'data_root': FLAGS.data_root}
model = global_model.GlobalFeatureNet(**model_params)
# Freeze running mean and std in batch normalization layers.
# We do training one image at a time to improve memory requirements of
# the network; therefore, the computed statistics would not be per a
# batch. Instead, we choose freezing - setting the parameters of all
# batch norm layers in the network to non-trainable (i.e., using original
# imagenet statistics).
for layer in model.feature_extractor.layers:
if isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = False
global_features_utils.debug_and_log('>> Network initialized.')
global_features_utils.debug_and_log('>> Loss: {}.'.format(FLAGS.loss))
# Define the loss function.
if FLAGS.loss == 'contrastive':
criterion = ranking_losses.ContrastiveLoss(margin=FLAGS.loss_margin)
elif FLAGS.loss == 'triplet':
criterion = ranking_losses.TripletLoss(margin=FLAGS.loss_margin)
else:
raise ValueError('Loss {} not available.'.format(FLAGS.loss))
# Defining parameters for the training.
# When pre-computing whitening, we run evaluation before the network training
# and the `start_epoch` is set to 0. In other cases, we start from epoch 1.
start_epoch = 1
exp_decay = math.exp(-0.01)
decay_steps = FLAGS.query_size / FLAGS.batch_size
# Define learning rate decay schedule.
lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=FLAGS.lr,
decay_steps=decay_steps,
decay_rate=exp_decay)
# Define the optimizer.
if FLAGS.optimizer == 'sgd':
opt = tfa.optimizers.extend_with_decoupled_weight_decay(
tf.keras.optimizers.SGD)
optimizer = opt(weight_decay=FLAGS.weight_decay,
learning_rate=lr_scheduler, momentum=FLAGS.momentum)
elif FLAGS.optimizer == 'adam':
opt = tfa.optimizers.extend_with_decoupled_weight_decay(
tf.keras.optimizers.Adam)
optimizer = opt(weight_decay=FLAGS.weight_decay, learning_rate=lr_scheduler)
else:
raise ValueError('Optimizer {} not available.'.format(FLAGS.optimizer))
# Initializing logging.
writer = tf.summary.create_file_writer(model_directory)
tf.summary.experimental.set_step(1)
# Setting up the checkpoint manager.
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
checkpoint,
model_directory,
max_to_keep=10,
keep_checkpoint_every_n_hours=3)
if FLAGS.resume:
# Restores the checkpoint, if existing.
global_features_utils.debug_and_log('>> Continuing from a checkpoint.')
checkpoint.restore(manager.latest_checkpoint)
# Launching tensorboard if required.
if FLAGS.launch_tensorboard:
tensorboard = tf.keras.callbacks.TensorBoard(model_directory)
tensorboard.set_model(model=model)
tensorboard_utils.launch_tensorboard(log_dir=model_directory)
# Log flags used.
global_features_utils.debug_and_log('>> Running training script with:')
global_features_utils.debug_and_log('>> logdir = {}'.format(model_directory))
if FLAGS.training_dataset.startswith('retrieval-SfM-120k'):
train_dataset = sfm120k.CreateDataset(
data_root=FLAGS.data_root,
mode='train',
imsize=FLAGS.image_size,
num_negatives=FLAGS.neg_num,
num_queries=FLAGS.query_size,
pool_size=FLAGS.pool_size
)
if FLAGS.validation_type is not None:
val_dataset = sfm120k.CreateDataset(
data_root=FLAGS.data_root,
mode='val',
imsize=FLAGS.image_size,
num_negatives=FLAGS.neg_num,
num_queries=float('Inf'),
pool_size=float('Inf'),
eccv2020=True if FLAGS.validation_type == 'eccv2020' else False
)
train_dataset_output_types = [tf.float32 for i in range(2 + FLAGS.neg_num)]
train_dataset_output_types.append(tf.int32)
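  # Each training tuple thus yields `2 + neg_num` float32 image tensors
  # (query, positive and negatives) followed by one int32 tensor of per-image
  # targets, matching the output types above.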
global_features_utils.debug_and_log(
'>> Training the {} network'.format(model_directory))
global_features_utils.debug_and_log('>> GPU ids: {}'.format(FLAGS.gpu_id))
with writer.as_default():
# Precompute whitening if needed.
if FLAGS.precompute_whitening is not None:
epoch = 0
train_utils.test_retrieval(
FLAGS.test_datasets, model, writer=writer,
epoch=epoch, model_directory=model_directory,
precompute_whitening=FLAGS.precompute_whitening,
data_root=FLAGS.data_root,
multiscale=FLAGS.multiscale)
for epoch in range(start_epoch, FLAGS.epochs + 1):
# Set manual seeds per epoch.
np.random.seed(epoch)
tf.random.set_seed(epoch)
# Find hard-negatives.
      # While hard-positive examples are fixed during the whole training
      # process and are randomly chosen once per epoch, hard-negatives depend
      # on the current CNN parameters and are re-mined once per epoch.
avg_neg_distance = train_dataset.create_epoch_tuples(model)
def _train_gen():
return (inst for inst in train_dataset)
train_loader = tf.data.Dataset.from_generator(
_train_gen,
output_types=tuple(train_dataset_output_types))
loss = train_utils.train_val_one_epoch(
loader=iter(train_loader), model=model,
criterion=criterion, optimizer=optimizer, epoch=epoch,
batch_size=FLAGS.batch_size, query_size=FLAGS.query_size,
neg_num=FLAGS.neg_num, update_every=FLAGS.update_every,
debug=FLAGS.debug)
# Write a scalar summary.
tf.summary.scalar('train_epoch_loss', loss, step=epoch)
# Forces summary writer to send any buffered data to storage.
writer.flush()
# Evaluate on validation set.
if FLAGS.validation_type is not None and (epoch % FLAGS.test_freq == 0 or
epoch == 1):
avg_neg_distance = val_dataset.create_epoch_tuples(model,
model_directory)
def _val_gen():
return (inst for inst in val_dataset)
val_loader = tf.data.Dataset.from_generator(
_val_gen, output_types=tuple(train_dataset_output_types))
loss = train_utils.train_val_one_epoch(
loader=iter(val_loader), model=model,
criterion=criterion, optimizer=None,
epoch=epoch, train=False, batch_size=FLAGS.batch_size,
query_size=FLAGS.query_size, neg_num=FLAGS.neg_num,
update_every=FLAGS.update_every, debug=FLAGS.debug)
tf.summary.scalar('val_epoch_loss', loss, step=epoch)
writer.flush()
# Evaluate on test datasets every test_freq epochs.
if epoch == 1 or epoch % FLAGS.test_freq == 0:
train_utils.test_retrieval(
FLAGS.test_datasets, model, writer=writer, epoch=epoch,
model_directory=model_directory,
precompute_whitening=FLAGS.precompute_whitening,
data_root=FLAGS.data_root, multiscale=FLAGS.multiscale)
# Saving checkpoints and model weights.
try:
save_path = manager.save(checkpoint_number=epoch)
global_features_utils.debug_and_log(
'Saved ({}) at {}'.format(epoch, save_path))
filename = os.path.join(model_directory,
'checkpoint_epoch_{}.h5'.format(epoch))
model.save_weights(filename, save_format='h5')
global_features_utils.debug_and_log(
'Saved weights ({}) at {}'.format(epoch, filename))
except Exception as ex:
global_features_utils.debug_and_log(
'Could not save checkpoint: {}'.format(ex))
if __name__ == '__main__':
app.run(main)
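# Example invocation (illustrative values and hypothetical paths; the flag
# names are the ones referenced via FLAGS above):
# python3 <this_training_script>.py \
#   --data_root=/tmp/sfm120k --training_dataset=retrieval-SfM-120k \
#   --arch=ResNet101 --pool=gem --loss=contrastive --optimizer=adam \
#   --lr=5e-7 --neg_num=5 --query_size=2000 --pool_size=20000 \
#   --batch_size=5 --image_size=1024 --epochs=100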
| 15,702 | 42.258953 | 80 | py |
models | models-master/research/delf/delf/python/training/model/delf_model_test.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DELF model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
from delf.python.training.model import delf_model
class DelfTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_build_model(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
input_shape = (batch_size, image_size, image_size, 3)
model = delf_model.Delf(block3_strides=block3_strides, name='DELF')
model.init_classifiers(num_classes)
images = tf.random.uniform(input_shape, minval=-1.0, maxval=1.0, seed=0)
blocks = {}
# Get global feature by pooling block4 features.
desc_prelogits = model.backbone(
images, intermediates_dict=blocks, training=False)
desc_logits = model.desc_classification(desc_prelogits)
self.assertAllEqual(desc_prelogits.shape, (batch_size, 2048))
self.assertAllEqual(desc_logits.shape, (batch_size, num_classes))
features = blocks['block3']
attn_prelogits, _, _ = model.attention(features)
attn_logits = model.attn_classification(attn_prelogits)
self.assertAllEqual(attn_prelogits.shape, (batch_size, 1024))
self.assertAllEqual(attn_logits.shape, (batch_size, num_classes))
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_train_step(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
clip_val = 10.0
input_shape = (batch_size, image_size, image_size, 3)
model = delf_model.Delf(block3_strides=block3_strides, name='DELF')
model.init_classifiers(num_classes)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)
images = tf.random.uniform(input_shape, minval=0.0, maxval=1.0, seed=0)
labels = tf.random.uniform((batch_size,),
minval=0,
maxval=model.num_classes - 1,
dtype=tf.int64)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def compute_loss(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(
per_example_loss, global_batch_size=batch_size)
with tf.GradientTape() as gradient_tape:
(desc_prelogits, attn_prelogits, _, _, _,
_) = model.global_and_local_forward_pass(images)
# Calculate global loss by applying the descriptor classifier.
desc_logits = model.desc_classification(desc_prelogits)
desc_loss = compute_loss(labels, desc_logits)
# Calculate attention loss by applying the attention block classifier.
attn_logits = model.attn_classification(attn_prelogits)
attn_loss = compute_loss(labels, attn_logits)
# Cumulate global loss and attention loss and backpropagate through the
# descriptor layer and attention layer together.
total_loss = desc_loss + attn_loss
gradients = gradient_tape.gradient(total_loss, model.trainable_weights)
clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val)
optimizer.apply_gradients(zip(clipped, model.trainable_weights))
if __name__ == '__main__':
tf.test.main()
| 4,198 | 37.522936 | 80 | py |
models | models-master/research/delf/delf/python/training/model/delf_model.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DELF model implementation based on the following paper.
Large-Scale Image Retrieval with Attentive Deep Local Features
https://arxiv.org/abs/1612.06321
"""
import tensorflow as tf
from delf.python.training.model import resnet50 as resnet
layers = tf.keras.layers
reg = tf.keras.regularizers
_DECAY = 0.0001
class AttentionModel(tf.keras.Model):
"""Instantiates attention model.
Uses two [kernel_size x kernel_size] convolutions and softplus as activation
to compute an attention map with the same resolution as the featuremap.
  Features are L2-normalized and aggregated using attention probabilities as
  weights.
The features (targets) to be aggregated can be the input featuremap, or a
different one with the same resolution.
"""
def __init__(self, kernel_size=1, decay=_DECAY, name='attention'):
"""Initialization of attention model.
Args:
kernel_size: int, kernel size of convolutions.
decay: float, decay for l2 regularization of kernel weights.
name: str, name to identify model.
"""
super(AttentionModel, self).__init__(name=name)
# First convolutional layer (called with relu activation).
self.conv1 = layers.Conv2D(
512,
kernel_size,
kernel_regularizer=reg.l2(decay),
padding='same',
name='attn_conv1')
self.bn_conv1 = layers.BatchNormalization(axis=3, name='bn_conv1')
# Second convolutional layer, with softplus activation.
self.conv2 = layers.Conv2D(
1,
kernel_size,
kernel_regularizer=reg.l2(decay),
padding='same',
name='attn_conv2')
self.activation_layer = layers.Activation('softplus')
def call(self, inputs, targets=None, training=True):
x = self.conv1(inputs)
x = self.bn_conv1(x, training=training)
x = tf.nn.relu(x)
score = self.conv2(x)
prob = self.activation_layer(score)
# Aggregate inputs if targets is None.
if targets is None:
targets = inputs
# L2-normalize the featuremap before pooling.
targets = tf.nn.l2_normalize(targets, axis=-1)
feat = tf.reduce_mean(tf.multiply(targets, prob), [1, 2], keepdims=False)
return feat, prob, score
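# Minimal usage sketch for the attention block; the batch size, spatial
# resolution and channel count below are example assumptions only.
def _attention_model_example():
  """Runs AttentionModel on a random featuremap and returns its outputs."""
  attention = AttentionModel(kernel_size=1)
  features = tf.random.uniform((2, 21, 21, 1024))  # [B, H, W, C] featuremap.
  # `feat` is the [B, C] attention-weighted descriptor; `prob` and `score` are
  # the [B, H, W, 1] attention probabilities and raw scores.
  feat, prob, score = attention(features, training=False)
  return feat, prob, score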
class AutoencoderModel(tf.keras.Model):
"""Instantiates the Keras Autoencoder model."""
def __init__(self, reduced_dimension, expand_dimension, kernel_size=1,
name='autoencoder'):
"""Initialization of Autoencoder model.
Args:
reduced_dimension: int, the output dimension of the autoencoder layer.
expand_dimension: int, the input dimension of the autoencoder layer.
kernel_size: int or tuple, height and width of the 2D convolution window.
name: str, name to identify model.
"""
super(AutoencoderModel, self).__init__(name=name)
self.conv1 = layers.Conv2D(
reduced_dimension,
kernel_size,
padding='same',
name='autoenc_conv1')
self.conv2 = layers.Conv2D(
expand_dimension,
kernel_size,
activation=tf.keras.activations.relu,
padding='same',
name='autoenc_conv2')
def call(self, inputs):
dim_reduced_features = self.conv1(inputs)
dim_expanded_features = self.conv2(dim_reduced_features)
return dim_expanded_features, dim_reduced_features
class Delf(tf.keras.Model):
"""Instantiates Keras DELF model using ResNet50 as backbone.
This class implements the [DELF](https://arxiv.org/abs/1612.06321) model for
extracting local features from images. The backbone is a ResNet50 network
that extracts featuremaps from both conv_4 and conv_5 layers. Activations
from conv_4 are used to compute an attention map of the same resolution.
"""
def __init__(self,
block3_strides=True,
name='DELF',
pooling='avg',
gem_power=3.0,
embedding_layer=False,
embedding_layer_dim=2048,
use_dim_reduction=False,
reduced_dimension=128,
dim_expand_channels=1024):
"""Initialization of DELF model.
Args:
block3_strides: bool, whether to add strides to the output of block3.
name: str, name to identify model.
pooling: str, pooling mode for global feature extraction; possible values
        are 'None', 'avg', 'max', 'gem'.
gem_power: float, GeM power for GeM pooling. Only used if pooling ==
'gem'.
embedding_layer: bool, whether to create an embedding layer (FC whitening
layer).
embedding_layer_dim: int, size of the embedding layer.
use_dim_reduction: Whether to integrate dimensionality reduction layers.
If True, extra layers are added to reduce the dimensionality of the
extracted features.
reduced_dimension: int, only used if use_dim_reduction is True. The output
dimension of the autoencoder layer.
dim_expand_channels: int, only used if use_dim_reduction is True. The
number of channels of the backbone block used. Default value 1024 is the
number of channels of backbone block 'block3'.
"""
super(Delf, self).__init__(name=name)
# Backbone using Keras ResNet50.
self.backbone = resnet.ResNet50(
'channels_last',
name='backbone',
include_top=False,
pooling=pooling,
block3_strides=block3_strides,
average_pooling=False,
gem_power=gem_power,
embedding_layer=embedding_layer,
embedding_layer_dim=embedding_layer_dim)
# Attention model.
self.attention = AttentionModel(name='attention')
# Autoencoder model.
self._use_dim_reduction = use_dim_reduction
if self._use_dim_reduction:
self.autoencoder = AutoencoderModel(reduced_dimension,
dim_expand_channels,
name='autoencoder')
def init_classifiers(self, num_classes, desc_classification=None):
"""Define classifiers for training backbone and attention models."""
self.num_classes = num_classes
if desc_classification is None:
self.desc_classification = layers.Dense(
num_classes, activation=None, kernel_regularizer=None, name='desc_fc')
else:
self.desc_classification = desc_classification
self.attn_classification = layers.Dense(
num_classes, activation=None, kernel_regularizer=None, name='att_fc')
def global_and_local_forward_pass(self, images, training=True):
"""Run a forward to calculate global descriptor and attention prelogits.
Args:
images: Tensor containing the dataset on which to run the forward pass.
      training: Indicator of whether the forward pass is running in training
        mode or not.
Returns:
      Global descriptor prelogits, attention prelogits, attention scores,
      backbone block featuremaps, and the autoencoder expanded/reduced
      features (both None when dimensionality reduction is not used).
"""
backbone_blocks = {}
desc_prelogits = self.backbone.build_call(
images, intermediates_dict=backbone_blocks, training=training)
# Prevent gradients from propagating into the backbone. See DELG paper:
# https://arxiv.org/abs/2001.05027.
block3 = backbone_blocks['block3'] # pytype: disable=key-error
block3 = tf.stop_gradient(block3)
if self._use_dim_reduction:
(dim_expanded_features, dim_reduced_features) = self.autoencoder(block3)
attn_prelogits, attn_scores, _ = self.attention(
block3,
targets=dim_expanded_features,
training=training)
else:
attn_prelogits, attn_scores, _ = self.attention(block3, training=training)
dim_expanded_features = None
dim_reduced_features = None
return (desc_prelogits, attn_prelogits, attn_scores, backbone_blocks,
dim_expanded_features, dim_reduced_features)
def build_call(self, input_image, training=True):
(global_feature, _, attn_scores, backbone_blocks, _,
dim_reduced_features) = self.global_and_local_forward_pass(input_image,
training)
if self._use_dim_reduction:
features = dim_reduced_features
else:
features = backbone_blocks['block3'] # pytype: disable=key-error
return global_feature, attn_scores, features
def call(self, input_image, training=True):
_, probs, features = self.build_call(input_image, training=training)
return probs, features
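# Minimal usage sketch; the input resolution below is an example assumption.
def _delf_example():
  """Builds a DELF model and runs the joint global-and-local forward pass."""
  model = Delf(block3_strides=True, use_dim_reduction=False)
  images = tf.random.uniform((1, 321, 321, 3), minval=-1.0, maxval=1.0)
  global_feature, attn_scores, local_features = model.build_call(
      images, training=False)
  # global_feature: [1, 2048]; attn_scores and local_features share the
  # spatial resolution of the backbone block3 output.
  return global_feature, attn_scores, local_features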
| 9,075 | 36.659751 | 80 | py |
models | models-master/research/delf/delf/python/training/model/delg_model.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DELG model implementation based on the following paper.
Unifying Deep Local and Global Features for Image Search
https://arxiv.org/abs/2001.05027
"""
import functools
import math
from absl import logging
import tensorflow as tf
from delf.python.training.model import delf_model
layers = tf.keras.layers
class Delg(delf_model.Delf):
"""Instantiates Keras DELG model using ResNet50 as backbone.
This class implements the [DELG](https://arxiv.org/abs/2001.05027) model for
extracting local and global features from images. The same attention layer
is trained as in the DELF model. In addition, the extraction of global
features is trained using GeMPooling, a FC whitening layer also called
"embedding layer" and ArcFace loss.
"""
def __init__(self,
block3_strides=True,
name='DELG',
gem_power=3.0,
embedding_layer_dim=2048,
scale_factor_init=45.25, # sqrt(2048)
arcface_margin=0.1,
use_dim_reduction=False,
reduced_dimension=128,
dim_expand_channels=1024):
"""Initialization of DELG model.
Args:
block3_strides: bool, whether to add strides to the output of block3.
name: str, name to identify model.
gem_power: float, GeM power parameter.
      embedding_layer_dim: int, dimension of the embedding layer.
scale_factor_init: float.
arcface_margin: float, ArcFace margin.
use_dim_reduction: Whether to integrate dimensionality reduction layers.
If True, extra layers are added to reduce the dimensionality of the
extracted features.
reduced_dimension: Only used if use_dim_reduction is True, the output
dimension of the dim_reduction layer.
dim_expand_channels: Only used if use_dim_reduction is True, the
number of channels of the backbone block used. Default value 1024 is the
number of channels of backbone block 'block3'.
"""
    logging.info('Creating Delg model, gem_power %f, embedding_layer_dim %d',
gem_power, embedding_layer_dim)
super(Delg, self).__init__(block3_strides=block3_strides,
name=name,
pooling='gem',
gem_power=gem_power,
embedding_layer=True,
embedding_layer_dim=embedding_layer_dim,
use_dim_reduction=use_dim_reduction,
reduced_dimension=reduced_dimension,
dim_expand_channels=dim_expand_channels)
self._embedding_layer_dim = embedding_layer_dim
self._scale_factor_init = scale_factor_init
self._arcface_margin = arcface_margin
def init_classifiers(self, num_classes):
"""Define classifiers for training backbone and attention models."""
logging.info('Initializing Delg backbone and attention models classifiers')
backbone_classifier_func = self._create_backbone_classifier(num_classes)
super(Delg, self).init_classifiers(
num_classes,
desc_classification=backbone_classifier_func)
def _create_backbone_classifier(self, num_classes):
"""Define the classifier for training the backbone model."""
logging.info('Creating cosine classifier')
self.cosine_weights = tf.Variable(
initial_value=tf.initializers.GlorotUniform()(
shape=[self._embedding_layer_dim, num_classes]),
name='cosine_weights',
trainable=True)
self.scale_factor = tf.Variable(self._scale_factor_init,
name='scale_factor',
trainable=False)
classifier_func = functools.partial(cosine_classifier_logits,
num_classes=num_classes,
cosine_weights=self.cosine_weights,
scale_factor=self.scale_factor,
arcface_margin=self._arcface_margin)
classifier_func.trainable_weights = [self.cosine_weights]
return classifier_func
def cosine_classifier_logits(prelogits,
labels,
num_classes,
cosine_weights,
scale_factor,
arcface_margin,
training=True):
"""Compute cosine classifier logits using ArFace margin.
Args:
prelogits: float tensor of shape [batch_size, embedding_layer_dim].
labels: int tensor of shape [batch_size].
num_classes: int, number of classes.
cosine_weights: float tensor of shape [embedding_layer_dim, num_classes].
scale_factor: float.
arcface_margin: float. Only used if greater than zero, and training is True.
training: bool, True if training, False if eval.
Returns:
logits: Float tensor [batch_size, num_classes].
"""
# L2-normalize prelogits, then obtain cosine similarity.
normalized_prelogits = tf.math.l2_normalize(prelogits, axis=1)
normalized_weights = tf.math.l2_normalize(cosine_weights, axis=0)
cosine_sim = tf.matmul(normalized_prelogits, normalized_weights)
# Optionally use ArcFace margin.
if training and arcface_margin > 0.0:
# Reshape labels tensor from [batch_size] to [batch_size, num_classes].
one_hot_labels = tf.one_hot(labels, num_classes)
cosine_sim = apply_arcface_margin(cosine_sim,
one_hot_labels,
arcface_margin)
# Apply the scale factor to logits and return.
logits = scale_factor * cosine_sim
return logits
def apply_arcface_margin(cosine_sim, one_hot_labels, arcface_margin):
"""Applies ArcFace margin to cosine similarity inputs.
  For a reference, see https://arxiv.org/pdf/1801.07698.pdf. ArcFace margin is
applied to angles from correct classes (as per the ArcFace paper), and only
if they are <= (pi - margin). Otherwise, applying the margin may actually
improve their cosine similarity.
Args:
cosine_sim: float tensor with shape [batch_size, num_classes].
one_hot_labels: int tensor with shape [batch_size, num_classes].
arcface_margin: float.
Returns:
cosine_sim_with_margin: Float tensor with shape [batch_size, num_classes].
"""
theta = tf.acos(cosine_sim, name='acos')
selected_labels = tf.where(tf.greater(theta, math.pi - arcface_margin),
tf.zeros_like(one_hot_labels),
one_hot_labels,
name='selected_labels')
final_theta = tf.where(tf.cast(selected_labels, dtype=tf.bool),
theta + arcface_margin,
theta,
name='final_theta')
return tf.cos(final_theta, name='cosine_sim_with_margin')
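# Minimal usage sketch for the cosine classifier; the shapes and the scale and
# margin values below are example assumptions only.
def _cosine_classifier_example():
  """Computes ArcFace-margin cosine logits for a random batch."""
  batch_size, dim, num_classes = 4, 16, 10
  prelogits = tf.random.normal((batch_size, dim))
  labels = tf.random.uniform((batch_size,), maxval=num_classes, dtype=tf.int64)
  cosine_weights = tf.random.normal((dim, num_classes))
  # With arcface_margin > 0 and training=True, the margin is added to the
  # angle of each example's ground-truth class before rescaling.
  logits = cosine_classifier_logits(
      prelogits,
      labels,
      num_classes,
      cosine_weights,
      scale_factor=45.25,
      arcface_margin=0.1,
      training=True)
  return logits  # [batch_size, num_classes]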
| 7,641 | 41.692737 | 80 | py |
models | models-master/research/delf/delf/python/training/model/delg_model_test.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DELG model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
from delf.python.training.model import delg_model
class DelgTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_forward_pass(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
input_shape = (batch_size, image_size, image_size, 3)
local_feature_dim = 64
feature_map_size = image_size // 16 # reduction factor for resnet50.
if block3_strides:
feature_map_size //= 2
model = delg_model.Delg(block3_strides=block3_strides,
use_dim_reduction=True,
reduced_dimension=local_feature_dim)
model.init_classifiers(num_classes)
images = tf.random.uniform(input_shape, minval=-1.0, maxval=1.0, seed=0)
# Run a complete forward pass of the model.
global_feature, attn_scores, local_features = model.build_call(images)
self.assertAllEqual(global_feature.shape, (batch_size, 2048))
self.assertAllEqual(
attn_scores.shape,
(batch_size, feature_map_size, feature_map_size, 1))
self.assertAllEqual(
local_features.shape,
(batch_size, feature_map_size, feature_map_size, local_feature_dim))
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_build_model(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
input_shape = (batch_size, image_size, image_size, 3)
model = delg_model.Delg(
block3_strides=block3_strides,
use_dim_reduction=True)
model.init_classifiers(num_classes)
images = tf.random.uniform(input_shape, minval=-1.0, maxval=1.0, seed=0)
labels = tf.random.uniform((batch_size,),
minval=0,
maxval=model.num_classes - 1,
dtype=tf.int64)
blocks = {}
desc_prelogits = model.backbone(
images, intermediates_dict=blocks, training=False)
desc_logits = model.desc_classification(desc_prelogits, labels)
self.assertAllEqual(desc_prelogits.shape, (batch_size, 2048))
self.assertAllEqual(desc_logits.shape, (batch_size, num_classes))
features = blocks['block3']
attn_prelogits, _, _ = model.attention(features)
attn_logits = model.attn_classification(attn_prelogits)
self.assertAllEqual(attn_prelogits.shape, (batch_size, 1024))
self.assertAllEqual(attn_logits.shape, (batch_size, num_classes))
@parameterized.named_parameters(
('block3_stridesTrue', True),
('block3_stridesFalse', False),
)
def test_train_step(self, block3_strides):
image_size = 321
num_classes = 1000
batch_size = 2
clip_val = 10.0
input_shape = (batch_size, image_size, image_size, 3)
model = delg_model.Delg(
block3_strides=block3_strides,
use_dim_reduction=True)
model.init_classifiers(num_classes)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)
images = tf.random.uniform(input_shape, minval=0.0, maxval=1.0, seed=0)
labels = tf.random.uniform((batch_size,),
minval=0,
maxval=model.num_classes - 1,
dtype=tf.int64)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def compute_loss(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(
per_example_loss, global_batch_size=batch_size)
with tf.GradientTape() as gradient_tape:
(desc_prelogits, attn_prelogits, _, backbone_blocks,
dim_expanded_features, _) = model.global_and_local_forward_pass(images)
# Calculate global loss by applying the descriptor classifier.
desc_logits = model.desc_classification(desc_prelogits, labels)
desc_loss = compute_loss(labels, desc_logits)
# Calculate attention loss by applying the attention block classifier.
attn_logits = model.attn_classification(attn_prelogits)
attn_loss = compute_loss(labels, attn_logits)
# Calculate reconstruction loss between the attention prelogits and the
# backbone.
block3 = tf.stop_gradient(backbone_blocks['block3'])
reconstruction_loss = tf.math.reduce_mean(
tf.keras.losses.MSE(block3, dim_expanded_features))
# Cumulate global loss and attention loss and backpropagate through the
# descriptor layer and attention layer together.
total_loss = desc_loss + attn_loss + reconstruction_loss
gradients = gradient_tape.gradient(total_loss, model.trainable_weights)
clipped, _ = tf.clip_by_global_norm(gradients, clip_norm=clip_val)
optimizer.apply_gradients(zip(clipped, model.trainable_weights))
if __name__ == '__main__':
tf.test.main()
| 5,899 | 37.815789 | 80 | py |
models | models-master/research/delf/delf/python/training/model/resnet50.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 backbone used in DELF model.
Copied over from tensorflow/python/eager/benchmarks/resnet50/resnet50.py,
because that code does not support dependencies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tempfile
from absl import logging
import h5py
import tensorflow as tf
from delf.python.pooling_layers import pooling as pooling_layers
layers = tf.keras.layers
class _IdentityBlock(tf.keras.Model):
"""_IdentityBlock is the block that has no conv layer at shortcut.
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last').
"""
def __init__(self, kernel_size, filters, stage, block, data_format):
super(_IdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
bn_axis = 1 if data_format == 'channels_first' else 3
self.conv2a = layers.Conv2D(
filters1, (1, 1), name=conv_name_base + '2a', data_format=data_format)
self.bn2a = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2a')
self.conv2b = layers.Conv2D(
filters2,
kernel_size,
padding='same',
data_format=data_format,
name=conv_name_base + '2b')
self.bn2b = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2b')
self.conv2c = layers.Conv2D(
filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
self.bn2c = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2c')
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
class _ConvBlock(tf.keras.Model):
"""_ConvBlock is the block that has a conv layer at shortcut.
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last').
strides: strides for the convolution. Note that from stage 3, the first
conv layer at main path is with strides=(2,2), and the shortcut should
have strides=(2,2) as well.
"""
def __init__(self,
kernel_size,
filters,
stage,
block,
data_format,
strides=(2, 2)):
super(_ConvBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
bn_axis = 1 if data_format == 'channels_first' else 3
self.conv2a = layers.Conv2D(
filters1, (1, 1),
strides=strides,
name=conv_name_base + '2a',
data_format=data_format)
self.bn2a = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2a')
self.conv2b = layers.Conv2D(
filters2,
kernel_size,
padding='same',
name=conv_name_base + '2b',
data_format=data_format)
self.bn2b = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2b')
self.conv2c = layers.Conv2D(
filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
self.bn2c = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2c')
self.conv_shortcut = layers.Conv2D(
filters3, (1, 1),
strides=strides,
name=conv_name_base + '1',
data_format=data_format)
self.bn_shortcut = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '1')
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
shortcut = self.conv_shortcut(input_tensor)
shortcut = self.bn_shortcut(shortcut, training=training)
x += shortcut
return tf.nn.relu(x)
# pylint: disable=not-callable
class ResNet50(tf.keras.Model):
"""Instantiates the ResNet50 architecture.
Args:
data_format: format for the image. Either 'channels_first' or
'channels_last'. 'channels_first' is typically faster on GPUs while
'channels_last' is typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
name: Prefix applied to names of variables created in the model.
include_top: whether to include the fully-connected layer at the top of the
network.
pooling: Optional pooling mode for feature extraction when `include_top` is
False. 'None' means that the output of the model will be the 4D tensor
output of the last convolutional layer. 'avg' means that global average
pooling will be applied to the output of the last convolutional layer, and
thus the output of the model will be a 2D tensor. 'max' means that global
max pooling will be applied. 'gem' means GeM pooling will be applied.
block3_strides: whether to add a stride of 2 to block3 to make it compatible
with tf.slim ResNet implementation.
average_pooling: whether to do average pooling of block4 features before
global pooling.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True.
gem_power: GeM power for GeM pooling. Only used if pooling == 'gem'.
embedding_layer: whether to create an embedding layer (FC whitening layer).
embedding_layer_dim: size of the embedding layer.
Raises:
ValueError: in case of invalid argument for data_format.
"""
def __init__(self,
data_format,
name='',
include_top=True,
pooling=None,
block3_strides=False,
average_pooling=True,
classes=1000,
gem_power=3.0,
embedding_layer=False,
embedding_layer_dim=2048):
super(ResNet50, self).__init__(name=name)
valid_channel_values = ('channels_first', 'channels_last')
if data_format not in valid_channel_values:
raise ValueError('Unknown data_format: %s. Valid values: %s' %
(data_format, valid_channel_values))
self.include_top = include_top
self.block3_strides = block3_strides
self.average_pooling = average_pooling
self.pooling = pooling
def conv_block(filters, stage, block, strides=(2, 2)):
return _ConvBlock(
3,
filters,
stage=stage,
block=block,
data_format=data_format,
strides=strides)
def id_block(filters, stage, block):
return _IdentityBlock(
3, filters, stage=stage, block=block, data_format=data_format)
self.conv1 = layers.Conv2D(
64, (7, 7),
strides=(2, 2),
data_format=data_format,
padding='same',
name='conv1')
bn_axis = 1 if data_format == 'channels_first' else 3
self.bn_conv1 = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')
self.max_pool = layers.MaxPooling2D((3, 3),
strides=(2, 2),
data_format=data_format)
self.l2a = conv_block([64, 64, 256], stage=2, block='a', strides=(1, 1))
self.l2b = id_block([64, 64, 256], stage=2, block='b')
self.l2c = id_block([64, 64, 256], stage=2, block='c')
self.l3a = conv_block([128, 128, 512], stage=3, block='a')
self.l3b = id_block([128, 128, 512], stage=3, block='b')
self.l3c = id_block([128, 128, 512], stage=3, block='c')
self.l3d = id_block([128, 128, 512], stage=3, block='d')
self.l4a = conv_block([256, 256, 1024], stage=4, block='a')
self.l4b = id_block([256, 256, 1024], stage=4, block='b')
self.l4c = id_block([256, 256, 1024], stage=4, block='c')
self.l4d = id_block([256, 256, 1024], stage=4, block='d')
self.l4e = id_block([256, 256, 1024], stage=4, block='e')
self.l4f = id_block([256, 256, 1024], stage=4, block='f')
# Striding layer that can be used on top of block3 to produce feature maps
# with the same resolution as the TF-Slim implementation.
if self.block3_strides:
self.subsampling_layer = layers.MaxPooling2D((1, 1),
strides=(2, 2),
data_format=data_format)
self.l5a = conv_block([512, 512, 2048],
stage=5,
block='a',
strides=(1, 1))
else:
self.l5a = conv_block([512, 512, 2048], stage=5, block='a')
self.l5b = id_block([512, 512, 2048], stage=5, block='b')
self.l5c = id_block([512, 512, 2048], stage=5, block='c')
self.avg_pool = layers.AveragePooling2D((7, 7),
strides=(7, 7),
data_format=data_format)
if self.include_top:
self.flatten = layers.Flatten()
self.fc1000 = layers.Dense(classes, name='fc1000')
else:
reduction_indices = [1, 2] if data_format == 'channels_last' else [2, 3]
reduction_indices = tf.constant(reduction_indices)
if pooling == 'avg':
self.global_pooling = functools.partial(
tf.reduce_mean, axis=reduction_indices, keepdims=False)
elif pooling == 'max':
self.global_pooling = functools.partial(
tf.reduce_max, axis=reduction_indices, keepdims=False)
elif pooling == 'gem':
logging.info('Adding GeMPooling layer with power %f', gem_power)
self.global_pooling = functools.partial(
pooling_layers.gem, axis=reduction_indices, power=gem_power)
else:
self.global_pooling = None
if embedding_layer:
logging.info('Adding embedding layer with dimension %d',
embedding_layer_dim)
self.embedding_layer = layers.Dense(
embedding_layer_dim, name='embedding_layer')
else:
self.embedding_layer = None
def build_call(self, inputs, training=True, intermediates_dict=None):
"""Building the ResNet50 model.
Args:
inputs: Images to compute features for.
training: Whether model is in training phase.
intermediates_dict: `None` or dictionary. If not None, accumulate feature
        maps from intermediate blocks into the dictionary.
Returns:
Tensor with featuremap.
"""
x = self.conv1(inputs)
x = self.bn_conv1(x, training=training)
x = tf.nn.relu(x)
if intermediates_dict is not None:
intermediates_dict['block0'] = x
x = self.max_pool(x)
if intermediates_dict is not None:
intermediates_dict['block0mp'] = x
# Block 1 (equivalent to "conv2" in Resnet paper).
x = self.l2a(x, training=training)
x = self.l2b(x, training=training)
x = self.l2c(x, training=training)
if intermediates_dict is not None:
intermediates_dict['block1'] = x
# Block 2 (equivalent to "conv3" in Resnet paper).
x = self.l3a(x, training=training)
x = self.l3b(x, training=training)
x = self.l3c(x, training=training)
x = self.l3d(x, training=training)
if intermediates_dict is not None:
intermediates_dict['block2'] = x
# Block 3 (equivalent to "conv4" in Resnet paper).
x = self.l4a(x, training=training)
x = self.l4b(x, training=training)
x = self.l4c(x, training=training)
x = self.l4d(x, training=training)
x = self.l4e(x, training=training)
x = self.l4f(x, training=training)
if self.block3_strides:
x = self.subsampling_layer(x)
if intermediates_dict is not None:
intermediates_dict['block3'] = x
else:
if intermediates_dict is not None:
intermediates_dict['block3'] = x
x = self.l5a(x, training=training)
x = self.l5b(x, training=training)
x = self.l5c(x, training=training)
if self.average_pooling:
x = self.avg_pool(x)
if intermediates_dict is not None:
intermediates_dict['block4'] = x
else:
if intermediates_dict is not None:
intermediates_dict['block4'] = x
if self.include_top:
return self.fc1000(self.flatten(x))
elif self.global_pooling:
x = self.global_pooling(x)
if self.embedding_layer:
x = self.embedding_layer(x)
return x
else:
return x
def call(self, inputs, training=True, intermediates_dict=None):
"""Call the ResNet50 model.
Args:
inputs: Images to compute features for.
training: Whether model is in training phase.
intermediates_dict: `None` or dictionary. If not None, accumulate feature
        maps from intermediate blocks into the dictionary.
Returns:
Tensor with featuremap.
"""
return self.build_call(inputs, training, intermediates_dict)
def restore_weights(self, filepath):
"""Load pretrained weights.
This function loads a .h5 file from the filepath with saved model weights
and assigns them to the model.
Args:
filepath: String, path to the .h5 file
Raises:
ValueError: if the file referenced by `filepath` does not exist.
"""
if not tf.io.gfile.exists(filepath):
      raise ValueError('Unable to load weights from %s. You must provide a '
                       'valid file.' % (filepath))
# Create a local copy of the weights file for h5py to be able to read it.
local_filename = os.path.basename(filepath)
tmp_filename = os.path.join(tempfile.gettempdir(), local_filename)
tf.io.gfile.copy(filepath, tmp_filename, overwrite=True)
# Load the content of the weights file.
f = h5py.File(tmp_filename, mode='r')
saved_layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
try:
# Iterate through all the layers assuming the max `depth` is 2.
for layer in self.layers:
if hasattr(layer, 'layers'):
for inlayer in layer.layers:
# Make sure the weights are in the saved model, and that we are in
# the innermost layer.
if inlayer.name not in saved_layer_names:
              raise ValueError('Layer %s absent from the pretrained weights. '
                               'Unable to load its weights.' % (inlayer.name))
if hasattr(inlayer, 'layers'):
              raise ValueError('Layer %s is not a depth 2 layer. Unable to '
                               'load its weights.' % (inlayer.name))
# Assign the weights in the current layer.
g = f[inlayer.name]
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
weight_values = [g[weight_name] for weight_name in weight_names]
logging.info('Setting the weights for layer %s', inlayer.name)
inlayer.set_weights(weight_values)
finally:
# Clean up the temporary file.
tf.io.gfile.remove(tmp_filename)
def log_weights(self):
"""Log backbone weights."""
logging.info('Logging backbone weights')
logging.info('------------------------')
for layer in self.layers:
if hasattr(layer, 'layers'):
for inlayer in layer.layers:
          logging.info('Weights for layer: %s, inlayer %s', layer.name,
inlayer.name)
weights = inlayer.get_weights()
logging.info(weights)
else:
logging.info('Layer %s does not have inner layers.', layer.name)
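# Minimal usage sketch; the batch size and input resolution below are example
# assumptions. Builds the backbone as used for global-feature extraction
# (no classification head, GeM pooling) and runs it on a random batch.
def _resnet50_gem_example():
  model = ResNet50(
      'channels_last',
      name='backbone',
      include_top=False,
      pooling='gem',
      gem_power=3.0)
  images = tf.random.uniform((2, 321, 321, 3), minval=-1.0, maxval=1.0)
  blocks = {}
  # `descriptor` is the [2, 2048] GeM-pooled global feature; `blocks` collects
  # the intermediate featuremaps keyed 'block0' ... 'block4'.
  descriptor = model(images, training=False, intermediates_dict=blocks)
  return descriptor, blocks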
| 17,089 | 36.071584 | 80 | py |
models | models-master/research/delf/delf/python/training/model/global_model.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CNN Image Retrieval model implementation based on the following papers:
[1] Fine-tuning CNN Image Retrieval with No Human Annotation,
Radenović F., Tolias G., Chum O., TPAMI 2018 [arXiv]
https://arxiv.org/abs/1711.02512
[2] CNN Image Retrieval Learns from BoW: Unsupervised Fine-Tuning with Hard
Examples, Radenović F., Tolias G., Chum O., ECCV 2016 [arXiv]
https://arxiv.org/abs/1604.02426
"""
import os
import pickle
import tensorflow as tf
from delf.python.datasets import generic_dataset
from delf.python.normalization_layers import normalization
from delf.python.pooling_layers import pooling as pooling_layers
from delf.python.training import global_features_utils
# Pre-computed global whitening, for most commonly used architectures.
# Using pre-computed whitening improves convergence speed and performance.
_WHITENING_CONFIG = {
'ResNet50': 'http://cmp.felk.cvut.cz/cnnimageretrieval_tf'
'/SFM120k_ResNet50_gem_learned_whitening_config.pkl',
'ResNet101': 'http://cmp.felk.cvut.cz/cnnimageretrieval_tf'
'/SFM120k_ResNet101_gem_learned_whitening_config.pkl',
'ResNet152': 'http://cmp.felk.cvut.cz/cnnimageretrieval_tf'
'/SFM120k_ResNet152_gem_learned_whitening_config.pkl',
'VGG19': 'http://cmp.felk.cvut.cz/cnnimageretrieval_tf'
'/SFM120k_VGG19_gem_learned_whitening_config.pkl'
}
# Possible global pooling layers.
_POOLING = {
'mac': pooling_layers.MAC,
'spoc': pooling_layers.SPoC,
'gem': pooling_layers.GeM
}
# Output dimensionality for supported architectures.
_OUTPUT_DIM = {
'VGG16': 512,
'VGG19': 512,
'ResNet50': 2048,
'ResNet101': 2048,
'ResNet101V2': 2048,
'ResNet152': 2048,
'DenseNet121': 1024,
'DenseNet169': 1664,
'DenseNet201': 1920,
'EfficientNetB5': 2048,
'EfficientNetB7': 2560
}
class GlobalFeatureNet(tf.keras.Model):
"""Instantiates global model for image retrieval.
This class implements the [GlobalFeatureNet](
https://arxiv.org/abs/1711.02512) for image retrieval. The model uses a
user-defined model as a backbone.
"""
def __init__(self, architecture='ResNet101', pooling='gem',
whitening=False, pretrained=True, data_root=''):
"""GlobalFeatureNet network initialization.
Args:
architecture: Network backbone.
pooling: Pooling method used 'mac'/'spoc'/'gem'.
whitening: Bool, whether to use whitening.
pretrained: Bool, whether to initialize the network with the weights
pretrained on ImageNet.
data_root: String, path to the data folder where the precomputed
whitening is/will be saved in case `whitening` is True.
Raises:
ValueError: If `architecture` is not supported.
"""
if architecture not in _OUTPUT_DIM.keys():
raise ValueError("Architecture {} is not supported.".format(architecture))
super(GlobalFeatureNet, self).__init__()
# Get standard output dimensionality size.
dim = _OUTPUT_DIM[architecture]
if pretrained:
# Initialize with network pretrained on imagenet.
net_in = getattr(tf.keras.applications, architecture)(include_top=False,
weights="imagenet")
else:
# Initialize with random weights.
net_in = getattr(tf.keras.applications, architecture)(include_top=False,
weights=None)
# Initialize `feature_extractor`. Take only convolutions for
# `feature_extractor`, always end with ReLU to make last activations
# non-negative.
if architecture.lower().startswith('densenet'):
tmp_model = tf.keras.Sequential()
tmp_model.add(net_in)
net_in = tmp_model
net_in.add(tf.keras.layers.ReLU())
# Initialize pooling.
self.pool = _POOLING[pooling]()
# Initialize whitening.
if whitening:
if pretrained and architecture in _WHITENING_CONFIG:
# If precomputed whitening for the architecture exists,
# the fully-connected layer is going to be initialized according to
# the precomputed layer configuration.
global_features_utils.debug_and_log(
">> {}: for '{}' custom computed whitening '{}' is used."
.format(os.getcwd(), architecture,
os.path.basename(_WHITENING_CONFIG[architecture])))
# The layer configuration is downloaded to the `data_root` folder.
whiten_dir = os.path.join(data_root, architecture)
path = tf.keras.utils.get_file(fname=whiten_dir,
origin=_WHITENING_CONFIG[architecture])
# Whitening configuration is loaded.
with tf.io.gfile.GFile(path, 'rb') as learned_whitening_file:
whitening_config = pickle.load(learned_whitening_file)
# Whitening layer is initialized according to the configuration.
self.whiten = tf.keras.layers.Dense.from_config(whitening_config)
else:
        # If no precomputed whitening exists for the chosen architecture, the
        # fully-connected whitening layer is initialized with random weights.
self.whiten = tf.keras.layers.Dense(dim, activation=None, use_bias=True)
global_features_utils.debug_and_log(
">> There is either no whitening computed for the "
"used network architecture or pretrained is False,"
" random weights are used.")
else:
self.whiten = None
# Create meta information to be stored in the network.
self.meta = {
'architecture': architecture,
'pooling': pooling,
'whitening': whitening,
'outputdim': dim
}
self.feature_extractor = net_in
self.normalize = normalization.L2Normalization()
def call(self, x, training=False):
"""Invokes the GlobalFeatureNet instance.
Args:
x: [B, H, W, C] Tensor with a batch of images.
training: Indicator of whether the forward pass is running in training
mode or not.
Returns:
out: [B, out_dim] Global descriptor.
"""
# Forward pass through the fully-convolutional backbone.
o = self.feature_extractor(x, training)
# Pooling.
o = self.pool(o)
# Normalization.
o = self.normalize(o)
# If whitening exists: the pooled global descriptor is whitened and
# re-normalized.
if self.whiten is not None:
o = self.whiten(o)
o = self.normalize(o)
return o
def meta_repr(self):
    """Provides high-level information about the network.
Returns:
meta: string with the information about the network (used
architecture, pooling type, whitening, outputdim).
    """
tmpstr = '(meta):\n'
tmpstr += '\tarchitecture: {}\n'.format(self.meta['architecture'])
tmpstr += '\tpooling: {}\n'.format(self.meta['pooling'])
tmpstr += '\twhitening: {}\n'.format(self.meta['whitening'])
tmpstr += '\toutputdim: {}\n'.format(self.meta['outputdim'])
return tmpstr
def extract_global_descriptors_from_list(net, images, image_size,
bounding_boxes=None, scales=[1.],
multi_scale_power=1., print_freq=10):
"""Extracting global descriptors from a list of images.
Args:
net: Model object, network for the forward pass.
images: Absolute image paths as strings.
image_size: Integer, defines the maximum size of longer image side.
bounding_boxes: List of (x1,y1,x2,y2) tuples to crop the query images.
scales: List of float scales.
multi_scale_power: Float, multi-scale normalization power parameter.
print_freq: Printing frequency for debugging.
Returns:
descriptors: Global descriptors for the input images.
"""
# Creating dataset loader.
data = generic_dataset.ImagesFromList(root='', image_paths=images,
imsize=image_size,
bounding_boxes=bounding_boxes)
def _data_gen():
return (inst for inst in data)
loader = tf.data.Dataset.from_generator(_data_gen, output_types=(tf.float32))
loader = loader.batch(1)
# Extracting vectors.
descriptors = tf.zeros((0, net.meta['outputdim']))
for i, input in enumerate(loader):
if len(scales) == 1 and scales[0] == 1:
descriptors = tf.concat([descriptors, net(input)], 0)
else:
descriptors = tf.concat(
[descriptors, extract_multi_scale_descriptor(
net, input, scales, multi_scale_power)], 0)
if (i + 1) % print_freq == 0 or (i + 1) == len(images):
global_features_utils.debug_and_log(
'\r>>>> {}/{} done...'.format((i + 1), len(images)),
debug_on_the_same_line=True)
global_features_utils.debug_and_log('', log=False)
descriptors = tf.transpose(descriptors, perm=[1, 0])
return descriptors
def extract_multi_scale_descriptor(net, input, scales, multi_scale_power):
"""Extracts the global descriptor multi scale.
Args:
net: Model object, network for the forward pass.
input: [B, H, W, C] input tensor in channel-last (BHWC) configuration.
scales: List of float scales.
multi_scale_power: Float, multi-scale normalization power parameter.
Returns:
descriptors: Multi-scale global descriptors for the input images.
"""
descriptors = tf.zeros(net.meta['outputdim'])
for s in scales:
if s == 1:
input_t = input
else:
output_shape = s * tf.shape(input)[1:3].numpy()
input_t = tf.image.resize(input, output_shape,
method='bilinear',
preserve_aspect_ratio=True)
descriptors += tf.pow(net(input_t), multi_scale_power)
descriptors /= len(scales)
descriptors = tf.pow(descriptors, 1. / multi_scale_power)
descriptors /= tf.norm(descriptors)
return descriptors
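# Minimal usage sketch; the architecture choice and input size below are
# example assumptions (pretrained=False avoids any weight download).
def _global_feature_net_example():
  """Extracts a single L2-normalized global descriptor with random weights."""
  net = GlobalFeatureNet(architecture='ResNet50', pooling='gem',
                         whitening=False, pretrained=False)
  image = tf.random.uniform((1, 224, 224, 3))
  descriptor = net(image, training=False)  # [1, 2048], unit L2 norm.
  return descriptor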
| 10,632 | 36.178322 | 80 | py |
models | models-master/research/delf/delf/python/training/model/export_CNN_global.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export global CNN feature tensorflow inference model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import global_model
from delf.python.training.model import export_model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt_path', None, help='Path to saved checkpoint.')
flags.DEFINE_string('export_path', None,
help='Path where model will be exported.')
flags.DEFINE_list(
'input_scales_list', None,
'Optional input image scales to use. If None (default), an input '
'end-point '
'"input_scales" is added for the exported model. If not None, the '
'specified list of floats will be hard-coded as the desired input '
'scales.')
flags.DEFINE_enum(
'multi_scale_pool_type', 'None', ['None', 'average', 'sum'],
"If 'None' (default), the model is exported with an output end-point "
"'global_descriptors', where the global descriptor for each scale is "
"returned separately. If not 'None', the global descriptor of each "
"scale is"
' pooled and a 1D global descriptor is returned, with output end-point '
"'global_descriptor'.")
flags.DEFINE_boolean('normalize_global_descriptor', False,
'If True, L2-normalizes global descriptor.')
# Network architecture and initialization options.
flags.DEFINE_string('arch', 'ResNet101',
'model architecture (default: ResNet101)')
flags.DEFINE_string('pool', 'gem', 'pooling options (default: gem)')
flags.DEFINE_boolean('whitening', False,
'train model with learnable whitening (linear layer) '
'after the pooling')
def _NormalizeImages(images, *args):
"""Normalize pixel values in image.
Args:
images: `Tensor`, images to normalize.
Returns:
normalized_images: `Tensor`, normalized images.
"""
  images = tf.keras.applications.imagenet_utils.preprocess_input(
      images, mode='caffe')
  return images
class _ExtractModule(tf.Module):
"""Helper module to build and save global feature model."""
def __init__(self,
multi_scale_pool_type='None',
normalize_global_descriptor=False,
input_scales_tensor=None):
"""Initialization of global feature model.
Args:
multi_scale_pool_type: Type of multi-scale pooling to perform.
normalize_global_descriptor: Whether to L2-normalize global
descriptor.
input_scales_tensor: If None, the exported function to be used
should be ExtractFeatures, where an input end-point "input_scales" is
added for the exported model. If not None, the specified 1D tensor of
floats will be hard-coded as the desired input scales, in conjunction
with ExtractFeaturesFixedScales.
"""
self._multi_scale_pool_type = multi_scale_pool_type
self._normalize_global_descriptor = normalize_global_descriptor
if input_scales_tensor is None:
self._input_scales_tensor = []
else:
self._input_scales_tensor = input_scales_tensor
self._model = global_model.GlobalFeatureNet(
FLAGS.arch, FLAGS.pool, FLAGS.whitening, pretrained=False)
def LoadWeights(self, checkpoint_path):
self._model.load_weights(checkpoint_path)
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8,
name='input_image'),
tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
tf.TensorSpec(shape=[None], dtype=tf.int32,
name='input_global_scales_ind')
])
def ExtractFeatures(self, input_image, input_scales,
input_global_scales_ind):
extracted_features = export_model_utils.ExtractGlobalFeatures(
input_image,
input_scales,
input_global_scales_ind,
lambda x: self._model(x, training=False),
multi_scale_pool_type=self._multi_scale_pool_type,
normalize_global_descriptor=self._normalize_global_descriptor,
        normalization_function=_NormalizeImages)
named_output_tensors = {}
named_output_tensors['global_descriptors'] = tf.identity(
extracted_features, name='global_descriptors')
return named_output_tensors
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image')
])
def ExtractFeaturesFixedScales(self, input_image):
return self.ExtractFeatures(input_image, self._input_scales_tensor,
tf.range(tf.size(self._input_scales_tensor)))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export_path = FLAGS.export_path
if os.path.exists(export_path):
raise ValueError('export_path %s already exists.' % export_path)
if FLAGS.input_scales_list is None:
input_scales_tensor = None
else:
input_scales_tensor = tf.constant(
[float(s) for s in FLAGS.input_scales_list],
dtype=tf.float32,
shape=[len(FLAGS.input_scales_list)],
name='input_scales')
module = _ExtractModule(FLAGS.multi_scale_pool_type,
FLAGS.normalize_global_descriptor,
input_scales_tensor)
# Load the weights.
checkpoint_path = FLAGS.ckpt_path
module.LoadWeights(checkpoint_path)
print('Checkpoint loaded from ', checkpoint_path)
# Save the module.
if FLAGS.input_scales_list is None:
served_function = module.ExtractFeatures
else:
served_function = module.ExtractFeaturesFixedScales
tf.saved_model.save(
module, export_path, signatures={'serving_default': served_function})
if __name__ == '__main__':
app.run(main)
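# Example invocation (illustrative; the script name, paths and flag values
# below are hypothetical):
#   python3 export_global_model.py \
#       --ckpt_path=/tmp/ckpt/model-ckpt \
#       --export_path=/tmp/exported_global_model \
#       --input_scales_list=0.70710677,1.0,1.4142135 \
#       --normalize_global_descriptor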
| 6,604 | 36.95977 | 80 | py |
models | models-master/research/delf/delf/python/training/losses/ranking_losses.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ranking loss definitions."""
import tensorflow as tf
class ContrastiveLoss(tf.keras.losses.Loss):
"""Contrastive Loss layer.
  Contrastive Loss layer allows computing the contrastive loss for a batch of
  images. Implementation based on: https://arxiv.org/abs/1604.02426.
"""
def __init__(self, margin=0.7, reduction=tf.keras.losses.Reduction.NONE):
"""Initialization of Contrastive Loss layer.
Args:
margin: Float contrastive loss margin.
reduction: Type of loss reduction.
"""
super(ContrastiveLoss, self).__init__(reduction)
self.margin = margin
# Parameter for numerical stability.
self.eps = 1e-6
def __call__(self, queries, positives, negatives):
"""Invokes the Contrastive Loss instance.
Args:
queries: [batch_size, dim] Anchor input tensor.
positives: [batch_size, dim] Positive sample input tensor.
negatives: [batch_size, num_neg, dim] Negative sample input tensor.
Returns:
loss: Scalar tensor.
"""
return contrastive_loss(
queries, positives, negatives, margin=self.margin, eps=self.eps)
class TripletLoss(tf.keras.losses.Loss):
"""Triplet Loss layer.
Triplet Loss layer computes triplet loss for a batch of images. Triplet
loss tries to keep all queries closer to positives than to any negatives.
Margin is used to specify when a triplet has become too "easy" and we no
longer want to adjust the weights from it. Differently from the Contrastive
Loss, Triplet Loss uses squared distances when computing the loss.
Implementation based on: https://arxiv.org/abs/1511.07247.
"""
def __init__(self, margin=0.1, reduction=tf.keras.losses.Reduction.NONE):
"""Initialization of Triplet Loss layer.
Args:
margin: Triplet loss margin.
reduction: Type of loss reduction.
"""
super(TripletLoss, self).__init__(reduction)
self.margin = margin
def __call__(self, queries, positives, negatives):
"""Invokes the Triplet Loss instance.
Args:
queries: [batch_size, dim] Anchor input tensor.
positives: [batch_size, dim] Positive sample input tensor.
negatives: [batch_size, num_neg, dim] Negative sample input tensor.
Returns:
loss: Scalar tensor.
"""
return triplet_loss(queries, positives, negatives, margin=self.margin)
def contrastive_loss(queries, positives, negatives, margin=0.7, eps=1e-6):
"""Calculates Contrastive Loss.
  We expect the `queries`, `positives` and `negatives` to be normalized to
  unit length for training stability. The contrastive loss directly optimizes
  the pairwise Euclidean distance by encouraging all positive distances to
  approach 0, while keeping negative distances above a certain threshold (the
  margin).
Args:
queries: [batch_size, dim] Anchor input tensor.
positives: [batch_size, dim] Positive sample input tensor.
negatives: [batch_size, num_neg, dim] Negative sample input tensor.
    margin: Float contrastive loss margin.
eps: Float parameter for numerical stability.
Returns:
loss: Scalar tensor.
"""
dim = tf.shape(queries)[1]
# Number of `queries`.
batch_size = tf.shape(queries)[0]
# Number of `positives`.
np = tf.shape(positives)[0]
# Number of `negatives`.
num_neg = tf.shape(negatives)[1]
# Preparing negatives.
stacked_negatives = tf.reshape(negatives, [num_neg * batch_size, dim])
# Preparing queries for further loss calculation.
stacked_queries = tf.repeat(queries, num_neg + 1, axis=0)
positives_and_negatives = tf.concat([positives, stacked_negatives], axis=0)
  # Calculate a Euclidean norm for each pair of points. For any positive
  # pair of data points this distance should be small, and for any
  # negative pair it should be large.
distances = tf.norm(stacked_queries - positives_and_negatives + eps, axis=1)
positives_part = 0.5 * tf.pow(distances[:np], 2.0)
negatives_part = 0.5 * tf.pow(
tf.math.maximum(margin - distances[np:], 0), 2.0)
# Final contrastive loss calculation.
loss = tf.reduce_sum(tf.concat([positives_part, negatives_part], 0))
return loss
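# Illustrative usage sketch (not part of the original module): the inputs are
# assumed to be unit-normalized embeddings with the shapes documented above,
# e.g.
#   queries = tf.math.l2_normalize(tf.random.normal([4, 128]), axis=1)
#   positives = tf.math.l2_normalize(tf.random.normal([4, 128]), axis=1)
#   negatives = tf.math.l2_normalize(tf.random.normal([4, 5, 128]), axis=2)
#   loss = contrastive_loss(queries, positives, negatives, margin=0.7)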
def triplet_loss(queries, positives, negatives, margin=0.1):
"""Calculates Triplet Loss.
Triplet loss tries to keep all queries closer to positives than to any
negatives. Differently from the Contrastive Loss, Triplet Loss uses squared
distances when computing the loss.
Args:
queries: [batch_size, dim] Anchor input tensor.
positives: [batch_size, dim] Positive sample input tensor.
negatives: [batch_size, num_neg, dim] Negative sample input tensor.
    margin: Float triplet loss margin.
Returns:
loss: Scalar tensor.
"""
dim = tf.shape(queries)[1]
# Number of `queries`.
batch_size = tf.shape(queries)[0]
# Number of `negatives`.
num_neg = tf.shape(negatives)[1]
# Preparing negatives.
stacked_negatives = tf.reshape(negatives, [num_neg * batch_size, dim])
# Preparing queries for further loss calculation.
stacked_queries = tf.repeat(queries, num_neg, axis=0)
# Preparing positives for further loss calculation.
stacked_positives = tf.repeat(positives, num_neg, axis=0)
# Computes *squared* distances.
distance_positives = tf.reduce_sum(
tf.square(stacked_queries - stacked_positives), axis=1)
distance_negatives = tf.reduce_sum(
tf.square(stacked_queries - stacked_negatives), axis=1)
# Final triplet loss calculation.
loss = tf.reduce_sum(
tf.maximum(distance_positives - distance_negatives + margin, 0.0))
return loss
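# Illustrative usage sketch (not part of the original module): with the same
# [batch_size, dim] / [batch_size, num_neg, dim] inputs as above,
#   loss = triplet_loss(queries, positives, negatives, margin=0.1)
# Triplets whose positive is already at least `margin` closer (in squared
# distance) than every negative contribute zero to the loss.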
| 6,201 | 34.238636 | 80 | py |
models | models-master/research/delf/delf/python/datasets/utils.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Supporting functions for data loading."""
import numpy as np
from PIL import Image
import tensorflow as tf
from delf import utils as image_loading_utils
def pil_imagenet_loader(path, imsize, bounding_box=None, preprocess=True):
"""Pillow loader for the images.
Args:
path: Path to image to be loaded.
imsize: Integer, defines the maximum size of longer image side.
bounding_box: (x1,y1,x2,y2) tuple to crop the query image.
preprocess: Bool, whether to preprocess the images in respect to the
ImageNet dataset.
Returns:
image: `Tensor`, image in ImageNet suitable format.
"""
img = image_loading_utils.RgbLoader(path)
if bounding_box is not None:
imfullsize = max(img.size)
img = img.crop(bounding_box)
imsize = imsize * max(img.size) / imfullsize
# Unlike `resize`, `thumbnail` resizes to the largest size that preserves
# the aspect ratio, making sure that the output image does not exceed the
# original image size and the size specified in the arguments of thumbnail.
img.thumbnail((imsize, imsize), Image.ANTIALIAS)
img = np.array(img)
if preprocess:
# Preprocessing for ImageNet data. Converts the images from RGB to BGR,
# then zero-centers each color channel with respect to the ImageNet
# dataset, without scaling.
    img = tf.keras.applications.imagenet_utils.preprocess_input(
        img, mode='caffe')
return img
def default_loader(path, imsize, bounding_box=None, preprocess=True):
"""Default loader for the images is using Pillow.
Args:
path: Path to image to be loaded.
imsize: Integer, defines the maximum size of longer image side.
bounding_box: (x1,y1,x2,y2) tuple to crop the query image.
preprocess: Bool, whether to preprocess the images in respect to the
ImageNet dataset.
Returns:
image: `Tensor`, image in ImageNet suitable format.
"""
img = pil_imagenet_loader(path, imsize, bounding_box, preprocess)
return img
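# Illustrative usage sketch (not part of the original module); the image path
# below is hypothetical:
#   img = default_loader('/tmp/query.jpg', imsize=1024,
#                        bounding_box=(10, 10, 200, 200), preprocess=True)
# The crop defined by `bounding_box` is rescaled by the same factor that the
# full image would have been when limited to `imsize`, and with
# `preprocess=True` the result is converted to BGR with ImageNet channel
# means subtracted (mode='caffe').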
| 2,662 | 34.506667 | 80 | py |
models | models-master/research/delf/delf/python/normalization_layers/normalization.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layer definitions."""
import tensorflow as tf
class L2Normalization(tf.keras.layers.Layer):
"""Normalization layer using L2 norm."""
def __init__(self):
"""Initialization of the L2Normalization layer."""
super(L2Normalization, self).__init__()
# A lower bound value for the norm.
self.eps = 1e-6
def call(self, x, axis=1):
"""Invokes the L2Normalization instance.
Args:
x: A Tensor.
axis: Dimension along which to normalize. A scalar or a vector of
integers.
Returns:
norm: A Tensor with the same shape as `x`.
"""
return tf.nn.l2_normalize(x, axis, epsilon=self.eps)
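# Illustrative usage sketch (not part of the original module):
#   layer = L2Normalization()
#   layer(tf.constant([[3.0, 4.0]]), axis=1)  # -> [[0.6, 0.8]], unit norm.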
| 1,348 | 31.902439 | 80 | py |
models | models-master/research/delf/delf/python/pooling_layers/pooling.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling layers definitions."""
import tensorflow as tf
class MAC(tf.keras.layers.Layer):
"""Global max pooling (MAC) layer.
Maximum Activations of Convolutions (MAC) is simply constructed by
max-pooling over all dimensions per feature map. See
https://arxiv.org/abs/1511.05879 for a reference.
"""
def call(self, x, axis=None):
"""Invokes the MAC pooling instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return mac(x, axis=axis)
class SPoC(tf.keras.layers.Layer):
"""Average pooling (SPoC) layer.
Sum-pooled convolutional features (SPoC) is based on the sum pooling of the
deep features. See https://arxiv.org/pdf/1510.07493.pdf for a reference.
"""
def call(self, x, axis=None):
"""Invokes the SPoC instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return spoc(x, axis)
class GeM(tf.keras.layers.Layer):
"""Generalized mean pooling (GeM) layer.
Generalized Mean Pooling (GeM) computes the generalized mean of each
channel in a tensor. See https://arxiv.org/abs/1711.02512 for a reference.
"""
def __init__(self, power=3.):
"""Initialization of the generalized mean pooling (GeM) layer.
Args:
power: Float power > 0 is an inverse exponent parameter, used during the
generalized mean pooling computation. Setting this exponent as power > 1
increases the contrast of the pooled feature map and focuses on the
salient features of the image. GeM is a generalization of the average
pooling commonly used in classification networks (power = 1) and of
spatial max-pooling layer (power = inf).
"""
super(GeM, self).__init__()
self.power = power
self.eps = 1e-6
def call(self, x, axis=None):
"""Invokes the GeM instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return gem(x, power=self.power, eps=self.eps, axis=axis)
class GeMPooling2D(tf.keras.layers.Layer):
"""Generalized mean pooling (GeM) pooling operation for spatial data."""
def __init__(self,
power=20.,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format='channels_last'):
"""Initialization of GeMPooling2D.
Args:
power: Float, power > 0. is an inverse exponent parameter (GeM power).
pool_size: Integer or tuple of 2 integers, factors by which to downscale
(vertical, horizontal)
strides: Integer, tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`.
padding: One of `valid` or `same`. `valid` means no padding. `same`
results in padding evenly to the left/right or up/down of the input such
that output has the same height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch, height, width,
channels)` while `channels_first` corresponds to inputs with shape
`(batch, channels, height, width)`.
"""
super(GeMPooling2D, self).__init__()
self.power = power
self.eps = 1e-6
self.pool_size = pool_size
self.strides = strides
self.padding = padding.upper()
data_format_conv = {
'channels_last': 'NHWC',
'channels_first': 'NCHW',
}
self.data_format = data_format_conv[data_format]
def call(self, x):
tmp = tf.pow(x, self.power)
tmp = tf.nn.avg_pool(tmp, self.pool_size, self.strides, self.padding,
self.data_format)
out = tf.pow(tmp, 1. / self.power)
return out
def mac(x, axis=None):
"""Performs global max pooling (MAC).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return tf.reduce_max(x, axis=axis, keepdims=False)
def spoc(x, axis=None):
"""Performs average pooling (SPoC).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return tf.reduce_mean(x, axis=axis, keepdims=False)
def gem(x, axis=None, power=3., eps=1e-6):
"""Performs generalized mean pooling (GeM).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
power: Float, power > 0 is an inverse exponent parameter (GeM power).
eps: Float, parameter for numerical stability.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
tmp = tf.pow(tf.maximum(x, eps), power)
out = tf.pow(tf.reduce_mean(tmp, axis=axis, keepdims=False), 1. / power)
return out
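# Worked example (illustrative, not part of the original module): for a single
# 2x2 feature map with one channel holding [[1., 2.], [3., 4.]],
#   mac  -> 4.0 (max over H, W)
#   spoc -> 2.5 (mean over H, W)
#   gem with power=3 -> (mean([1, 8, 27, 64]))**(1/3) = 25**(1/3) ~= 2.924
# i.e. GeM interpolates between average pooling (power=1) and max pooling
# (power -> inf).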
| 6,125 | 30.415385 | 80 | py |
models | models-master/research/delf/delf/python/pooling_layers/pooling_test.py | # Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pooling layers."""
import tensorflow as tf
from delf.python.pooling_layers import pooling
class PoolingsTest(tf.test.TestCase):
def testMac(self):
x = tf.constant([[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]])
# Run tested function.
result = pooling.mac(x)
# Define expected result.
exp_output = [[6., 7.]]
# Compare actual and expected.
self.assertAllClose(exp_output, result)
def testSpoc(self):
x = tf.constant([[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]])
# Run tested function.
result = pooling.spoc(x)
# Define expected result.
exp_output = [[3., 4.]]
# Compare actual and expected.
self.assertAllClose(exp_output, result)
def testGem(self):
x = tf.constant([[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]])
# Run tested function.
result = pooling.gem(x, power=3., eps=1e-6)
# Define expected result.
exp_output = [[4.1601677, 4.9866314]]
# Compare actual and expected.
self.assertAllClose(exp_output, result)
def testGeMPooling2D(self):
# Create a testing tensor.
x = tf.constant([[[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]]])
x = tf.reshape(x, [1, 3, 3, 1])
# Checking GeMPooling2D relation to MaxPooling2D for the large values of
# `p`.
max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
strides=(1, 1), padding='valid')
out_max = max_pool_2d(x)
gem_pool_2d = pooling.GeMPooling2D(power=30., pool_size=(2, 2),
strides=(1, 1), padding='valid')
out_gem_max = gem_pool_2d(x)
# Check that for large `p` GeMPooling2D is close to MaxPooling2D.
self.assertAllEqual(out_max, tf.round(out_gem_max))
# Checking GeMPooling2D relation to AveragePooling2D for the value
# of `p` = 1.
avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
strides=(1, 1),
padding='valid')
out_avg = avg_pool_2d(x)
gem_pool_2d = pooling.GeMPooling2D(power=1., pool_size=(2, 2),
strides=(1, 1), padding='valid')
out_gem_avg = gem_pool_2d(x)
# Check that for `p` equals 1., GeMPooling2D becomes AveragePooling2D.
self.assertAllEqual(out_avg, out_gem_avg)
if __name__ == '__main__':
tf.test.main()
| 3,148 | 36.047059 | 80 | py |
models | models-master/research/deeplab/core/conv2d_ws.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Augment slim.conv2d with optional Weight Standardization (WS).
WS is a normalization method to accelerate micro-batch training. When used with
Group Normalization and trained with 1 image/GPU, WS is able to match or
outperform the performances of BN trained with large batch sizes.
[1] Siyuan Qiao, Huiyu Wang, Chenxi Liu, Wei Shen, Alan Yuille
Weight Standardization. arXiv:1903.10520
[2] Lei Huang, Xianglong Liu, Yang Liu, Bo Lang, Dacheng Tao
Centered Weight Normalization in Accelerating Training of Deep Neural
Networks. ICCV 2017
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import utils
class Conv2D(tf.keras.layers.Conv2D, tf.layers.Layer):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
use_weight_standardization=False,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
"""Constructs the 2D convolution layer.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the height
and width of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of
the convolution along the height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value !=
1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch, height, width,
channels)` while `channels_first` corresponds to inputs with shape
`(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying the
dilation rate to use for dilated convolution. Can be a single integer to
specify the same value for all spatial dimensions. Currently, specifying
any `dilation_rate` value != 1 is incompatible with specifying any
stride value != 1.
activation: Activation function. Set it to None to maintain a linear
activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
use_weight_standardization: Boolean, whether the layer uses weight
standardization.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the bias
after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
**kwargs: Arbitrary keyword arguments passed to tf.keras.layers.Conv2D
"""
super(Conv2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
self.use_weight_standardization = use_weight_standardization
def call(self, inputs):
if self.use_weight_standardization:
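      # Weight Standardization: re-center and re-scale the kernel over its
      # spatial and input-channel axes ([kernel_height, kernel_width,
      # in_channels]) so each output filter has zero mean and unit variance
      # before the convolution is applied.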
mean, var = tf.nn.moments(self.kernel, [0, 1, 2], keep_dims=True)
kernel = (self.kernel - mean) / tf.sqrt(var + 1e-5)
outputs = self._convolution_op(inputs, kernel)
else:
outputs = self._convolution_op(inputs, self.kernel)
if self.use_bias:
if self.data_format == 'channels_first':
if self.rank == 1:
# tf.nn.bias_add does not accept a 1D input tensor.
bias = tf.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW')
else:
outputs = tf.nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
@contrib_framework.add_arg_scope
def conv2d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=contrib_layers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
use_weight_standardization=False,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
variable would be created and added the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape `[batch_size] + input_spatial_shape +
[in_channels]` if data_format does not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same value
for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      Defaults to None, meaning no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
use_weight_standardization: Boolean, whether the layer uses weight
standardization.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
# pylint: disable=protected-access
layer_variable_getter = layers._build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
# pylint: enable=protected-access
with tf.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = tf.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank != 4:
raise ValueError('Convolution expects input with rank %d, got %d' %
(4, input_rank))
data_format = ('channels_first' if data_format and
data_format.startswith('NC') else 'channels_last')
layer = Conv2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=data_format,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
use_weight_standardization=use_weight_standardization,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
# pylint: disable=protected-access
layers._add_variable_to_collections(layer.kernel, variables_collections,
'weights')
if layer.use_bias:
layers._add_variable_to_collections(layer.bias, variables_collections,
'biases')
# pylint: enable=protected-access
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return conv2d(
inputs,
num_outputs,
kernel_size,
stride=1,
rate=rate,
padding='SAME',
scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(inputs,
[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return conv2d(
inputs,
num_outputs,
kernel_size,
stride=stride,
rate=rate,
padding='VALID',
scope=scope)
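# Worked example of the explicit padding above (illustrative): with
# kernel_size=3, rate=1 and stride=2 the effective kernel size is 3, so
# pad_total=2, pad_beg=1 and pad_end=1. A 224x224 input is padded to 226x226
# and convolved with 'VALID' padding, giving a 112x112 output whose sampling
# grid differs from plain 'SAME' padding only for even-sized inputs (see
# ResnetUtilsTest.testConv2DSameEven referenced in the docstring).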
| 15,881 | 41.924324 | 80 | py |
models | models-master/research/deeplab/core/resnet_v1_beta.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet v1 model variants.
Code branched out from slim/nets/resnet_v1.py, and please refer to it for
more details.
The original version ResNets-v1 were proposed by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from six.moves import range
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from deeplab.core import conv2d_ws
from deeplab.core import utils
from tensorflow.contrib.slim.nets import resnet_utils
slim = contrib_slim
_DEFAULT_MULTI_GRID = [1, 1, 1]
_DEFAULT_MULTI_GRID_RESNET_18 = [1, 1]
@slim.add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
unit_rate=1,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
unit_rate: An integer, unit rate for atrous convolution.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = conv2d_ws.conv2d(
inputs,
depth,
[1, 1],
stride=stride,
activation_fn=None,
scope='shortcut')
residual = conv2d_ws.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = conv2d_ws.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate*unit_rate, scope='conv2')
residual = conv2d_ws.conv2d(residual, depth, [1, 1], stride=1,
activation_fn=None, scope='conv3')
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections, sc.name,
output)
@slim.add_arg_scope
def lite_bottleneck(inputs,
depth,
stride,
unit_rate=1,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
unit_rate: An integer, unit rate for atrous convolution.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'lite_bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = conv2d_ws.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=None,
scope='shortcut')
residual = conv2d_ws.conv2d_same(
inputs, depth, 3, 1, rate=rate * unit_rate, scope='conv1')
with slim.arg_scope([conv2d_ws.conv2d], activation_fn=None):
residual = conv2d_ws.conv2d_same(
residual, depth, 3, stride, rate=rate * unit_rate, scope='conv2')
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections, sc.name,
output)
def root_block_fn_for_beta_variant(net, depth_multiplier=1.0):
"""Gets root_block_fn for beta variant.
ResNet-v1 beta variant modifies the first original 7x7 convolution to three
3x3 convolutions.
Args:
net: A tensor of size [batch, height, width, channels], input to the model.
depth_multiplier: Controls the number of convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_out * depth_multiplier`.
Returns:
A tensor after three 3x3 convolutions.
"""
net = conv2d_ws.conv2d_same(
net, int(64 * depth_multiplier), 3, stride=2, scope='conv1_1')
net = conv2d_ws.conv2d_same(
net, int(64 * depth_multiplier), 3, stride=1, scope='conv1_2')
net = conv2d_ws.conv2d_same(
net, int(128 * depth_multiplier), 3, stride=1, scope='conv1_3')
return net
def resnet_v1_beta(inputs,
blocks,
num_classes=None,
is_training=None,
global_pool=True,
output_stride=None,
root_block_fn=None,
reuse=None,
scope=None,
sync_batch_norm_method='None'):
"""Generator for v1 ResNet models (beta variant).
This function generates a family of modified ResNet v1 models. In particular,
the first original 7x7 convolution is replaced with three 3x3 convolutions.
See the resnet_v1_*() methods for specific model instantiations, obtained by
selecting different block instantiations that produce ResNets of various
depths.
The code is modified from slim/nets/resnet_v1.py, and please refer to it for
more details.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
root_block_fn: The function consisting of convolution operations applied to
the root input. If root_block_fn is None, use the original setting of
    ResNet-v1, which is simply one convolution with 7x7 kernel and stride=2.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
if root_block_fn is None:
root_block_fn = functools.partial(conv2d_ws.conv2d_same,
num_outputs=64,
kernel_size=7,
stride=2,
scope='conv1')
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([
conv2d_ws.conv2d, bottleneck, lite_bottleneck,
resnet_utils.stack_blocks_dense
],
outputs_collections=end_points_collection):
if is_training is not None:
arg_scope = slim.arg_scope([batch_norm], is_training=is_training)
else:
arg_scope = slim.arg_scope([])
with arg_scope:
net = inputs
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride //= 4
net = root_block_fn(net)
net = slim.max_pool2d(net, 3, stride=2, padding='SAME', scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
if num_classes is not None:
net = conv2d_ws.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits',
use_weight_standardization=False)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if num_classes is not None:
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
def resnet_v1_beta_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v1 beta variant bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v1 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1,
'unit_rate': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride,
'unit_rate': 1
}])
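# For example (illustrative), resnet_v1_beta_block('block1', base_depth=64,
# num_units=3, stride=2) describes three bottleneck units with output depth
# 256 (= 64 * 4) and bottleneck depth 64, where only the last unit uses
# stride 2.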
def resnet_v1_small_beta_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_18 beta variant bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_18 bottleneck block.
"""
block_args = []
for _ in range(num_units - 1):
block_args.append({'depth': base_depth, 'stride': 1, 'unit_rate': 1})
block_args.append({'depth': base_depth, 'stride': stride, 'unit_rate': 1})
return resnet_utils.Block(scope, lite_bottleneck, block_args)
def resnet_v1_18(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_18',
sync_batch_norm_method='None'):
"""Resnet v1 18.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
    ValueError: if multi_grid is not None and does not have length = 2.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID_RESNET_18
else:
if len(multi_grid) != 2:
raise ValueError('Expect multi_grid to have length 2.')
block4_args = []
for rate in multi_grid:
block4_args.append({'depth': 512, 'stride': 1, 'unit_rate': rate})
blocks = [
resnet_v1_small_beta_block(
'block1', base_depth=64, num_units=2, stride=2),
resnet_v1_small_beta_block(
'block2', base_depth=128, num_units=2, stride=2),
resnet_v1_small_beta_block(
'block3', base_depth=256, num_units=2, stride=2),
resnet_utils.Block('block4', lite_bottleneck, block4_args),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def resnet_v1_18_beta(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
root_depth_multiplier=0.25,
reuse=None,
scope='resnet_v1_18',
sync_batch_norm_method='None'):
"""Resnet v1 18 beta variant.
This variant modifies the first convolution layer of ResNet-v1-18. In
particular, it changes the original one 7x7 convolution to three 3x3
convolutions.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
root_depth_multiplier: Float, depth multiplier used for the first three
convolution layers that replace the 7x7 convolution.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
    ValueError: if multi_grid is not None and does not have length = 2.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID_RESNET_18
else:
if len(multi_grid) != 2:
raise ValueError('Expect multi_grid to have length 2.')
block4_args = []
for rate in multi_grid:
block4_args.append({'depth': 512, 'stride': 1, 'unit_rate': rate})
blocks = [
resnet_v1_small_beta_block(
'block1', base_depth=64, num_units=2, stride=2),
resnet_v1_small_beta_block(
'block2', base_depth=128, num_units=2, stride=2),
resnet_v1_small_beta_block(
'block3', base_depth=256, num_units=2, stride=2),
resnet_utils.Block('block4', lite_bottleneck, block4_args),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
root_block_fn=functools.partial(root_block_fn_for_beta_variant,
depth_multiplier=root_depth_multiplier),
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def resnet_v1_50(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_50',
sync_batch_norm_method='None'):
"""Resnet v1 50.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if multi_grid is not None and does not have length = 3.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
blocks = [
resnet_v1_beta_block(
'block1', base_depth=64, num_units=3, stride=2),
resnet_v1_beta_block(
'block2', base_depth=128, num_units=4, stride=2),
resnet_v1_beta_block(
'block3', base_depth=256, num_units=6, stride=2),
resnet_utils.Block('block4', bottleneck, [
{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1,
'unit_rate': rate} for rate in multi_grid]),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def resnet_v1_50_beta(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_50',
sync_batch_norm_method='None'):
"""Resnet v1 50 beta variant.
This variant modifies the first convolution layer of ResNet-v1-50. In
particular, it changes the original one 7x7 convolution to three 3x3
convolutions.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if multi_grid is not None and does not have length = 3.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
blocks = [
resnet_v1_beta_block(
'block1', base_depth=64, num_units=3, stride=2),
resnet_v1_beta_block(
'block2', base_depth=128, num_units=4, stride=2),
resnet_v1_beta_block(
'block3', base_depth=256, num_units=6, stride=2),
resnet_utils.Block('block4', bottleneck, [
{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1,
'unit_rate': rate} for rate in multi_grid]),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
root_block_fn=functools.partial(root_block_fn_for_beta_variant),
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
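# Illustrative usage sketch (not part of the original file): a dense
# prediction backbone at output_stride=16 with multi-grid atrous rates in
# block4 could be built as
#   with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
#     net, end_points = resnet_v1_50_beta(
#         images, num_classes=None, is_training=False, global_pool=False,
#         output_stride=16, multi_grid=[1, 2, 4])
# where `images` is a [batch, height, width, 3] float tensor.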
def resnet_v1_101(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_101',
sync_batch_norm_method='None'):
"""Resnet v1 101.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if multi_grid is not None and does not have length = 3.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
blocks = [
resnet_v1_beta_block(
'block1', base_depth=64, num_units=3, stride=2),
resnet_v1_beta_block(
'block2', base_depth=128, num_units=4, stride=2),
resnet_v1_beta_block(
'block3', base_depth=256, num_units=23, stride=2),
resnet_utils.Block('block4', bottleneck, [
{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1,
'unit_rate': rate} for rate in multi_grid]),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
def resnet_v1_101_beta(inputs,
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
multi_grid=None,
reuse=None,
scope='resnet_v1_101',
sync_batch_norm_method='None'):
"""Resnet v1 101 beta variant.
This variant modifies the first convolution layer of ResNet-v1-101. In
  particular, it replaces the original 7x7 convolution with three 3x3
convolutions.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: Enable/disable is_training for batch normalization.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
scope: Optional variable_scope.
sync_batch_norm_method: String, sync batchnorm method.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if multi_grid is not None and does not have length = 3.
"""
if multi_grid is None:
multi_grid = _DEFAULT_MULTI_GRID
else:
if len(multi_grid) != 3:
raise ValueError('Expect multi_grid to have length 3.')
blocks = [
resnet_v1_beta_block(
'block1', base_depth=64, num_units=3, stride=2),
resnet_v1_beta_block(
'block2', base_depth=128, num_units=4, stride=2),
resnet_v1_beta_block(
'block3', base_depth=256, num_units=23, stride=2),
resnet_utils.Block('block4', bottleneck, [
{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1,
'unit_rate': rate} for rate in multi_grid]),
]
return resnet_v1_beta(
inputs,
blocks=blocks,
num_classes=num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
root_block_fn=functools.partial(root_block_fn_for_beta_variant),
reuse=reuse,
scope=scope,
sync_batch_norm_method=sync_batch_norm_method)
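# Illustrative sketch (not part of the original library): a typical dense
# prediction setup for the beta variant with atrous multi-grid rates. The
# output_stride and multi_grid values below are assumptions for this sketch.
def _example_resnet_v1_101_beta_features(images):
  """Builds dense-prediction features from a [batch, h, w, 3] image tensor."""
  return resnet_v1_101_beta(
      images,
      num_classes=None,
      is_training=False,
      global_pool=False,
      output_stride=8,
      multi_grid=[1, 2, 4])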
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=tf.nn.relu,
use_batch_norm=True,
sync_batch_norm_method='None',
normalization_method='unspecified',
use_weight_standardization=False):
"""Defines the default ResNet arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Deprecated in favor of normalization_method.
sync_batch_norm_method: String, sync batchnorm method.
normalization_method: String, one of `batch`, `none`, or `group`, to use
batch normalization, no normalization, or group normalization.
use_weight_standardization: Boolean, whether to use weight standardization.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
}
batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
if normalization_method == 'batch':
normalizer_fn = batch_norm
elif normalization_method == 'none':
normalizer_fn = None
elif normalization_method == 'group':
normalizer_fn = slim.group_norm
elif normalization_method == 'unspecified':
normalizer_fn = batch_norm if use_batch_norm else None
else:
raise ValueError('Unrecognized normalization_method %s' %
normalization_method)
with slim.arg_scope([conv2d_ws.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
use_weight_standardization=use_weight_standardization):
with slim.arg_scope([batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
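# Minimal usage sketch (illustrative, not part of the original library): builds
# ResNet-v1-101 features under the arg scope defined above. The input shape and
# hyperparameter values are assumptions.
def _example_resnet_v1_101_with_arg_scope():
  images = tf.zeros([1, 225, 225, 3])
  with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    net, end_points = resnet_v1_101(
        images,
        num_classes=None,
        is_training=False,
        global_pool=False,
        output_stride=16)
  return net, end_points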
# File: models-master/research/seq_flow_lite/trainer_v2.py
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary to train PRADO model with TF 2.0."""
import importlib
import json
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import input_fn_reader # import root module
FLAGS = flags.FLAGS
flags.DEFINE_string("config_path", None, "Path to a RunnerConfig.")
flags.DEFINE_enum("runner_mode", "train", ["train", "train_and_eval", "eval"],
"Runner mode.")
flags.DEFINE_string("master", None, "TensorFlow master URL.")
flags.DEFINE_string(
"output_dir", "/tmp/testV2",
"The output directory where the model checkpoints will be written.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def load_runner_config():
with tf.io.gfile.GFile(FLAGS.config_path, "r") as f:
return json.loads(f.read())
def compute_loss(logits, labels, model_config, mode):
"""Creates a sequence labeling model."""
if mode != tf_estimator.ModeKeys.PREDICT:
if not model_config["multilabel"]:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
else:
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
loss = tf.reduce_mean(loss)
else:
loss = None
return loss
def model_fn_builder(runner_config, mode):
"""Returns `model_fn` closure for TPUEstimator."""
rel_module_path = "" # empty base dir
model = importlib.import_module(rel_module_path + runner_config["name"])
model_config = runner_config["model_config"]
return model.Encoder(model_config, mode)
def main(_):
runner_config = load_runner_config()
if FLAGS.output_dir:
tf.io.gfile.makedirs(FLAGS.output_dir)
train_model = model_fn_builder(runner_config, tf_estimator.ModeKeys.TRAIN)
optimizer = tf.keras.optimizers.Adam()
train_input_fn = input_fn_reader.create_input_fn(
runner_config=runner_config,
mode=tf_estimator.ModeKeys.TRAIN,
drop_remainder=True)
params = {"batch_size": runner_config["batch_size"]}
train_ds = train_input_fn(params)
train_loss = tf.keras.metrics.Mean(name="train_loss")
@tf.function
def train_step(features):
with tf.GradientTape() as tape:
logits = train_model(features["projection"], features["seq_length"])
loss = compute_loss(logits, features["label"],
runner_config["model_config"],
tf_estimator.ModeKeys.TRAIN)
gradients = tape.gradient(loss, train_model.trainable_variables)
optimizer.apply_gradients(zip(gradients, train_model.trainable_variables))
train_loss(loss)
for epoch in range(1):
train_loss.reset_states()
for features in train_ds:
train_step(features)
step = optimizer.iterations.numpy()
if step % 100 == 0:
logging.info("Running step %s in epoch %s", step, epoch)
logging.info("Training loss: %s, epoch: %s, step: %s",
round(train_loss.result().numpy(), 4), epoch, step)
if __name__ == "__main__":
app.run(main)
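# Illustrative invocation (paths and flag values below are hypothetical):
#   python trainer_v2.py \
#     --config_path=/tmp/pqrnn_runner_config.json \
#     --runner_mode=train \
#     --output_dir=/tmp/pqrnn_model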
# File: models-master/research/seq_flow_lite/trainer.py
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A utility for PRADO model to do train, eval, inference and model export."""
import importlib
import json
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
import input_fn_reader # import root module
import metric_functions # import root module
tf.disable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string("config_path", None, "Path to a RunnerConfig.")
flags.DEFINE_enum("runner_mode", None, ["train", "train_and_eval", "eval"],
"Runner mode.")
flags.DEFINE_string("master", None, "TensorFlow master URL.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def load_runner_config():
with tf.gfile.GFile(FLAGS.config_path, "r") as f:
return json.loads(f.read())
def create_model(model, model_config, features, mode, model_name):
"""Creates a sequence labeling model."""
keras_model = model.Encoder(model_config, mode)
if any(model in model_name for model in ["pqrnn", "prado"]):
logits = keras_model(features["projection"], features["seq_length"])
else:
logits = keras_model(features["token_ids"], features["token_len"])
if mode != tf_estimator.ModeKeys.PREDICT:
if not model_config["multilabel"]:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=features["label"], logits=logits)
else:
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=features["label"], logits=logits)
loss = tf.reduce_mean(loss)
loss += tf.add_n(keras_model.losses)
else:
loss = None
return (loss, logits)
def create_optimizer(loss, runner_config, params):
"""Returns a train_op using Adam optimizer."""
learning_rate = tf.train.exponential_decay(
learning_rate=runner_config["learning_rate"],
global_step=tf.train.get_global_step(),
decay_steps=runner_config["learning_rate_decay_steps"],
decay_rate=runner_config["learning_rate_decay_rate"],
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
if params["use_tpu"]:
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
return optimizer.minimize(loss, global_step=tf.train.get_global_step())
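# Illustrative fragment of the runner_config consumed by create_optimizer
# above; the numeric values are assumptions, not recommended defaults.
_EXAMPLE_OPTIMIZER_CONFIG = {
    "learning_rate": 1e-3,
    "learning_rate_decay_steps": 1000,
    "learning_rate_decay_rate": 0.9,
}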
def model_fn_builder(runner_config):
"""Returns `model_fn` closure for TPUEstimator."""
rel_module_path = "" # empty base dir
model = importlib.import_module(rel_module_path + runner_config["name"])
def model_fn(features, mode, params):
"""The `model_fn` for TPUEstimator."""
label_ids = None
if mode != tf_estimator.ModeKeys.PREDICT:
label_ids = features["label"]
model_config = runner_config["model_config"]
loss, logits = create_model(model, model_config, features, mode,
runner_config["name"])
if mode == tf_estimator.ModeKeys.TRAIN:
train_op = create_optimizer(loss, runner_config, params)
return tf_estimator.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, train_op=train_op)
elif mode == tf_estimator.ModeKeys.EVAL:
if not runner_config["model_config"]["multilabel"]:
metric_fn = metric_functions.classification_metric
else:
metric_fn = metric_functions.labeling_metric
eval_metrics = (metric_fn, [loss, label_ids, logits])
return tf_estimator.tpu.TPUEstimatorSpec(
mode=mode, loss=loss, eval_metrics=eval_metrics)
elif mode == tf_estimator.ModeKeys.PREDICT:
predictions = {"logits": logits}
if not runner_config["model_config"]["multilabel"]:
predictions["predictions"] = tf.nn.softmax(logits)
else:
predictions["predictions"] = tf.math.sigmoid(logits)
return tf_estimator.EstimatorSpec(mode=mode, predictions=predictions)
else:
assert False, "Expected to be called in TRAIN, EVAL, or PREDICT mode."
return model_fn
def main(_):
runner_config = load_runner_config()
if FLAGS.output_dir:
tf.gfile.MakeDirs(FLAGS.output_dir)
is_per_host = tf_estimator.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf_estimator.tpu.RunConfig(
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=runner_config["save_checkpoints_steps"],
keep_checkpoint_max=20,
tpu_config=tf_estimator.tpu.TPUConfig(
iterations_per_loop=runner_config["iterations_per_loop"],
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(runner_config)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
batch_size = runner_config["batch_size"]
estimator = tf_estimator.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=batch_size,
eval_batch_size=batch_size,
predict_batch_size=batch_size)
if FLAGS.runner_mode == "train":
train_input_fn = input_fn_reader.create_input_fn(
runner_config=runner_config,
mode=tf_estimator.ModeKeys.TRAIN,
drop_remainder=True)
estimator.train(
input_fn=train_input_fn, max_steps=runner_config["train_steps"])
elif FLAGS.runner_mode == "eval":
# TPU needs fixed shapes, so if the last batch is smaller, we drop it.
eval_input_fn = input_fn_reader.create_input_fn(
runner_config=runner_config,
mode=tf_estimator.ModeKeys.EVAL,
drop_remainder=True)
for _ in tf.train.checkpoints_iterator(FLAGS.output_dir, timeout=600):
result = estimator.evaluate(input_fn=eval_input_fn)
for key in sorted(result):
logging.info(" %s = %s", key, str(result[key]))
if __name__ == "__main__":
app.run(main)
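# Illustrative runner_config JSON consumed by this trainer (field values are
# assumptions; "model_config" holds model-specific fields such as "multilabel",
# and the input pipeline may read additional fields not shown here):
#   {
#     "name": "pqrnn",
#     "batch_size": 128,
#     "train_steps": 100000,
#     "save_checkpoints_steps": 1000,
#     "iterations_per_loop": 100,
#     "learning_rate": 1e-3,
#     "learning_rate_decay_steps": 1000,
#     "learning_rate_decay_rate": 0.9,
#     "model_config": {"multilabel": false}
#   }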
# File: models-master/research/seq_flow_lite/models/pqrnn.py
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of pQRNN model."""
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import misc_layers # import seq_flow_lite module
from layers import projection_layers # import seq_flow_lite module
from layers import qrnn_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class Encoder(tf.keras.layers.Layer):
"""A pQRNN keras model."""
def __init__(self, config, mode, **kwargs):
super(Encoder, self).__init__(**kwargs)
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("projection_bottleneck_size")
_get_params("qrnn_state_size")
_get_params("qrnn_kernel_width", 3)
_get_params("qrnn_zoneout_probability")
_get_params("number_qrnn_layers")
_get_params("labels", [])
_get_params("num_labels", None)
_get_params("regularizer_scale")
_get_params("quantize")
self.num_classes = self.num_labels or len(self.labels)
self.parameters = base_layers.Parameters(
mode, quantize=self.quantize, regularizer_scale=self.regularizer_scale)
self.bottleneck_layer = dense_layers.BaseQDenseVarLen(
units=self.projection_bottleneck_size,
rank=3,
parameters=self.parameters)
self.qrnn_stack = qrnn_layers.QRNNBidirectionalStack(
parameters=self.parameters,
zoneout_probability=self.qrnn_zoneout_probability,
kwidth=self.qrnn_kernel_width,
state_size=self.qrnn_state_size,
num_layers=self.number_qrnn_layers)
self.attention_pool = misc_layers.AttentionPooling(
parameters=self.parameters)
if self.num_classes:
self.final_fc = dense_layers.BaseQDense(
units=self.num_classes,
rank=2,
parameters=self.parameters,
activation=None)
def call(self, projection, seq_length):
mask = tf.sequence_mask(
seq_length, tf.shape(projection)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
maskr3 = tf.expand_dims(mask, axis=2)
if self.parameters.mode in [base_layers.TRAIN, base_layers.EVAL]:
projection = projection * maskr3
bottleneck = self.bottleneck_layer(projection, maskr3, inverse_normalizer)
outputs = self.qrnn_stack(bottleneck, maskr3, inverse_normalizer)
pre_logits = self.attention_pool(outputs, maskr3, inverse_normalizer)
if self.num_classes:
return self.final_fc(pre_logits)
else:
return pre_logits
class Model(Encoder):
def __init__(self, config, mode, **kwargs):
super(Model, self).__init__(config, mode, **kwargs)
self.projection = projection_layers.ProjectionLayer(config, mode)
def call(self, inputs):
projection, seq_length = self.projection(inputs)
return super(Model, self).call(projection, seq_length)
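# Illustrative construction of the pQRNN encoder (hyperparameter values are
# assumptions for this sketch, not recommended settings).
def _example_pqrnn_encoder():
  config = {
      "projection_bottleneck_size": 64,
      "qrnn_state_size": 128,
      "qrnn_kernel_width": 3,
      "qrnn_zoneout_probability": 1e-2,
      "number_qrnn_layers": 3,
      "labels": ["negative", "positive"],
      "regularizer_scale": 1e-5,
      "quantize": False,
  }
  return Encoder(config, base_layers.TRAIN)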
# File: models-master/research/seq_flow_lite/models/charformer.py
# Copyright 2022 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Charformer based model for in-training tokenization."""
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import misc_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from models import transformer_encoder # import seq_flow_lite module
class Encoder(tf.keras.layers.Layer):
"""Encoder with GBST and Transformer layers."""
def __init__(self, config, mode, **kwargs):
super(Encoder, self).__init__(**kwargs)
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("labels", [])
_get_params("regularizer_scale")
_get_params("quantize")
_get_params("feature_size")
_get_params("bottleneck_size")
self.max_seq_len = config.get("max_seq_len", 128)
self.gbst_max_token_len = config.get("gbst_max_token_len", 128)
# Including 3 additional special token ids (0=padding, 1=EOS, 2=UNK).
self.vocabulary_size = config.get("vocabulary_size", 259)
self.parameters = base_layers.Parameters(
mode, quantize=self.quantize, regularizer_scale=self.regularizer_scale)
self.embedding = embedding_layers.EmbeddingLayer(
shape=[self.vocabulary_size, self.feature_size],
parameters=self.parameters)
self.gbst_downsample_rate = config.get("gbst_downsample_rate", 1)
self.positional_embedding = embedding_layers.EmbeddingLayer(
shape=[self.gbst_max_token_len, self.feature_size],
parameters=self.parameters)
self.ln = normalization_layers.LayerNormalization(
parameters=self.parameters)
self.qact = quantization_layers.ActivationQuantization(
parameters=self.parameters)
self.bottleneck_layer = None
gbst_size = self.feature_size
if self.bottleneck_size != self.feature_size:
self.bottleneck_layer = dense_layers.BaseQDenseVarLen(
self.bottleneck_size,
rank=3,
normalize=False,
activation=None,
parameters=self.parameters)
gbst_size = self.bottleneck_size
self.gbst_max_subword_block_width = config.get(
"gbst_max_subword_block_width", 5)
self.gbst_conv_kernel_size = config.get("gbst_conv_kernel_size", 5)
self.gbst_block_mixing_mode = config.get("gbst_block_mixing_mode", None)
self.gbst_layer = misc_layers.GBSTLayerV2(
feature_size=gbst_size,
max_seq_len=self.gbst_max_token_len,
downsample_rate=self.gbst_downsample_rate,
max_subword_block_width=self.gbst_max_subword_block_width,
conv_kernel_size=self.gbst_conv_kernel_size,
block_mixing_mode=self.gbst_block_mixing_mode,
parameters=self.parameters)
self.pool_windows = config.get("pool_windows", None)
if self.pool_windows:
self.transformer_encoder_layer = transformer_encoder.FunnelTransformerModel(
config, mode)
else:
self.transformer_encoder_layer = transformer_encoder.ModelWithEmbeddings(
config, mode)
self.attention_pool = misc_layers.AttentionPooling(
parameters=self.parameters)
self.num_classes = len(self.labels)
if self.num_classes:
self.final_fc = dense_layers.BaseQDense(
units=self.num_classes,
rank=2,
parameters=self.parameters,
activation=None)
def call(self, token_ids, seq_length):
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
mask_rank2 = tf.ones(tf.shape(token_ids), dtype=tf.int32)
seq_length = tf.reduce_sum(mask_rank2, axis=1)
pos_indices = tf.cumsum(mask_rank2, axis=1, exclusive=True)
pos_indices = tf.cast(pos_indices, dtype=tf.int32)
pos_indices = tf.reshape(pos_indices, [1, -1])
else:
mask_rank2 = tf.sequence_mask(
seq_length, tf.shape(token_ids)[1], dtype=tf.float32)
pos_indices = tf.cumsum(mask_rank2, axis=1, exclusive=True)
pos_indices = tf.cast(pos_indices, dtype=tf.int32)
input_values = self.embedding(token_ids)
pos_values = self.positional_embedding(pos_indices)
input_embeds = self.qact(self.ln(input_values + pos_values))
if self.bottleneck_layer is not None:
maskr3 = tf.expand_dims(mask_rank2, axis=2)
maskr3 = tf.cast(maskr3, tf.float32)
bottleneck_output = self.bottleneck_layer(input_embeds, maskr3)
else:
bottleneck_output = input_embeds
gbst_output = self.gbst_layer(bottleneck_output, seq_length)
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
mask_rank2 = tf.ones(tf.shape(gbst_output)[:-1], dtype=tf.float32)
seq_length = tf.reduce_sum(mask_rank2, axis=1)
else:
seq_length = seq_length / self.gbst_downsample_rate
if self.pool_windows:
outputs, mask = self.transformer_encoder_layer(gbst_output,
seq_length)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
pre_logits = self.attention_pool(outputs, mask, inverse_normalizer)
else:
outputs = self.transformer_encoder_layer(gbst_output, seq_length)
mask = tf.sequence_mask(
seq_length, tf.shape(outputs)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
maskr3 = tf.expand_dims(mask, axis=2)
pre_logits = self.attention_pool(outputs, maskr3, inverse_normalizer)
if self.num_classes:
return self.final_fc(pre_logits)
else:
return pre_logits
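# Illustrative construction of the charformer encoder. The dict below combines
# the fields read here with those read by the inner transformer encoder stack;
# all values are assumptions for this sketch, not recommended settings.
def _example_charformer_encoder():
  config = {
      "labels": ["negative", "positive"],
      "regularizer_scale": 1e-5,
      "quantize": False,
      "feature_size": 128,
      "bottleneck_size": 64,
      "gbst_max_token_len": 128,
      "gbst_downsample_rate": 2,
      # Fields below are consumed by transformer_encoder.ModelWithEmbeddings.
      "intermediate_size": 256,
      "max_time_step": 64,
      "embedding_size": 64,
      "vocabulary_size": 259,
      "num_layers": 2,
      "num_heads": 4,
      "model_dimension": 64,
  }
  return Encoder(config, base_layers.TRAIN)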
# File: models-master/research/seq_flow_lite/models/byteqrnn.py
# Copyright 2022 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ByteQRNN based model for in-training tokenization.
Sample model params:
"feature_size": 128, # Embedding size for each byte
"gbst_max_token_len": 1024, # Max sequence length of bytes in GBST
"gbst_downsample_rate": 1, # Downsample factor for GBST output
"bottleneck_size": 128, # Bottleneck size before feeding to QRNN
"qrnn_state_size": 128, # QRNN layer param
"qrnn_kernel_width": 3, # QRNN layer param
"qrnn_zoneout_probability": 1e-2, # QRNN layer param
"distortion_probability": 0.25, # QRNN layer param
"number_qrnn_layers": 3, # QRNN layer param
"labels": [], # List of labels for getting num classes
"regularizer_scale": 1e-5, # L2 Regularization scale
"quantize": true, # Enable quantization
"multilabel": true, # If the output is Multilabel
"""
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import misc_layers # import seq_flow_lite module
from layers import qrnn_layers # import seq_flow_lite module
class Encoder(tf.keras.layers.Layer):
"""Encoder with GBST and QRNN layers."""
def __init__(self, config, mode, **kwargs):
super(Encoder, self).__init__(**kwargs)
def _get_params(varname, default_value=None):
value = config.get(varname, default_value)
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("feature_size")
_get_params("bottleneck_size", self.feature_size)
_get_params("qrnn_state_size")
_get_params("qrnn_kernel_width", 3)
_get_params("qrnn_zoneout_probability")
_get_params("number_qrnn_layers")
_get_params("labels", [])
_get_params("regularizer_scale")
_get_params("quantize")
_get_params("gbst_max_token_len", 128)
_get_params("gbst_downsample_rate", 1)
_get_params("gbst_max_subword_block_width", 4)
_get_params("gbst_conv_kernel_size", 5)
_get_params("gbst_block_mixing_mode")
_get_params("gbst_add_block_pos_embed", False)
_get_params("attn_pool_output", True)
self.num_classes = len(config.get("labels", []))
self.parameters = base_layers.Parameters(
mode, quantize=self.quantize, regularizer_scale=self.regularizer_scale)
# Including 3 additional special token ids (0=padding, 1=EOS, 2=UNK).
self.vocabulary_size = 259
self.embedding = embedding_layers.EmbeddingLayer(
shape=[self.vocabulary_size, self.feature_size],
parameters=self.parameters)
self.bottleneck_layer = dense_layers.BaseQDenseVarLen(
units=self.bottleneck_size,
rank=3,
parameters=self.parameters)
self.gbst_layer = misc_layers.GBSTLayerV2(
feature_size=self.bottleneck_size,
max_seq_len=self.gbst_max_token_len,
downsample_rate=self.gbst_downsample_rate,
max_subword_block_width=self.gbst_max_subword_block_width,
conv_kernel_size=self.gbst_conv_kernel_size,
block_mixing_mode=self.gbst_block_mixing_mode,
add_block_pos_embed=self.gbst_add_block_pos_embed,
parameters=self.parameters)
self.qrnn_stack = qrnn_layers.QRNNBidirectionalStack(
parameters=self.parameters,
zoneout_probability=self.qrnn_zoneout_probability,
kwidth=self.qrnn_kernel_width,
state_size=self.qrnn_state_size,
num_layers=self.number_qrnn_layers)
self.attention_pool = misc_layers.AttentionPooling(
parameters=self.parameters)
if self.num_classes:
self.final_fc = dense_layers.BaseQDense(
units=self.num_classes,
rank=2,
parameters=self.parameters,
activation=None)
def call(self, token_ids, seq_length):
input_embeds = self.embedding(token_ids)
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
mask_rank2 = tf.ones(tf.shape(input_embeds)[:-1], dtype=tf.float32)
seq_length = tf.reduce_sum(mask_rank2, axis=1)
else:
mask_rank2 = tf.sequence_mask(
seq_length, tf.shape(input_embeds)[1], dtype=tf.float32)
maskr3 = tf.expand_dims(mask_rank2, axis=2)
gbst_input = self.bottleneck_layer(input_embeds, maskr3)
gbst_output = self.gbst_layer(gbst_input, seq_length)
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
mask_rank2 = tf.ones(tf.shape(gbst_output)[:-1], dtype=tf.float32)
seq_length = tf.reduce_sum(mask_rank2, axis=1)
else:
seq_length = seq_length / self.gbst_downsample_rate
mask_rank2 = tf.sequence_mask(
seq_length, tf.shape(gbst_output)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask_rank2))
maskr3 = tf.expand_dims(mask_rank2, axis=2)
outputs = self.qrnn_stack(gbst_output, maskr3, inverse_normalizer)
if self.attn_pool_output:
pre_logits = self.attention_pool(outputs, maskr3, inverse_normalizer)
if self.num_classes:
return self.final_fc(pre_logits)
else:
return pre_logits
else:
return outputs
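# Illustrative construction mirroring the sample model params in the module
# docstring above (values are illustrative, not recommended settings).
def _example_byteqrnn_encoder():
  config = {
      "feature_size": 128,
      "gbst_max_token_len": 1024,
      "gbst_downsample_rate": 1,
      "bottleneck_size": 128,
      "qrnn_state_size": 128,
      "qrnn_kernel_width": 3,
      "qrnn_zoneout_probability": 1e-2,
      "number_qrnn_layers": 3,
      "labels": ["negative", "positive"],
      "regularizer_scale": 1e-5,
      "quantize": True,
  }
  return Encoder(config, base_layers.TRAIN)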
# File: models-master/research/seq_flow_lite/models/transformer_encoder.py
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of pQRNN model."""
# pylint: disable=arguments-renamed
from absl import logging
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import transformer_layers # import seq_flow_lite module
class Model(tf.keras.layers.Layer):
"""Quantized transformer encoder."""
def __init__(self, config, mode):
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("intermediate_size")
_get_params("max_time_step")
_get_params("embedding_size")
_get_params("vocabulary_size")
_get_params("num_layers")
_get_params("labels")
_get_params("regularizer_scale")
_get_params("num_heads")
_get_params("model_dimension")
_get_params("quantize")
_get_params("activation_dropout_rate", 0.0)
_get_params("attention_dropout_rate", 0.0)
self.parameters = base_layers.Parameters(mode, self.quantize,
self.regularizer_scale)
super(Model, self).__init__()
def build(self, input_shape):
self.transformer = transformer_layers.TransformerEncoderStack(
parameters=self.parameters,
num_layers=self.num_layers,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
max_time_step=self.max_time_step,
num_heads=self.num_heads,
model_dimension=self.model_dimension,
vocabulary_size=self.vocabulary_size,
activation_dropout_rate=self.activation_dropout_rate,
attention_dropout_rate=self.attention_dropout_rate)
def call(self, indices, sequence_length):
return self.transformer(indices, sequence_length)
class ModelWithEmbeddings(Model):
"""Quantized transformer encoder which takes embeddings instead of indices."""
def build(self, input_shape):
self.transformer_with_input_embedding = transformer_layers.TransformerEncoderStackWithInputEmbedding(
parameters=self.parameters,
num_layers=self.num_layers,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
max_time_step=self.max_time_step,
num_heads=self.num_heads,
model_dimension=self.model_dimension,
vocabulary_size=self.vocabulary_size,
activation_dropout_rate=self.activation_dropout_rate,
attention_dropout_rate=self.attention_dropout_rate)
def call(self, embeddings, sequence_length):
return self.transformer_with_input_embedding(embeddings, sequence_length)
class FunnelTransformerModel(Model):
"""Quantized transformer encoder which takes embeddings instead of indices."""
def __init__(self, config, mode):
self.pool_windows = config.get("pool_windows", None)
super(FunnelTransformerModel, self).__init__(config, mode)
def build(self, input_shape):
self.funnel_transformer = transformer_layers.FunnelTransformerEncoderStack(
parameters=self.parameters,
num_layers=self.num_layers,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
max_time_step=self.max_time_step,
num_heads=self.num_heads,
model_dimension=self.model_dimension,
vocabulary_size=self.vocabulary_size,
activation_dropout_rate=self.activation_dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
pool_windows=self.pool_windows)
def call(self, embeddings, sequence_length):
return self.funnel_transformer(embeddings, sequence_length)
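# Illustrative construction of the quantized transformer encoder (all values
# are assumptions for this sketch, not recommended settings).
def _example_transformer_encoder():
  config = {
      "intermediate_size": 256,
      "max_time_step": 64,
      "embedding_size": 64,
      "vocabulary_size": 4096,
      "num_layers": 2,
      "labels": ["negative", "positive"],
      "regularizer_scale": 1e-5,
      "num_heads": 4,
      "model_dimension": 64,
      "quantize": False,
  }
  return Model(config, base_layers.TRAIN)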
# File: models-master/research/seq_flow_lite/models/transformer_uniform_attn_decoder.py
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Transformer decoder model."""
import math
from absl import logging
from tensor2tensor.utils import beam_search
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from layers import transformer_layers # import seq_flow_lite module
class TransformerUniformAttnDecoder(base_layers.BaseLayer):
"""Transformer Uniform Attention Decoder."""
def __init__(self,
model_dimension,
max_time_step,
num_heads,
intermediate_size,
activation_dropout_rate=0.0,
attention_dropout_rate=0.0,
beam_size=1,
cached_kv=False,
**kwargs):
self.model_dimension = model_dimension
self.decoder_uniform_attn = transformer_layers.DecoderUniformAttention(
model_dimension,
max_time_step,
attention_dropout_rate=attention_dropout_rate,
beam_size=beam_size,
**kwargs)
self.multihead_cross_attn = transformer_layers.DecoderMultiheadAttention(
model_dimension,
num_heads,
cached_kv=cached_kv,
attention_dropout_rate=attention_dropout_rate,
**kwargs)
self.prx = dense_layers.BaseQDense(
model_dimension, activation=None, normalize=False, bias=False, **kwargs)
self.upprx = dense_layers.BaseQDense(
intermediate_size, normalize=False, **kwargs)
self.downprx = dense_layers.BaseQDense(
model_dimension, activation=None, normalize=False, **kwargs)
self.activation_dropout_rate = activation_dropout_rate
self.ln1 = normalization_layers.LayerNormalization(**kwargs)
self.ln2 = normalization_layers.LayerNormalization(**kwargs)
self.q0 = quantization_layers.ActivationQuantization(**kwargs)
self.q1 = quantization_layers.ActivationQuantization(**kwargs)
self.q2 = quantization_layers.ActivationQuantization(**kwargs)
super(TransformerUniformAttnDecoder, self).__init__(**kwargs)
def call(self,
dec_inputs,
dec_mask,
dec_inverse_normalizer,
enc_output,
enc_mask,
enc_inverse_normalizer,
cross_attn_mask=None,
step=None,
selected_beams=None,
cache=None):
batch_size = self.get_batch_dimension(dec_inputs)
self._assert_rank_and_type(dec_inputs, 3)
self._assert_rank_and_type(dec_mask, 3)
assert dec_inputs.get_shape().as_list()[-1] == self.model_dimension
self_attn_output = self.decoder_uniform_attn(
dec_inputs,
dec_mask,
dec_inverse_normalizer,
step=step,
beam_indices=selected_beams,
cache=cache)
cross_attn_output = self.multihead_cross_attn(dec_inputs, dec_mask,
dec_inverse_normalizer,
enc_output, enc_mask,
enc_inverse_normalizer,
cross_attn_mask)
layer_out = self.q0(cross_attn_output + self_attn_output)
layer_out = tf.reshape(layer_out, [-1, self.model_dimension])
layer_out = self.prx(layer_out)
if self.parameters.mode == base_layers.TRAIN:
layer_out = tf.nn.dropout(layer_out, rate=self.activation_dropout_rate)
dec_inputs = tf.reshape(dec_inputs, [-1, self.model_dimension])
dec_inputs_updated = self.q1(self.ln1(dec_inputs + layer_out))
# Feed forward network.
layer_out = self.upprx(dec_inputs_updated)
layer_out = self.downprx(layer_out)
if self.parameters.mode == base_layers.TRAIN:
layer_out = tf.nn.dropout(layer_out, rate=self.activation_dropout_rate)
outputs = self.q2(self.ln2(dec_inputs_updated + layer_out))
return tf.reshape(outputs, [batch_size, -1, self.model_dimension])
class TransformerUniformAttnDecoderStack(base_layers.BaseLayer):
"""TransformerUniformAttnDecoderStack Decoder."""
def __init__(self,
num_layers,
max_time_step,
vocabulary_size,
embedding_size,
model_dimension,
num_heads,
intermediate_size,
beam_size=1,
activation_dropout_rate=0.1,
attention_dropout_rate=0.0,
cached_kv=False,
**kwargs):
super(TransformerUniformAttnDecoderStack, self).__init__(**kwargs)
self.max_time_step = max_time_step
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
self.activation_dropout_rate = activation_dropout_rate
self.layers = []
for _ in range(num_layers):
self.layers.append(
TransformerUniformAttnDecoder(
model_dimension=model_dimension,
max_time_step=max_time_step,
num_heads=num_heads,
intermediate_size=intermediate_size,
beam_size=beam_size,
cached_kv=cached_kv,
activation_dropout_rate=activation_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
**kwargs))
def call(self,
dec_inputs,
dec_mask,
enc_output,
enc_mask,
step=None,
selected_beams=None,
cache=None):
self._assert_rank_and_type(dec_mask, 2)
self._assert_rank_and_type(enc_mask, 2)
dec_mask_rank3 = tf.expand_dims(dec_mask, axis=2)
dec_inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(dec_mask_rank3))
enc_mask_rank3 = tf.expand_dims(enc_mask, 1)
enc_inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(enc_mask_rank3))
cross_attn_mask = enc_mask_rank3
layer_in = dec_inputs
if self.parameters.mode == base_layers.TRAIN:
layer_in = tf.nn.dropout(layer_in, rate=self.activation_dropout_rate)
enc_output_feature_dim = enc_output.get_shape().as_list()[2]
enc_output = tf.reshape(enc_output, [-1, enc_output_feature_dim])
for i, layer in enumerate(self.layers):
layer_cache = cache["layer_%d" % i] if cache is not None else None
layer_in = layer(
layer_in,
dec_mask_rank3,
dec_inverse_normalizer,
enc_output,
enc_mask,
enc_inverse_normalizer,
cross_attn_mask,
step=step,
selected_beams=selected_beams,
cache=layer_cache)
return layer_in
class Model(tf.keras.layers.Layer):
"""Quantized transformer decoder."""
def __init__(self, config, mode):
super(Model, self).__init__()
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("intermediate_size")
_get_params("max_dec_time_step")
_get_params("max_enc_time_step")
_get_params("embedding_size")
_get_params("vocabulary_size")
_get_params("num_layers")
_get_params("labels")
_get_params("regularizer_scale")
_get_params("num_heads")
_get_params("model_dimension")
_get_params("beam_size", 1)
_get_params("quantize", True)
_get_params("cached_kv", False)
_get_params("attention_dropout_rate", 0.0)
_get_params("activation_dropout_rate", 0.0)
# If set, a separate dense layer is used to generate the logits instead of
# re-using the input embedding table.
_get_params("use_output_layer", False)
self.parameters = base_layers.Parameters(mode, self.quantize,
self.regularizer_scale)
# Activation/Normalization enabled on input bottleneck as there is no
# temporal information.
self.input_bottleneck = dense_layers.BaseQDenseVarLen(
self.model_dimension, rank=3, parameters=self.parameters)
self.output_bottleneck = dense_layers.BaseQDense(
self.embedding_size,
normalize=False,
activation=None,
bias=False,
parameters=self.parameters)
self.embedding = embedding_layers.EmbeddingFullyConnected(
shape=[self.vocabulary_size, self.embedding_size],
initializer=tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3)),
parameters=self.parameters)
if self.use_output_layer:
self.output_layer = dense_layers.BaseQDense(
self.vocabulary_size,
activation=None,
normalize=False,
bias=False,
parameters=self.parameters)
self.positional_embedding = embedding_layers.EmbeddingLayer(
shape=[self.max_dec_time_step, self.model_dimension],
initializer=tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3)),
parameters=self.parameters)
self.ln = normalization_layers.LayerNormalization(
parameters=self.parameters)
self.qact = quantization_layers.ActivationQuantization(
parameters=self.parameters)
# Scales the weights for computing logits.
self.logits_fc_weights_scale_factor = None
self.logits_fc_bias = self.add_weight(
"logits_fc_bias",
shape=[self.vocabulary_size],
initializer=tf.constant_initializer(0),
dtype="float32")
# Optional bias which can be used to mask logits output.
self.output_bias = None
self.transformer_uniform_attn_decoder = TransformerUniformAttnDecoderStack(
parameters=self.parameters,
num_layers=self.num_layers,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
max_time_step=self.max_dec_time_step,
num_heads=self.num_heads,
model_dimension=self.model_dimension,
vocabulary_size=self.vocabulary_size,
beam_size=self.beam_size,
cached_kv=self.cached_kv,
attention_dropout_rate=self.attention_dropout_rate,
activation_dropout_rate=self.activation_dropout_rate)
# Beam search output.
self.finished_seq = None
self.finished_scores = None
def call(self,
decode_ids,
decode_ids_mask,
enc_output,
enc_mask,
start_ids=None,
eos_id=None,
pad_id=None,
input_id=None,
time_step=None,
selected_beams=None):
if self.parameters.mode == base_layers.TRAIN:
inputs = self.training_inputs(decode_ids, decode_ids_mask)
layer_out = self.transformer_uniform_attn_decoder(inputs, decode_ids_mask,
enc_output, enc_mask)
logits, predicted_ids = self.model_outputs(layer_out)
elif self.parameters.mode in [base_layers.EVAL, base_layers.PREDICT]:
logits, predicted_ids = self.decode_beam_search(start_ids, eos_id, pad_id,
enc_output, enc_mask)
elif self.parameters.mode == base_layers.TFLITE:
input_values = self.embedding(input_id)
# time_step starts from 1.
pos_values = self.positional_embedding(time_step - 1)
pos_values = tf.reshape(pos_values, [-1, 1, self.embedding_size])
input_mask = tf.ones(tf.shape(input_values)[:-1], dtype=tf.float32)
inputs = self.qact(self.ln(input_values + pos_values))
layer_out = self.transformer_uniform_attn_decoder(
inputs,
input_mask,
enc_output,
enc_mask,
step=time_step,
selected_beams=selected_beams)
logits, predicted_ids = self.model_outputs(layer_out)
else:
assert "Invalid mode."
return logits, predicted_ids
def training_inputs(self, input_ids, input_mask):
input_values = self.embedding(input_ids)
if self.embedding_size != self.model_dimension:
input_values = self.input_bottleneck(input_values, input_mask)
pos_indices = tf.cumsum(input_mask, axis=1, exclusive=True)
pos_indices = tf.cast(pos_indices, dtype=tf.int32)
pos_values = self.positional_embedding(pos_indices)
inputs = self.qact(self.ln(input_values + pos_values))
return inputs
def model_outputs(self, layer_in):
bsz = layer_in.get_shape().as_list()[0] or tf.shape(layer_in)[0]
layer_out = tf.reshape(layer_in, [-1, self.model_dimension])
if self.use_output_layer:
logits = self.output_layer(layer_out)
else:
if self.model_dimension != self.embedding_size:
layer_out = self.output_bottleneck(layer_out)
logits = self.embedding.fully_connected(
layer_out,
bias=self.logits_fc_bias,
weights_scale_factor=self.logits_fc_weights_scale_factor)
logits = tf.reshape(logits, [bsz, -1, self.vocabulary_size])
# Optional bias to mask out logits before applying argmax.
if self.output_bias is not None:
logits += self.output_bias
predicted_ids = tf.argmax(logits, axis=2, output_type=tf.int64)
return logits, predicted_ids
def decode_beam_search(self,
start_ids,
eos_id,
pad_id,
enc_output,
enc_mask,
scope="model"):
batch_size = tf.shape(start_ids)[0]
cache = { # pylint: disable=g-complex-comprehension
"layer_%d" % layer: {
"uniform_avg": tf.zeros([batch_size, 1, self.model_dimension]),
} for layer in range(self.num_layers)
}
cache["logits"] = tf.zeros([batch_size, 0, self.vocabulary_size])
pos_indices = tf.range(self.max_dec_time_step, dtype=tf.int32)
pos_indices = tf.reshape(pos_indices, [1, -1])
pos_values = self.positional_embedding(pos_indices)
def beam_search_tile(output, tile_pattern, final_shape):
x = tf.tile(output, tile_pattern)
x = tf.reshape(x, final_shape)
return x
enc_output_feature_dim = enc_output.get_shape().as_list()[2]
enc_output = beam_search_tile(
enc_output, [1, self.beam_size, 1],
[batch_size * self.beam_size, -1, enc_output_feature_dim])
enc_mask = beam_search_tile(enc_mask, [1, self.beam_size],
[batch_size * self.beam_size, -1])
def symbols_to_logits_fn(ids, step, cache):
"""Looks up ids to logits."""
logging.info("Running symbols to logits. ids=%s, step=%s, cache=%s", ids,
step, cache)
curr_id = ids[:, -1:]
with tf.name_scope(scope):
curr_embed = self.embedding(curr_id)
input_mask = tf.ones(tf.shape(curr_embed)[:-1], dtype=tf.float32)
if self.embedding_size != self.model_dimension:
curr_embed = self.input_bottleneck(curr_embed, input_mask)
inputs = self.qact(
self.ln(curr_embed + pos_values[:, step:step + 1, :]))
layer_out = self.transformer_uniform_attn_decoder(
inputs,
input_mask,
enc_output,
enc_mask,
step=step + 1,
cache=cache)
next_logits, _ = self.model_outputs(layer_out)
cache["logits"] = tf.concat([cache["logits"], next_logits], axis=1)
return next_logits, cache
self.finished_seq, self.finished_scores, states = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids=start_ids,
beam_size=self.beam_size,
decode_length=self.max_dec_time_step,
vocab_size=self.vocabulary_size,
alpha=0.6,
eos_id=eos_id,
states=cache)
beam_ids = self.finished_seq[:, 0, 1:]
beam_ids = tf.pad(
beam_ids, [[0, 0], [0, self.max_dec_time_step - tf.shape(beam_ids)[1]]],
constant_values=pad_id)
logits = states["logits"][:, 0, :, :]
logits = tf.pad(
logits,
[[0, 0], [0, self.max_dec_time_step - tf.shape(logits)[1]], [0, 0]],
constant_values=self.parameters.invalid_logit)
return logits, beam_ids
class ModelEvalWithGTLogitsAndPredictions(Model):
"""Model with EVAL mode logits and predictions based on ground truth inputs at each step."""
def call(self,
decode_ids,
decode_ids_mask,
enc_output,
enc_mask,
start_ids=None,
eos_id=None,
pad_id=None,
input_id=None,
time_step=None,
selected_beams=None):
if self.parameters.mode in [base_layers.TRAIN, base_layers.EVAL]:
inputs = self.training_inputs(decode_ids, decode_ids_mask)
layer_out = self.transformer_uniform_attn_decoder(inputs, decode_ids_mask,
enc_output, enc_mask)
logits, predicted_ids = self.model_outputs(layer_out)
elif self.parameters.mode == base_layers.PREDICT:
logits, predicted_ids = self.decode_beam_search(
start_ids,
eos_id,
pad_id,
enc_output,
enc_mask,
scope="model_eval_with_gt_logits_and_predictions")
elif self.parameters.mode == base_layers.TFLITE:
input_values = self.embedding(input_id)
# time_step starts from 1.
pos_values = self.positional_embedding(time_step - 1)
pos_values = tf.reshape(pos_values, [-1, 1, self.embedding_size])
input_mask = tf.ones(tf.shape(input_values)[:-1], dtype=tf.float32)
inputs = self.qact(self.ln(input_values + pos_values))
layer_out = self.transformer_uniform_attn_decoder(
inputs,
input_mask,
enc_output,
enc_mask,
step=time_step,
selected_beams=selected_beams)
logits, predicted_ids = self.model_outputs(layer_out)
else:
assert "Invalid mode."
return logits, predicted_ids
class ModelEvalWithGTLogits(Model):
"""Model with EVAL mode logits computed based on ground truth input at each step."""
def call(self,
decode_ids,
decode_ids_mask,
enc_output,
enc_mask,
start_ids=None,
eos_id=None,
pad_id=None,
input_id=None,
time_step=None,
selected_beams=None):
logits = None
if self.parameters.mode in [base_layers.TRAIN, base_layers.EVAL]:
inputs = self.training_inputs(decode_ids, decode_ids_mask)
layer_out = self.transformer_uniform_attn_decoder(inputs, decode_ids_mask,
enc_output, enc_mask)
logits, predicted_ids = self.model_outputs(layer_out)
if self.parameters.mode in [base_layers.EVAL, base_layers.PREDICT]:
# EVAL mode predictions are based on beam search path.
_, predicted_ids = self.decode_beam_search(
start_ids,
eos_id,
pad_id,
enc_output,
enc_mask,
scope="model_eval_with_gt_logits")
if self.parameters.mode == base_layers.TFLITE:
input_values = self.embedding(input_id)
# time_step starts from 1.
pos_values = self.positional_embedding(time_step - 1)
pos_values = tf.reshape(pos_values, [-1, 1, self.embedding_size])
input_mask = tf.ones(tf.shape(input_values)[:-1], dtype=tf.float32)
inputs = self.qact(self.ln(input_values + pos_values))
layer_out = self.transformer_uniform_attn_decoder(
inputs,
input_mask,
enc_output,
enc_mask,
step=time_step,
selected_beams=selected_beams)
logits, predicted_ids = self.model_outputs(layer_out)
return logits, predicted_ids
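# Illustrative construction of the quantized transformer decoder (values are
# assumptions for this sketch, not recommended settings).
def _example_uniform_attn_decoder():
  config = {
      "intermediate_size": 256,
      "max_dec_time_step": 32,
      "max_enc_time_step": 64,
      "embedding_size": 64,
      "vocabulary_size": 4096,
      "num_layers": 2,
      "labels": [],
      "regularizer_scale": 1e-5,
      "num_heads": 4,
      "model_dimension": 64,
      "beam_size": 2,
  }
  return Model(config, base_layers.TRAIN)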
# File: models-master/research/seq_flow_lite/models/prado.py
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of PRADO model."""
import copy
from absl import logging
import numpy as np
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import conv_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import projection_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from tf_ops import tf_custom_ops_py # import seq_flow_lite module
class PaddedMaskedVarLenConv(conv_layers.EncoderQConvolutionVarLen):
"""A layer that performs padded masked convolution."""
def __init__(self, invalid_value, ngram=2, skip_bigram=None, **kwargs):
self.invalid_value = invalid_value
assert ngram is None or (ngram >= 1 and ngram <= 5)
assert skip_bigram is None or skip_bigram == 1 or skip_bigram == 2
assert bool(ngram is None) != bool(skip_bigram is None)
self.kwidth = ngram if ngram is not None else (skip_bigram + 2)
mask = [1] * self.kwidth
self.skipgram = skip_bigram is not None
if skip_bigram is not None:
mask[1], mask[skip_bigram] = 0, 0
self.mask = np.array(mask, dtype="float32").reshape((1, self.kwidth, 1, 1))
self.zero_pad = tf.keras.layers.ZeroPadding1D(padding=[0, self.kwidth - 1])
super(PaddedMaskedVarLenConv, self).__init__(
ksize=self.kwidth, rank=3, padding="VALID", activation=None, **kwargs)
def call(self, inputs, mask, inverse_normalizer):
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
maskr4 = tf.expand_dims(mask, axis=1)
inputs_padded = self.zero_pad(inputs)
result = super(PaddedMaskedVarLenConv, self).call(inputs_padded, maskr4,
inverse_normalizer)
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
return result * mask + (1 - mask) * self.invalid_value
return result
def quantize_parameter(self, weight, num_bits=8):
weight = super(PaddedMaskedVarLenConv, self).quantize_parameter(
weight, num_bits=num_bits)
return weight * tf.convert_to_tensor(self.mask) if self.skipgram else weight
class AttentionPoolReduce(base_layers.BaseLayer):
"""Attention pooling and reduce."""
def __init__(self, filters, ngram=2, skip_bigram=None, **kwargs):
super(AttentionPoolReduce, self).__init__(**kwargs)
self.filters = filters
self.value = PaddedMaskedVarLenConv(
0, filters=filters, ngram=ngram, skip_bigram=skip_bigram, **kwargs)
self.attention_logits = PaddedMaskedVarLenConv(
self.parameters.invalid_logit,
filters=filters,
ngram=ngram,
skip_bigram=skip_bigram,
**kwargs)
def call(self, values_in, attention_in, mask, inverse_normalizer):
self._assert_rank_and_type(values_in, 3)
self._assert_rank_and_type(attention_in, 3)
self._assert_rank_and_type(mask, 3)
values = self.value(values_in, mask, inverse_normalizer)
attention_logits = self.attention_logits(attention_in, mask,
inverse_normalizer)
if self.parameters.mode == base_layers.TFLITE:
return tf_custom_ops_py.expected_value_op(attention_logits, values)
else:
attention_logits = tf.transpose(attention_logits, [0, 2, 1])
values = tf.transpose(values, [0, 2, 1])
attention = tf.nn.softmax(attention_logits)
return tf.reduce_sum(attention * values, axis=2)
class Encoder(tf.keras.layers.Layer):
"""A PRADO keras model."""
def __init__(self, config, mode, **kwargs):
super(Encoder, self).__init__(**kwargs)
def _get_params(varname, default_value=None):
value = config[varname] if varname in config else default_value
default = "" if varname in config else " (default)"
logging.info("%s = %s%s", varname, value, default)
setattr(self, varname, value)
_get_params("labels")
_get_params("quantize", True)
_get_params("embedding_regularizer_scale", 35e-3)
_get_params("embedding_size", 64)
_get_params("unigram_channels", 0)
_get_params("bigram_channels", 0)
_get_params("trigram_channels", 0)
_get_params("fourgram_channels", 0)
_get_params("fivegram_channels", 0)
_get_params("skip1bigram_channels", 0)
_get_params("skip2bigram_channels", 0)
_get_params("network_regularizer_scale", 1e-4)
_get_params("keep_prob", 1.0)
self.num_classes = len(self.labels)
self.parameters = base_layers.Parameters(
mode,
quantize=self.quantize,
regularizer_scale=self.embedding_regularizer_scale)
self.values_fc = dense_layers.BaseQDenseVarLen(
units=self.embedding_size, rank=3, parameters=self.parameters)
self.attention_fc = dense_layers.BaseQDenseVarLen(
units=self.embedding_size, rank=3, parameters=self.parameters)
self.parameters = copy.copy(self.parameters)
self.parameters.regularizer_scale = self.network_regularizer_scale
self.attention_pool_layers = []
self._add_attention_pool_layer(self.unigram_channels, 1)
self._add_attention_pool_layer(self.bigram_channels, 2)
self._add_attention_pool_layer(self.trigram_channels, 3)
self._add_attention_pool_layer(self.fourgram_channels, 4)
self._add_attention_pool_layer(self.fivegram_channels, 5)
self._add_attention_pool_layer(self.skip1bigram_channels, None, 1)
self._add_attention_pool_layer(self.skip2bigram_channels, None, 2)
self.concat_quantizer = quantization_layers.ConcatQuantization(
axis=1, parameters=self.parameters)
self.final_fc = dense_layers.BaseQDense(
units=self.num_classes,
rank=2,
parameters=self.parameters,
activation=None)
def _add_attention_pool_layer(self, channels, ngram, skip_bigram=None):
if channels > 0:
self.attention_pool_layers.append(
AttentionPoolReduce(
filters=channels,
skip_bigram=skip_bigram,
ngram=ngram,
parameters=self.parameters))
def _apply_fc_dropout(self, layer, inputs, mask, inverse_normalizer):
outputs = layer(inputs, mask, inverse_normalizer)
if self.parameters.mode == base_layers.TRAIN and self.keep_prob < 1.0:
return tf.nn.dropout(outputs, rate=(1 - self.keep_prob))
return outputs
def call(self, projection, seq_length):
mask = tf.sequence_mask(
seq_length, tf.shape(projection)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
maskr3 = tf.expand_dims(mask, axis=2)
values_in = self._apply_fc_dropout(self.values_fc, projection, mask,
inverse_normalizer)
attention_in = self._apply_fc_dropout(self.attention_fc, projection, mask,
inverse_normalizer)
tensors = [
layer(values_in, attention_in, maskr3, inverse_normalizer)
for layer in self.attention_pool_layers
]
assert tensors, "no ngram channels have been configured"
pre_logits = self.concat_quantizer(tensors)
return self.final_fc(pre_logits)
class Model(Encoder):
def __init__(self, config, mode, **kwargs):
super(Model, self).__init__(config, mode, **kwargs)
self.projection = projection_layers.ProjectionLayer(config, mode)
def call(self, inputs):
projection, seq_length = self.projection(inputs)
return super(Model, self).call(projection, seq_length)
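# Editor's illustrative sketch (not part of the original file): a minimal
# config dict for the Encoder above, covering the keys read by _get_params.
# All values here are placeholders, not tuned or recommended settings, and
# ProjectionLayer may require additional keys that are not shown here.
_EXAMPLE_PRADO_CONFIG = {
    "labels": ["negative", "positive"],
    "quantize": True,
    "embedding_size": 64,
    "unigram_channels": 0,
    "bigram_channels": 64,
    "trigram_channels": 64,
    "fourgram_channels": 0,
    "fivegram_channels": 0,
    "skip1bigram_channels": 0,
    "skip2bigram_channels": 0,
    "keep_prob": 0.9,
}
# model = Model(_EXAMPLE_PRADO_CONFIG, base_layers.TRAIN)
# logits = model(tf.constant(["some input text"]))  # [batch, num_classes]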
| 8,180 | 40.739796 | 80 | py |
models | models-master/research/seq_flow_lite/models/sgnn/sgnn_test.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for seq_flow_lite.sgnn."""
import tensorflow as tf
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
from models import sgnn # import seq_flow_lite module
@test_util.run_all_in_graph_and_eager_modes
class SgnnTest(tf.test.TestCase):
def test_preprocess(self):
self.assertAllEqual(
sgnn.preprocess(
tf.constant([['Hello World!'], [u'你好'],
[u'مرحبا بالعالم']])),
[['hello'.encode(), 'world!'.encode()], [u'你好'.encode()],
[u'مرحبا'.encode(), u'بالعالم'.encode()]])
def test_get_ngram(self):
tokens = tf.ragged.constant([['hello', 'world'], [u'你好'],
[u'مرحبا', u'بالعالم']])
self.assertAllEqual(
sgnn.get_ngrams(tokens, 3),
[[
b'^he', b'hel', b'ell', b'llo', b'lo$', b'^wo', b'wor', b'orl',
b'rld', b'ld$'
], [u'^你好'.encode(), u'你好$'.encode()],
[
u'^مر'.encode(), u'مرح'.encode(), u'رحب'.encode(),
u'حبا'.encode(), u'با$'.encode(), u'^با'.encode(),
u'بال'.encode(), u'الع'.encode(), u'لعا'.encode(),
u'عال'.encode(), u'الم'.encode(), u'لم$'.encode()
]])
def test_project(self):
ngrams = tf.ragged.constant([[b'^h', b'he', b'el', b'll', b'lo', b'o$'],
[b'^h', b'hi', b'i$']])
self.assertAllClose(
sgnn.fused_project(ngrams, [5, 7], 0x7FFFFFFF),
[[0.448691, -0.238499], [-0.037561, 0.080748]])
self.assertAllClose(
sgnn.fused_project(ngrams, [5, 7], 0x7FFFFFFF),
sgnn.project(ngrams, [5, 7], 0x7FFFFFFF))
def test_sgnn(self):
self.assertAllClose(
sgnn.sgnn(tf.constant([['hello'], ['hi']]), [3, 5, 7], 2),
[[0.268503, 0.448691, -0.238499], [0.093143, -0.037561, 0.080748]])
def test_keras_model(self):
hparams = sgnn.Hparams(learning_rate=2e-4)
model = sgnn.keras_model([1, 2, 3, 4], 2, [100, 50], hparams)
self.assertIsNotNone(model)
if __name__ == '__main__':
tf.test.main()
| 2,771 | 36.972603 | 95 | py |
models | models-master/research/seq_flow_lite/models/sgnn/sgnn.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds SGNN model.
[1] Sujith Ravi and Zornitsa Kozareva. 2018. "Self-governing neural networks for
on-device short text
classification." In Proceedings of the 2018 Conference on Empirical Methods in
Natural Language
Processing, pages 887-893. Association for Computational Linguistics
The model will be constructed in this way:
* Projects text to float features, the size is defined by projection_size
* Fully connected layer predicts the class of predictions.
"""
import collections
import tensorflow.compat.v2 as tf
import tensorflow_text as tf_text
from tensorflow_lite_support.custom_ops.python import tflite_text_api
# Hparam collections that will be used to tune the model.
Hparams = collections.namedtuple(
'Hparams',
[
# Learning rate for the optimizer.
'learning_rate'
])
def preprocess(text):
"""Normalize the text, and return tokens."""
text = tf.reshape(text, [-1])
text = tf_text.case_fold_utf8(text)
tokenizer = tflite_text_api.WhitespaceTokenizer()
return tokenizer.tokenize(text)
def get_ngrams(tokens, n):
"""Generates character ngrams from tokens.
Args:
tokens: A string ragged tensor for tokens, in shape of [batch_size,
num_token].
n: ngram size for char ngrams.
Returns:
A string ragged tensor for ngrams, in shape of [batch_size, num_token,
ngrams].
"""
chars_split = tf.strings.unicode_split('^' + tokens + '$', 'UTF-8')
chars_joined = tflite_text_api.ngrams(
chars_split,
width=n,
axis=-1,
reduction_type=tf_text.Reduction.STRING_JOIN,
string_separator='')
flat_row_splits = tf.nn.embedding_lookup(chars_joined.values.row_splits,
chars_joined.row_splits)
return tf.RaggedTensor.from_row_splits(chars_joined.values.values,
flat_row_splits)
def project(ngrams, hash_seed, buckets):
"""Projects a ngram RaggedTensor to float tensor.
Args:
ngrams: A string ragged tensor, in shape of [batch_size, num_token, ngrams].
hash_seed: A python int list, in shape of [num_hash].
buckets: An int for the max value of projected integers.
Returns:
A float tensor that projects ngrams to the space represented by hash_seed,
in shape of [batch_size, num_hash].
"""
num_hash = len(hash_seed)
# Hash ngrams string tensor to hash signatures.
signatures = tf.ragged.map_flat_values(tf.strings.to_hash_bucket_fast, ngrams,
buckets)
  # Each ngram signature is multiplied by a different hash seed, reduced
  # modulo the number of hash buckets, and then linearly mapped:
# value = abs(signature * seed % bucket)
# if value > bucket / 2: value -= buckets
hash_tensor = tf.constant(hash_seed, dtype=tf.int64)
value = tf.math.floormod(
tf.abs(signatures.values * tf.reshape(hash_tensor, [-1, 1])), buckets)
value = value - tf.cast(tf.greater(value, buckets >> 1), tf.int64) * buckets
# Wrap values to ragged tensor, and calculates
# output_i,j = mean(value_i,j,k) for k-th ngram in i-th text
# computed with j-th hash seed
row_lengths = tf.repeat(
tf.reshape(signatures.row_lengths(), [1, -1]), num_hash, axis=0)
row_lengths = tf.cast(tf.reshape(row_lengths, [-1]), tf.int32)
result = tf.RaggedTensor.from_row_lengths(
tf.RaggedTensor.from_row_lengths(tf.reshape(value, [-1]), row_lengths),
tf.repeat(tf.shape(signatures.row_lengths()), num_hash))
result = tf.reduce_mean(result, 2) / (buckets >> 1)
return tf.transpose(tf.reshape(result.values, [num_hash, -1]))
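# Editor's note (illustrative, not part of the original module): the per-ngram
# mapping implemented above, written out for a single hash seed on plain
# Python integers. The signature and seed values below are made up.
def _example_single_seed_projection(signature=123456789, seed=7919,
                                    buckets=0x7FFFFFFF):
  value = abs(signature * seed) % buckets
  if value > buckets >> 1:
    value -= buckets
  # project() averages this quantity over all ngrams of a text and divides by
  # (buckets >> 1), yielding a feature in [-1, 1].
  return value / float(buckets >> 1)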
def fused_project(ngrams, hash_seed, buckets):
"""A wrapper to fuse project method when converting to TFLite model.
Args:
ngrams: A string ragged tensor, in shape of [batch_size, num_token, ngrams].
hash_seed: A python int list, in shape of [num_hash].
buckets: An int for the max value of projected integers.
Returns:
A float tensor that projects ngrams to the space represented by hash_seed,
in shape of [batch_size, num_hash].
"""
hash_seed_attr = ' '.join(['i: %d' % seed for seed in hash_seed])
experimental_implements = [
'name: "tftext:custom:SgnnProjection"',
'attr { key: "hash_seed" value { list {%s} } }' % hash_seed_attr,
'attr { key: "buckets" value { i: %d } }' % buckets,
]
experimental_implements = ' '.join(experimental_implements)
@tf.function(experimental_implements=experimental_implements)
def func(ngrams_values, *ngrams_row_splits):
ngrams = tf.RaggedTensor.from_nested_row_splits(
flat_values=ngrams_values, nested_row_splits=ngrams_row_splits)
return project(ngrams, hash_seed, buckets)
return func(ngrams.flat_values, *ngrams.nested_row_splits)
def sgnn(texts, hash_seed, ngram_size):
"""Projects the string text to float features.
  It first generates ngrams (up to size N) from the tokens of the given text,
  then projects each ngram tensor with a partition of the seeds.
Args:
texts: a string tensor, in shape of [batch_size].
hash_seed: a list of integers, in shape of [projection_size].
ngram_size: max size of ngram to generate features.
Returns:
A float tensor that projects ngrams to the space represented by hash_seed,
in shape of [batch_size, projection_size].
"""
projection_size = len(hash_seed)
partition_size = int(projection_size / ((ngram_size + 1) * ngram_size / 2))
if partition_size == 0:
raise ValueError(
'projection size %d is not enough for %d ngram partitions' %
(projection_size, ngram_size))
indices = [int(i * (i + 1) / 2) * partition_size for i in range(ngram_size)]
indices.append(projection_size)
projection_layer = []
tokens = preprocess(texts)
for i in range(ngram_size):
ngram = get_ngrams(tokens, i + 1)
projection = fused_project(ngram, hash_seed[indices[i]:indices[i + 1]],
0x7FFFFFFF)
projection_layer.append(projection)
return tf.cast(tf.concat(projection_layer, -1), tf.float32)
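# Editor's worked example (not part of the original module): with
# projection_size=600 and ngram_size=3, partition_size is 600 / 6 = 100 and
# the seed slices become [0, 100) for unigrams, [100, 300) for bigrams and
# [300, 600) for trigrams, so longer ngrams receive more hash seeds.
def _example_seed_partition(projection_size=600, ngram_size=3):
  partition_size = int(projection_size / ((ngram_size + 1) * ngram_size / 2))
  indices = [int(i * (i + 1) / 2) * partition_size for i in range(ngram_size)]
  indices.append(projection_size)
  return list(zip(indices[:-1], indices[1:]))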
class ProjectLayer(tf.keras.layers.Layer):
"""Projects the texts to a fixed sized features."""
def __init__(self, seed, ngram_size, **kwargs):
self.seed = seed
self.ngram_size = ngram_size
super(ProjectLayer, self).__init__(**kwargs)
def get_config(self):
return {
'seed': self.seed,
'ngram_size': self.ngram_size,
}
def call(self, x):
return sgnn(x, self.seed, self.ngram_size)
def compute_output_shape(self, input_shape):
return (input_shape[0], len(self.seed))
def keras_model(hash_seed, ngram_size, fc_size_list, hparams):
"""Compiles a keras model from projected features to labels.
Args:
hash_seed: a list of int used to project the feature.
ngram_size: maximum size of ngram to generate features from texts.
fc_size_list: a list of int, sizes of each fully connected layer.
hparams: hyper parameters for the model.
Returns:
A keras model that predicts the language id.
"""
if not fc_size_list:
raise ValueError(
'Must specify one or more fully connected layers via fc_size_list')
model = tf.keras.Sequential()
model.add(ProjectLayer(hash_seed, ngram_size))
for size in fc_size_list[:-1]:
model.add(tf.keras.layers.Dense(size))
model.add(tf.keras.layers.Dense(fc_size_list[-1], activation='softmax'))
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=hparams.learning_rate),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
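# Editor's usage sketch (illustrative only, not part of the original module):
# building the langid-style model with random hash seeds, mirroring train.py.
# The projection size, layer sizes and class count below are placeholders.
def _example_build_model():
  import numpy as np  # local import, used only by this sketch
  hash_seed = (np.random.uniform(-1, 1, 600) * 0x7FFFFFFF).astype(np.int64)
  hparams = Hparams(learning_rate=2e-4)
  return keras_model(hash_seed, ngram_size=3, fc_size_list=[256, 128, 7],
                     hparams=hparams)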
| 8,235 | 35.281938 | 80 | py |
models | models-master/research/seq_flow_lite/models/sgnn/train.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to train langid model.
The script builds a language detection dataset from Wikipedia and trains an
on-device SGNN model to predict the language of the given text.
"""
import os
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from models import sgnn # import seq_flow_lite module
FLAGS = flags.FLAGS
flags.DEFINE_string('output_dir', '/tmp/langid',
'Path for the output directory.')
flags.DEFINE_integer('projection_size', 600, 'Size of projection layer.')
flags.DEFINE_integer('ngram_size', 3, 'Max size of ngram to project features.')
flags.DEFINE_string('fc_layer', '256,128',
'Size of fully connected layer, separated by comma.')
flags.DEFINE_integer('batch_size', 160, 'Batch size for training.')
flags.DEFINE_integer('epochs', 10, 'Num of epochs for training.')
flags.DEFINE_float('learning_rate', 2e-4, 'learning rate for optimizer.')
LANGIDS = ['ar', 'en', 'es', 'fr', 'ru', 'zh']
def dataset_fn(batch_size, is_training, split, try_gcs, max_input_len):
"""Creates dataset to train and evaluate.
Args:
batch_size: Batch size for training or evaluation.
is_training: True if the dataset is for training.
split: Split of dataset, follow the pattern defined in
https://www.tensorflow.org/datasets/splits
try_gcs: True if loading the data from gcs.
max_input_len: Max length of input string.
Returns:
Dataset object.
"""
def _get_text(item):
return tf.strings.substr(item['text'], 0, max_input_len)
all_data = []
for idx, langid in enumerate(LANGIDS):
dataset = tfds.load(
'wikipedia/20190301.%s' % langid, try_gcs=try_gcs, split=split)
map_fn = lambda item: (_get_text(item), idx) # pylint: disable=cell-var-from-loop
dataset = dataset.map(map_fn)
all_data.append(dataset)
datasets = tf.data.experimental.sample_from_datasets(
all_data, [1. / len(all_data)] * len(LANGIDS))
repeat_count = None if is_training else 1
return datasets.cache().shuffle(100000).batch(batch_size).repeat(repeat_count)
def save_and_convert(model, output_dir):
"""Save keras model and convert to tflite."""
saved_model_path = os.path.join(output_dir, 'saved_model')
tf.saved_model.save(model, saved_model_path)
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
converter.allow_custom_ops = True
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
]
data = converter.convert()
with open(os.path.join(output_dir, 'model.tflite'), 'wb') as f:
f.write(data)
def train_and_evaluate():
"""Train and evaluate the model."""
hash_seed = np.random.uniform(-1, 1, FLAGS.projection_size) * 0x7FFFFFFF
fc_layer = [int(fc) for fc in FLAGS.fc_layer.split(',')]
fc_layer.append(len(LANGIDS) + 1)
hparams = sgnn.Hparams(learning_rate=FLAGS.learning_rate)
model = sgnn.keras_model(hash_seed, FLAGS.ngram_size, fc_layer, hparams)
model.fit(
dataset_fn(FLAGS.batch_size, True, 'train[:10%]', True, 100),
epochs=FLAGS.epochs,
steps_per_epoch=1000,
validation_steps=100,
validation_data=dataset_fn(FLAGS.batch_size, False, 'train[10:11%]', True,
100),
)
save_and_convert(model, FLAGS.output_dir)
def main(_):
if not os.path.exists(FLAGS.output_dir):
os.mkdir(FLAGS.output_dir)
train_and_evaluate()
if __name__ == '__main__':
app.run(main)
| 4,226 | 33.647541 | 86 | py |
models | models-master/research/seq_flow_lite/layers/base_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base layer for building models trained with quantization."""
import tensorflow as tf
TRAIN = "train"
EVAL = "eval"
PREDICT = "infer"
TFLITE = "tflite"
_MODE = [TRAIN, EVAL, PREDICT, TFLITE]
class Parameters:
"""A class that encapsulates parameters."""
def __init__(self,
mode,
quantize=True,
regularizer_scale=0.0,
invalid_logit=-1e6,
initializer=None):
assert isinstance(quantize, bool)
self.quantize = quantize
assert mode in _MODE
self.mode = mode
self.regularizer_scale = regularizer_scale
self.invalid_logit = invalid_logit
self.initializer = initializer
class BaseLayer(tf.keras.layers.Layer):
"""Base class for encoders."""
def __init__(self, parameters, **kwargs):
assert isinstance(parameters, Parameters)
self.parameters = parameters
super(BaseLayer, self).__init__(**kwargs)
def _assert_rank_and_type(self, tensor, rank, dtype=tf.float32):
assert len(tensor.get_shape().as_list()) == rank
assert tensor.dtype == dtype
def add_weight_wrapper(self, shape):
"""Return a weight variable for the given shape."""
if self.parameters.initializer is not None:
initializer = clone_initializer(self.parameters.initializer)
else:
initializer = tf.keras.initializers.GlorotUniform()
weight = self.add_weight(
"weight",
shape,
initializer=initializer,
trainable=True,
dtype=tf.float32)
self.add_reg_loss(weight)
return weight
def quantize_parameter(self, tensor, num_bits=8):
"""Quantize parameters when enabled."""
# For infer mode, toco computes the min/max from the weights offline to
# quantize it. During train/eval this is computed from the current value
# in the session by the graph itself.
if self.parameters.quantize and self.parameters.mode in [TRAIN, EVAL]:
# Toco expects 0.0 to be part of the quantization range.
batch_min = tf.minimum(tf.reduce_min(tensor), 0.0)
batch_max = tf.maximum(tf.reduce_max(tensor), 0.0)
return tf.quantization.fake_quant_with_min_max_vars(
tensor, batch_min, batch_max, num_bits=num_bits)
else:
return tensor
def add_bias(self, shape):
weight = self.add_weight(
"bias",
shape,
initializer=tf.keras.initializers.Zeros(),
trainable=True)
self.add_reg_loss(weight)
return weight
def add_reg_loss(self, weight):
if self.parameters.regularizer_scale > 0.0:
reg_scale = tf.convert_to_tensor(self.parameters.regularizer_scale)
reg_loss = tf.nn.l2_loss(weight) * reg_scale
self.add_loss(reg_loss)
def assign_moving_average(self, var, update, ema_decay):
return var.assign(var.read_value() * (1 - ema_decay) + (ema_decay) * update)
def quantize_tensor(self, tf_only):
if tf_only and self.parameters.mode == TFLITE:
return False
return self.parameters.quantize
def qrange_sigmoid(self, tensor, tf_only=False):
if self.quantize_tensor(tf_only):
return tf.quantization.fake_quant_with_min_max_args(tensor, 0.0, 1.0)
return tensor
def qrange_tanh(self, tensor, tf_only=False):
if self.quantize_tensor(tf_only):
return tf.quantization.fake_quant_with_min_max_args(tensor, -1.0, 1.0)
return tensor
def quantized_tanh(self, tensor, tf_only=False):
return self.qrange_tanh(tf.tanh(tensor), tf_only)
def quantized_sigmoid(self, tensor, tf_only=False):
return self.qrange_sigmoid(tf.sigmoid(tensor), tf_only)
def get_batch_dimension(self, tensor):
return tensor.get_shape().as_list()[0] or tf.shape(tensor)[0]
def inverse_normalizer(self, mask):
return tf.math.reciprocal(tf.reduce_sum(mask))
def random_drop_to_zero(self, tensor, zero_probability):
rnd = tf.random.uniform(
shape=tf.shape(tensor),
minval=-zero_probability,
maxval=(1.0 - zero_probability),
dtype=tensor.dtype)
return tf.math.ceil(rnd)
def clone_initializer(initializer):
if isinstance(initializer, tf.keras.initializers.Initializer):
return initializer.__class__.from_config(initializer.get_config())
return initializer
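# Editor's illustrative sketch (not part of the original file): the fake
# quantization that quantize_parameter() applies in TRAIN/EVAL modes maps a
# float tensor onto a uniform 8-bit grid (keeping float dtype) over a range
# that always includes 0.0. A standalone equivalent:
def _example_fake_quant(tensor=None, num_bits=8):
  tensor = tf.constant([-0.3, 0.0, 0.7]) if tensor is None else tensor
  batch_min = tf.minimum(tf.reduce_min(tensor), 0.0)
  batch_max = tf.maximum(tf.reduce_max(tensor), 0.0)
  return tf.quantization.fake_quant_with_min_max_vars(
      tensor, batch_min, batch_max, num_bits=num_bits)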
| 4,899 | 33.027778 | 80 | py |
models | models-master/research/seq_flow_lite/layers/misc_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for embedding."""
import math
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import conv_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class AttentionPooling(base_layers.BaseLayer):
"""A basic attention pooling layer."""
def __init__(self, scalar=True, normalize=True, **kwargs):
self.scalar = scalar
    # Attention logits should not have an activation after the linear layer,
    # so they can be positive or negative. This would enable the attention
    # distribution to be anything that the network likes. Using a relu
    # activation makes the attention distribution biased towards a uniform
    # distribution. This gets better results for attention pooling. Though
    # some outputs are emphasized for making the classification decision, all
    # other outputs have a non-zero probability of influencing the class. This
    # seems to result in better backprop.
self.attention = dense_layers.BaseQDenseVarLen(
units=1, rank=3, normalize=normalize, **kwargs)
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
super(AttentionPooling, self).__init__(**kwargs)
def build(self, input_shapes):
self.feature_size = input_shapes[-1]
def call(self, inputs, mask, inverse_normalizer):
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
batch_size = self.get_batch_dimension(inputs)
attn_logits = self.attention(inputs, mask, inverse_normalizer)
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
invalid_mask = (1 - mask) * self.parameters.invalid_logit
attn_logits = attn_logits * mask + invalid_mask
attn_logits = tf.reshape(attn_logits, [batch_size, -1])
attention = tf.nn.softmax(attn_logits, axis=-1)
attention = self.qrange_sigmoid(attention, tf_only=True)
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = tf.reshape(inputs, [-1, self.feature_size])
else:
attention = tf.expand_dims(attention, axis=1)
pre_logits = self.qactivation(tf.matmul(attention, inputs))
return tf.reshape(pre_logits, [batch_size, self.feature_size])
class TreeInductionLayer(base_layers.BaseLayer):
"""A basic tree induction layer."""
def __init__(self, **kwargs):
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
super(TreeInductionLayer, self).__init__(**kwargs)
def call(self, keys, queries, sequence_length):
key_dim = keys.get_shape().as_list()[-1]
query_dim = queries.get_shape().as_list()[-1]
assert key_dim == query_dim, "Last dimension of keys/queries should match."
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
sequence_mask = tf.sequence_mask(
sequence_length, maxlen=tf.shape(keys)[1], dtype=tf.float32)
sequence_mask = tf.expand_dims(sequence_mask, axis=2)
attn_mask = tf.matmul(sequence_mask, sequence_mask, transpose_b=True)
attn_logits = self.qactivation(tf.matmul(keys, queries, transpose_b=True))
invalid_attn_mask = (1 - attn_mask) * self.parameters.invalid_logit
return attn_logits * attn_mask + invalid_attn_mask
else:
assert self.get_batch_dimension(keys) == 1
assert self.get_batch_dimension(queries) == 1
keys = tf.reshape(keys, [-1, key_dim])
queries = tf.reshape(queries, [-1, key_dim])
result = self.qactivation(tf.matmul(keys, queries, transpose_b=True))
# TODO(b/171063452): Bug needs to be fixed to handle this correctly.
# seq_dim = tf.shape(result)[1]
# result = tf.reshape(result, [1, seq_dim, seq_dim])
return result
class GBSTLayerV2(base_layers.BaseLayer):
"""Tokenization layer."""
def __init__(self,
feature_size,
max_seq_len,
downsample_rate=2,
max_subword_block_width=4,
conv_kernel_size=5,
block_mixing_mode=None,
add_block_pos_embed=False,
**kwargs):
super(GBSTLayerV2, self).__init__(**kwargs)
self.feature_size = feature_size
self.max_seq_len = max_seq_len
self.downsample_rate = downsample_rate
self.subword_blocks_width = [1, 2, 3, 4]
self.max_subword_block_width = len(self.subword_blocks_width)
self.block_mixing_mode = block_mixing_mode
self.add_block_pos_embed = add_block_pos_embed
if self.add_block_pos_embed:
self.block_pos_embedding = embedding_layers.EmbeddingLayer(
shape=[self.max_subword_block_width, self.feature_size], **kwargs)
self.conv_kernel_size = conv_kernel_size
self.conv_layer = conv_layers.EncoderQConvolution(
filters=feature_size,
ksize=conv_kernel_size,
rank=3,
padding="VALID",
activation=None,
**kwargs)
padding = [conv_kernel_size - 1, 0]
self.zero_pad = tf.keras.layers.ZeroPadding1D(padding=padding)
self.block_attn = dense_layers.BaseQDense(
units=1,
rank=3,
activation=None,
normalize=False,
quantize_output=False,
**kwargs)
self.scores_concat = quantization_layers.ConcatQuantization(
axis=3, **kwargs)
self.attn_concat = quantization_layers.ConcatQuantization(axis=0, **kwargs)
self.qact = quantization_layers.ActivationQuantization(**kwargs)
self.qact_dot = quantization_layers.ActivationQuantization(**kwargs)
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
def call(self, inputs, seq_length):
"""Performs downsampling on the character-scale input representation.
Based in principle on https://arxiv.org/pdf/2106.12672.pdf.
Args:
inputs: float Tensor of shape [batch_size, seq_length, embedding_size].
seq_length: sequence length of shape [batch_size].
Returns:
<float>[batch_size, seq_length / downsample_rate, embedding_size].
Downsampled sequences.
"""
self._assert_rank_and_type(inputs, 3)
bsz = self.get_batch_dimension(inputs)
max_seq_len = self.max_seq_len
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
num_steps = tf.shape(inputs)[1]
inputs = self.zero_pad(inputs)
inputs = self.conv_layer(inputs)
all_block_scores = []
all_sequences = []
for subword_len in self.subword_blocks_width:
if self.add_block_pos_embed:
block_pos_indices = tf.range(subword_len, dtype=tf.int32)
block_pos_indices = tf.reshape(block_pos_indices, [1, -1])
block_pos_embeds = self.block_pos_embedding(block_pos_indices)
tile_len = math.ceil(max_seq_len / float(subword_len))
retiled_block_pos_embeds = tf.repeat(block_pos_embeds, tile_len, axis=1)
inputs += retiled_block_pos_embeds
# For this block size, form candidate block embeddings and scores.
# candidates shape: [batch, seq_len/subword_len, dim]
# block_scores shape: [batch, seq_len/subword_len, 1]
candidates = tf.nn.avg_pool(
inputs, [subword_len], strides=[subword_len], padding="SAME")
candidates = self.conv_layer.quantize_using_output_range(candidates)
block_scores = self.block_attn(candidates)
# Upsample it back to the original sequence length.
retiled_seq = tf.repeat(candidates, subword_len, axis=1)
retiled_block_scores = tf.repeat(block_scores, subword_len, axis=1)
# Make sure everything is the right length and add new dimension to concat
# candidate blocks on.
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
retiled_block_scores = retiled_block_scores[:, :num_steps, :]
retiled_seq = retiled_seq[:, :num_steps, :]
else:
retiled_block_scores = retiled_block_scores[:, :max_seq_len, :]
retiled_seq = retiled_seq[:, :max_seq_len, :]
retiled_seq = tf.expand_dims(retiled_seq, axis=-1)
retiled_block_scores = tf.expand_dims(retiled_block_scores, axis=-1)
all_sequences.append(retiled_seq)
all_block_scores.append(retiled_block_scores)
block_net = self.scores_concat(all_block_scores)
if self.block_mixing_mode == "score_attention":
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
block_attn_steps = []
self.attn_concat(None)
for i in range(num_steps):
block_i = tf.reshape(block_net[:, i:i + 1, :, :], [1, -1])
block_attn_steps.append(tf.matmul(block_i, block_i, transpose_b=True))
block_attn = self.attn_concat(block_attn_steps)
block_attn = tf.reshape(block_attn, [bsz, -1, 1, 1])
else:
block_attn = self.attn_concat(
[tf.matmul(block_net, block_net, transpose_b=True)])
block_attn = tf.nn.softmax(block_attn, axis=1)
block_attn = self.qrange_sigmoid(block_attn, tf_only=True)
block_net_scaled = self.qact(block_attn * block_net)
else:
block_net_scaled = block_net
candidate_embeds = self.conv_layer.quantize_using_output_range(
tf.concat(all_sequences, axis=3))
dot_product = self.qact_dot(block_net_scaled * candidate_embeds)
output = self.qoutput(tf.reduce_mean(dot_product, axis=-1, keepdims=True))
output = tf.reshape(output, [bsz, -1, self.feature_size])
# Removing pad entries for inference mode.
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
output = output[:, :num_steps, :]
# Downsample by mean pooling.
if self.downsample_rate > 1:
output = tf.nn.avg_pool(
output, (self.downsample_rate,),
strides=(self.downsample_rate,),
padding="VALID")
return output
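# Editor's note (illustrative, not part of the original module): the final
# downsampling in GBSTLayerV2 is a plain strided average pool, so a
# [batch, seq_len, dim] input shrinks to [batch, seq_len // rate, dim].
def _example_downsample(inputs=None, downsample_rate=2):
  inputs = tf.ones([1, 8, 4]) if inputs is None else inputs
  return tf.nn.avg_pool(
      inputs, (downsample_rate,), strides=(downsample_rate,), padding="VALID")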
| 10,559 | 42.636364 | 80 | py |
models | models-master/research/seq_flow_lite/layers/dense_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic dense layers."""
import copy
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class BaseQDense(base_layers.BaseLayer):
"""Quantized encoder dense layers."""
def __init__(self,
units,
activation=tf.keras.layers.ReLU(),
bias=True,
rank=2,
normalize=True,
quantize_output=True,
normalization_fn=None,
**kwargs):
self.units = units
self.rank = rank
assert rank >= 2 and rank <= 4
self.activation = activation
self.bias = bias
self.normalize = normalize
self.quantize_output = quantize_output
if quantize_output:
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
self._create_normalizer(normalization_fn=normalization_fn, **kwargs)
super(BaseQDense, self).__init__(**kwargs)
def build(self, input_shapes):
assert len(input_shapes) == self.rank
if self.rank == 4:
assert input_shapes[1] == 1 or input_shapes[2] == 1
self.in_units = input_shapes[-1]
shape = [self.in_units, self.units]
self.w = self.add_weight_wrapper(shape=shape)
if self.bias:
self.b = self.add_bias(shape=[self.units])
def _create_normalizer(self, normalization_fn, **kwargs):
if normalization_fn is None:
self.normalization = normalization_layers.BatchNormalization(**kwargs)
else:
self.normalization = copy.deepcopy(normalization_fn)
def _dense_r2(self, inputs, normalize_method):
outputs = tf.matmul(inputs, self.quantize_parameter(self.w))
if self.bias:
outputs = tf.nn.bias_add(outputs, self.b)
if self.normalize:
outputs = normalize_method(outputs)
if self.activation:
outputs = self.activation(outputs)
if self.quantize_output:
return self.qoutput(outputs)
else:
return outputs
def _dense_r34(self, inputs, normalize_method):
bsz = self.get_batch_dimension(inputs)
outputs = tf.reshape(inputs, [-1, self.in_units])
outputs = self._dense_r2(outputs, normalize_method)
if self.rank == 3:
return tf.reshape(outputs, [bsz, -1, self.units])
elif inputs.get_shape().as_list()[1] == 1:
return tf.reshape(outputs, [bsz, 1, -1, self.units])
else:
return tf.reshape(outputs, [bsz, -1, 1, self.units])
def call(self, inputs):
def normalize_method(tensor):
return self.normalization(tensor)
return self._do_call(inputs, normalize_method)
def _do_call(self, inputs, normalize_method):
if self.rank == 2:
return self._dense_r2(inputs, normalize_method)
return self._dense_r34(inputs, normalize_method)
def quantize_using_output_range(self, tensor):
return self.qoutput.quantize_using_range(tensor)
class BaseQDenseVarLen(BaseQDense):
"""Dense on variable length sequence."""
def _create_normalizer(self, normalization_fn, **kwargs):
if normalization_fn is None:
self.normalization = normalization_layers.VarLenBatchNormalization(
rank=2, **kwargs)
else:
self.normalization = copy.deepcopy(normalization_fn)
def call(self, inputs, mask, inverse_normalizer=None):
if inverse_normalizer is None:
inverse_normalizer = self.inverse_normalizer(mask)
def normalize_method(tensor):
maskr2 = tf.reshape(mask, [-1, 1])
return self.normalization(tensor, maskr2, inverse_normalizer)
return self._do_call(inputs, normalize_method)
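# Editor's usage sketch (illustrative only, not part of the original module):
# a quantized dense projection of a [batch, features] tensor in training mode.
# The sizes below are arbitrary placeholders.
def _example_base_qdense():
  params = base_layers.Parameters(base_layers.TRAIN, quantize=True)
  layer = BaseQDense(units=4, parameters=params)
  return layer(tf.ones([2, 8]))  # output shape [2, 4]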
| 4,317 | 34.105691 | 80 | py |
models | models-master/research/seq_flow_lite/layers/qrnn_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for QRNN."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import conv_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from tf_ops import tf_custom_ops_py # import seq_flow_lite module
QUASI_RNN_POOLING_F = "f"
QUASI_RNN_POOLING_FO = "fo"
QUASI_RNN_POOLING_IFO = "ifo"
_QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP = {
QUASI_RNN_POOLING_F: 2,
QUASI_RNN_POOLING_FO: 3,
QUASI_RNN_POOLING_IFO: 4,
}
class QRNNUnidirectionalPoolingCore(base_layers.BaseLayer):
"""Create a unidirectional QRNN pooling inner loop."""
def __init__(self, forward=True, **kwargs):
self.forward = forward
super(QRNNUnidirectionalPoolingCore, self).__init__(**kwargs)
def call(self, multiplier, constant):
if self.parameters.mode != base_layers.TFLITE:
return self._qrnn_pooling(multiplier, constant)
else:
return tf_custom_ops_py.pooling_op(multiplier, constant,
[1.0 if self.forward else 0.0])
  def _qrnn_pooling(self, multiplier, constant):
    """Pooling step computes the internal states for all timesteps."""
    assert multiplier.get_shape().as_list() == constant.get_shape().as_list()
    gate_static_shape = multiplier.get_shape().as_list()
    gate_shape = tf.shape(multiplier)
    feature_size = gate_static_shape[2]
    assert feature_size is not None
    batch_size = gate_static_shape[0] or gate_shape[0]
    max_timestep = gate_static_shape[1] or gate_shape[1]
    dynamic_loop = gate_static_shape[1] is None
    # Get multiplier/constant in [timestep, batch, feature_size] format
    multiplier_transposed = tf.transpose(multiplier, [1, 0, 2])
constant_transposed = tf.transpose(constant, [1, 0, 2])
# Start state
state = tf.zeros((batch_size, feature_size), tf.float32)
if dynamic_loop:
# One pooling step
def _step(index, state, states):
m = multiplier_transposed[index, :, :]
c = constant_transposed[index, :, :]
new_state = state * m + c
next_index = index + 1 if self.forward else index - 1
return next_index, new_state, states.write(index, new_state)
# Termination condition
def _termination(index, state, states):
del state, states
return (index < max_timestep) if self.forward else (index >= 0)
states = tf.TensorArray(tf.float32, size=max_timestep)
index = 0 if self.forward else max_timestep - 1
# Dynamic pooling loop
_, state, states = tf.while_loop(_termination, _step,
[index, state, states])
states = states.stack()
else:
# Unstack them to process one timestep at a time
multiplier_list = tf.unstack(multiplier_transposed)
constant_list = tf.unstack(constant_transposed)
states = []
# Unroll either forward or backward based on the flag `forward`
timesteps = list(range(max_timestep)) if self.forward else reversed(
list(range(max_timestep)))
# Static pooling loop
for time in timesteps:
state = state * multiplier_list[time] + constant_list[time]
states.append(state)
# Stack them back in the right order
states = tf.stack(states if self.forward else list(reversed(states)))
# Change to [batch, timestep, feature_size]
return tf.transpose(states, [1, 0, 2])
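# Editor's note (illustrative, not part of the original module): per feature,
# the pooling above computes state[t] = state[t - 1] * multiplier[t] +
# constant[t] (iterating backwards when forward=False). A tiny unrolled
# equivalent for a single batch element with static shapes:
def _example_pooling_recurrence(multiplier, constant):
  # multiplier, constant: float tensors of shape [timesteps, features].
  state = tf.zeros_like(multiplier[0])
  states = []
  for t in range(multiplier.shape[0]):
    state = state * multiplier[t] + constant[t]
    states.append(state)
  return tf.stack(states)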
class QRNNUnidirectionalPooling(base_layers.BaseLayer):
"""Create a unidirectional QRNN pooling."""
def __init__(self,
zoneout_probability=0.0,
forward=True,
pooling=QUASI_RNN_POOLING_FO,
output_quantized=True,
**kwargs):
self.zoneout_probability = zoneout_probability
self.pooling = pooling
self.forward = forward
self.output_quantized = output_quantized
if output_quantized and self.pooling == QUASI_RNN_POOLING_IFO:
self.qoutputs = quantization_layers.ActivationQuantization()
self.num_gates = _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP[pooling]
assert pooling in _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP.keys()
self.pooling_core = QRNNUnidirectionalPoolingCore(forward=forward, **kwargs)
super(QRNNUnidirectionalPooling, self).__init__(**kwargs)
def call(self, gates, mask):
return self._create_qrnn_pooling_unidirectional(gates, mask)
def _qrnn_preprocess(self, gates):
"""Preprocess the gate inputs to the pooling layer."""
assert self.num_gates == len(gates)
dim = lambda tensor, index: tensor.get_shape().as_list()[index]
for tensor in gates:
assert len(tensor.get_shape().as_list()) == 3
for idx in range(3):
assert dim(gates[0], idx) == dim(tensor, idx)
if self.pooling == QUASI_RNN_POOLING_F:
z = self.quantized_tanh(gates[0], tf_only=True)
f = self.quantized_sigmoid(gates[1], tf_only=True)
return f, self.qrange_tanh(self.qrange_sigmoid(1 - f) * z), 1
elif self.pooling == QUASI_RNN_POOLING_FO:
z = self.quantized_tanh(gates[0], tf_only=True)
f = self.quantized_sigmoid(gates[1], tf_only=True)
o = self.quantized_sigmoid(gates[2], tf_only=True)
return f, self.qrange_tanh(self.qrange_sigmoid(1 - f) * z), o
else: # self.pooling == QUASI_RNN_POOLING_IFO:
z = self.quantized_tanh(gates[0], tf_only=True)
i = self.quantized_sigmoid(gates[1], tf_only=True)
f = self.quantized_sigmoid(gates[2], tf_only=True)
o = self.quantized_sigmoid(gates[3], tf_only=True)
return f, self.qrange_tanh(i * z), o
def _qrnn_postprocess(self, states, multiplier):
"""Postprocess the states and return the output tensors."""
if self.pooling == QUASI_RNN_POOLING_F:
return states
elif self.pooling == QUASI_RNN_POOLING_FO:
return self.qrange_tanh(states) * multiplier
else: # self.pooling == QUASI_RNN_POOLING_IFO
return self.qoutputs(states) * multiplier
  def _qrnn_zoneout(self, multiplier, constant):
    """Zoneout regularization for Quasi RNN."""
    enable_zoneout = self.zoneout_probability > 0.0
    if enable_zoneout and self.parameters.mode == base_layers.TRAIN:
      # zoneout_mask is 1.0 with self.zoneout_probability and 0.0 with
      # probability (1 - self.zoneout_probability)
      zoneout_mask = tf.random.uniform(tf.shape(multiplier), maxval=1.0)
      zoneout_mask = tf.floor(zoneout_mask + self.zoneout_probability)
      # When zoneout_mask is 1.0, do not update the state, retain the old state.
      # This is achieved by making the multiplier 1.0 and constant 0.0.
      # When zoneout_mask is 0.0 the multiplier and constant are unaffected.
      # multiplier is expected to be in the range [0.0, 1.0]. This is true since
      # it is the result of a sigmoid.
      multiplier = tf.maximum(zoneout_mask, multiplier)
      constant *= (1 - zoneout_mask)
    return multiplier, constant
def _create_qrnn_pooling_unidirectional(self, gates, mask):
"""Create QRNN Pooling in either forward or backward direction."""
m1, c1, outgate = self._qrnn_preprocess(gates)
# For inference zero padding will not be used. Hence sequence length is
# not necessary.
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
m1 = m1 * mask + (1 - mask) * tf.ones_like(m1)
c1 *= mask
m1, c1 = self._qrnn_zoneout(m1, c1)
states = self.pooling_core(m1, c1)
outputs = self._qrnn_postprocess(states, outgate)
# For inference zero padding will not be used. Hence sequence length is
# not necessary.
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
outputs *= mask
if self.output_quantized:
if self.pooling in [QUASI_RNN_POOLING_FO, QUASI_RNN_POOLING_F]:
outputs = self.qrange_tanh(outputs)
else:
outputs = self.qoutputs.quantize_using_range(outputs)
return outputs
class QRNNUnidirectional(base_layers.BaseLayer):
"""Create a unidirectional QRNN encoder."""
def __init__(self,
kwidth,
state_size,
zoneout_probability=0.0,
forward=True,
pooling=QUASI_RNN_POOLING_FO,
output_quantized=True,
normalization_fn=None,
**kwargs):
self.forward = forward
self.kwidth = kwidth
self.pooling = pooling
self.state_size = state_size
assert pooling in _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP.keys()
self.num_gates = _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP[pooling]
self.gate_layers = []
for _ in range(self.num_gates):
self.gate_layers.append(
conv_layers.EncoderQConvolutionVarLen(
filters=state_size,
ksize=kwidth,
rank=3,
padding="VALID",
activation=None,
normalization_fn=normalization_fn,
**kwargs))
padding = [kwidth - 1, 0] if forward else [0, kwidth - 1]
self.zero_pad = tf.keras.layers.ZeroPadding1D(padding=padding)
self.qrnn_pooling = QRNNUnidirectionalPooling(
forward=forward,
zoneout_probability=zoneout_probability,
output_quantized=output_quantized,
pooling=pooling,
**kwargs)
super(QRNNUnidirectional, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer=None):
if inverse_normalizer is None:
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
maskr4 = tf.expand_dims(mask, axis=1)
padded_inputs = self.zero_pad(inputs)
gates = [
layer(padded_inputs, maskr4, inverse_normalizer)
for layer in self.gate_layers
]
return self.qrnn_pooling(gates, mask)
class QRNNUnidirectionalWithBottleneck(base_layers.BaseLayer):
"""Create a unidirectional QRNN encoder with bottlenecks."""
def __init__(self,
kwidth,
state_size,
bottleneck_size,
zoneout_probability=0.0,
forward=True,
pooling=QUASI_RNN_POOLING_FO,
output_quantized=True,
**kwargs):
self.bottleneck_size = bottleneck_size
self.state_size = state_size
self.forward = forward
self.kwidth = kwidth
self.pooling = pooling
self.state_size = state_size
assert pooling in _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP.keys()
self.num_gates = _QUASI_RNN_POOLING_TO_NUMBER_OF_GATES_MAP[pooling]
self.qrnn_pooling = QRNNUnidirectionalPooling(
forward=forward,
zoneout_probability=zoneout_probability,
output_quantized=output_quantized,
pooling=pooling,
**kwargs)
self.pre_conv_layers = []
self.gate_layers = []
self.post_conv_layers = []
for _ in range(self.num_gates):
self.pre_conv_layers.append(
dense_layers.BaseQDense(bottleneck_size, rank=3, **kwargs))
self.gate_layers.append(
conv_layers.EncoderQConvolution(
filters=bottleneck_size,
ksize=kwidth,
rank=3,
padding="SAME",
normalization_fn=None,
**kwargs))
self.post_conv_layers.append(
dense_layers.BaseQDense(
state_size, rank=3, activation=None, **kwargs))
super(QRNNUnidirectionalWithBottleneck, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer=None):
if inverse_normalizer is None:
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
pre_conv_out = [layer(inputs) for layer in self.pre_conv_layers]
gates = [layer(pre_conv_out[i]) for i, layer in enumerate(self.gate_layers)]
post_conv_out = [
layer(gates[i]) for i, layer in enumerate(self.post_conv_layers)
]
return self.qrnn_pooling(post_conv_out, mask)
class QRNNBidirectional(base_layers.BaseLayer):
"""Create a bidirectional QRNN encoder."""
def __init__(self,
kwidth,
state_size,
zoneout_probability=0.0,
pooling=QUASI_RNN_POOLING_FO,
bottleneck_size=None,
normalization_fn=None,
**kwargs):
self.pooling = pooling
if bottleneck_size is None:
self.forward = QRNNUnidirectional(
kwidth=kwidth,
state_size=state_size,
forward=True,
output_quantized=False,
zoneout_probability=zoneout_probability,
pooling=pooling,
normalization_fn=normalization_fn,
**kwargs)
self.backward = QRNNUnidirectional(
kwidth=kwidth,
state_size=state_size,
forward=False,
output_quantized=False,
zoneout_probability=zoneout_probability,
pooling=pooling,
normalization_fn=normalization_fn,
**kwargs)
else:
assert normalization_fn is None, (
"normalization_fn will not take an effect")
self.forward = QRNNUnidirectionalWithBottleneck(
kwidth=kwidth,
state_size=state_size,
bottleneck_size=bottleneck_size,
forward=True,
output_quantized=False,
zoneout_probability=zoneout_probability,
pooling=pooling,
**kwargs)
self.backward = QRNNUnidirectionalWithBottleneck(
kwidth=kwidth,
state_size=state_size,
bottleneck_size=bottleneck_size,
forward=False,
output_quantized=False,
zoneout_probability=zoneout_probability,
pooling=pooling,
**kwargs)
self.qconcat = quantization_layers.ConcatQuantization(axis=2, **kwargs)
super(QRNNBidirectional, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer=None):
if inverse_normalizer is None:
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
fwd_outputs = self.forward(inputs, mask, inverse_normalizer)
bwd_outputs = self.backward(inputs, mask, inverse_normalizer)
if self.pooling in [QUASI_RNN_POOLING_FO, QUASI_RNN_POOLING_F]:
outputs = [self.qrange_tanh(fwd_outputs), self.qrange_tanh(bwd_outputs)]
outputs = self.qrange_tanh(tf.concat(outputs, axis=2))
else:
outputs = self.qconcat([fwd_outputs, bwd_outputs])
return outputs
class QRNNBidirectionalStack(base_layers.BaseLayer):
"""Create a stack of bidirectional QRNN encoder."""
def __init__(self,
num_layers,
kwidth,
state_size,
zoneout_probability=0.0,
layerwise_decaying_zoneout=True,
pooling=QUASI_RNN_POOLING_FO,
bottleneck_size=None,
normalization_fn=None,
**kwargs):
self.layers = []
zp = zoneout_probability
for idx in range(num_layers):
if layerwise_decaying_zoneout:
zp = (zoneout_probability**(idx + 1))
self.layers.append(
QRNNBidirectional(
kwidth=kwidth,
state_size=state_size,
zoneout_probability=zp,
pooling=pooling,
bottleneck_size=bottleneck_size,
normalization_fn=normalization_fn,
**kwargs))
super(QRNNBidirectionalStack, self).__init__(**kwargs)
def call(self, inputs, maskr3, inverse_normalizer):
return self._apply_qrnn_stack(inputs, maskr3, inverse_normalizer)
def _apply_qrnn_stack(self, inputs, mask3, inverse_normalizer):
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = inputs * mask3
for layer in self.layers:
outputs = layer(inputs, mask3, inverse_normalizer)
inputs = outputs
return outputs
class QRNNBidirectionalStackWithSeqLength(QRNNBidirectionalStack):
def call(self, inputs, sequence_length):
mask = tf.sequence_mask(
sequence_length, tf.shape(inputs)[1], dtype=tf.float32)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask))
maskr3 = tf.expand_dims(mask, 2)
return self._apply_qrnn_stack(inputs, maskr3, inverse_normalizer)
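# Editor's usage sketch (illustrative only, not part of the original module):
# a 2-layer bidirectional QRNN stack over a padded [batch, time, features]
# batch with explicit sequence lengths. All sizes are placeholders.
def _example_qrnn_stack():
  params = base_layers.Parameters(base_layers.TRAIN, quantize=True)
  stack = QRNNBidirectionalStackWithSeqLength(
      num_layers=2, kwidth=3, state_size=16, parameters=params)
  inputs = tf.ones([2, 5, 16])
  return stack(inputs, tf.constant([5, 3]))  # output shape [2, 5, 32]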
| 17,000 | 36.864143 | 80 | py |
models | models-master/research/seq_flow_lite/layers/embedding_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for embedding."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class EmbeddingLayer(base_layers.BaseLayer):
"""Embedding layer."""
def __init__(self,
shape,
num_bits=8,
initializer=None,
trainable=True,
**kwargs):
self.shape = shape
self.quantizer = quantization_layers.ActivationQuantization(
num_bits=num_bits, **kwargs)
super(EmbeddingLayer, self).__init__(**kwargs)
if initializer is None:
initializer = tf.keras.initializers.GlorotUniform()
self.initializer = initializer
self.trainable = trainable
def build(self, input_shapes):
self.embedding_table = self.add_weight(
name="embedding_table",
shape=self.shape,
initializer=self.initializer,
trainable=self.trainable,
dtype=tf.float32)
if self.trainable:
self.add_reg_loss(self.embedding_table)
def call(self, indices):
assert indices.dtype in [tf.int64, tf.int32]
outputs = tf.nn.embedding_lookup(self.embedding_table, indices)
return self.quantizer(outputs)
class EmbeddingFullyConnected(EmbeddingLayer):
"""Uses embedding table as weights in a fully connected op."""
def __init__(self, **kwargs):
shape = kwargs.pop("shape", None)
initializer = kwargs.pop("initializer", None)
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
super(EmbeddingFullyConnected, self).__init__(
shape=shape, initializer=initializer, **kwargs)
def fully_connected(self, inputs, bias=None, weights_scale_factor=None):
# This method can only be called after a call to "call" method in this class
self._assert_rank_and_type(inputs, 2)
weights = self.embedding_table
if weights_scale_factor is not None:
weights = weights * weights_scale_factor
outputs = tf.matmul(inputs, weights, transpose_b=True)
if bias is not None:
outputs = tf.nn.bias_add(outputs, bias)
return self.qoutput(outputs)
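# Editor's usage sketch (illustrative only, not part of the original module):
# an 8-bit quantized embedding lookup over a toy vocabulary of 100 ids with
# 16-dimensional vectors. The sizes are placeholders.
def _example_embedding_lookup():
  params = base_layers.Parameters(base_layers.TRAIN, quantize=True)
  layer = EmbeddingLayer(shape=[100, 16], parameters=params)
  return layer(tf.constant([[1, 2, 3]]))  # output shape [1, 3, 16]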
| 2,814 | 36.039474 | 80 | py |
models | models-master/research/seq_flow_lite/layers/normalization_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for normalization."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from tf_ops import tf_custom_ops_py # import seq_flow_lite module
class BatchNormalization(base_layers.BaseLayer):
"""A class that applies batch normalization to the input tensor."""
def __init__(self, ema_decay=0.999, **kwargs):
self.ema_decay = ema_decay
super(BatchNormalization, self).__init__(**kwargs)
def build(self, input_shapes):
self.reduce_dims = list(range(len(input_shapes) - 1))
shape = [input_shapes[-1]]
self.offset = self.add_weight(
"offset",
shape=shape,
initializer=tf.keras.initializers.Zeros(),
trainable=True)
self.scale = self.add_weight(
"scale",
shape=shape,
initializer=tf.keras.initializers.Ones(),
trainable=True)
self.mva_mean = self.add_weight(
"mva_mean",
shape=shape,
initializer=tf.keras.initializers.Zeros(),
trainable=False)
self.mva_var = self.add_weight(
"mva_variance",
shape=shape,
initializer=tf.keras.initializers.Ones(),
trainable=False)
def call(self, inputs):
mean_mom, var_mom = None, None
if self.parameters.mode == base_layers.TRAIN:
mean_mom, var_mom = tf.nn.moments(inputs, self.reduce_dims)
return self._batch_norm(inputs, mean_mom, var_mom)
def _batch_norm(self, inputs, mean_mom, var_mom):
if self.parameters.mode == base_layers.TRAIN:
      # During training compute summary stats, update them into the moving
      # average variables and use the summary stats for batch normalization.
with tf.control_dependencies([
self.assign_moving_average(self.mva_mean, mean_mom, self.ema_decay),
self.assign_moving_average(self.mva_var, var_mom, self.ema_decay)
]):
tensor = tf.nn.batch_normalization(inputs, mean_mom, var_mom,
self.offset, self.scale, 1e-9)
else:
# During eval/inference use the moving average variable for batch
# normalization. The variables would be frozen to constants before
      # saving the graph.
tensor = tf.nn.batch_normalization(inputs, self.mva_mean, self.mva_var,
self.offset, self.scale, 1e-9)
return tensor
class VarLenBatchNormalization(BatchNormalization):
"""A class that applies batch normalization to the input tensor."""
def __init__(self, rank=2, **kwargs):
self.rank = rank
assert rank == 2 or rank == 4
super(VarLenBatchNormalization, self).__init__(**kwargs)
def _reduce(self, tensor, multiplier):
return tf.reduce_sum(tensor, axis=self.reduce_dims) * multiplier
def call(self, inputs, mask, inverse_normalizer):
if self.parameters.mode == base_layers.TRAIN:
self._assert_rank_and_type(inputs, self.rank)
self._assert_rank_and_type(mask, self.rank)
inputs = mask * inputs
mean_mom = self._reduce(inputs, inverse_normalizer)
var_mom = self._reduce(inputs * inputs, inverse_normalizer)
return mask * self._batch_norm(inputs, mean_mom, var_mom)
elif self.parameters.mode == base_layers.EVAL:
return mask * self._batch_norm(inputs, None, None)
return self._batch_norm(inputs, None, None)
class LayerNormalization(base_layers.BaseLayer):
"""A class that applies layer normalization to the input tensor."""
def __init__(self, axes=None, **kwargs):
self.axes = axes or [-1]
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
super(LayerNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.rank = len(input_shape)
for i, axis in enumerate(self.axes):
if axis < 0:
self.axes[i] += self.rank
assert (self.axes[i] > 0 and self.axes[i] < self.rank)
self.offset = self.add_weight(
"offset",
shape=[1],
initializer=tf.keras.initializers.Zeros(),
trainable=True)
self.scale = self.add_weight(
"scale",
shape=[1],
initializer=tf.keras.initializers.Ones(),
trainable=True)
def call(self, tensor):
tensor = self.qactivation(tensor)
if self.parameters.mode != base_layers.TFLITE:
mean, variance = tf.nn.moments(tensor, self.axes, keepdims=True)
      # If all the values in the tensor are the same, the variance will be 0.
      # Adding a small epsilon to the variance ensures that we get 0 as the
      # normalized result instead of NaN in the resulting tensor.
tensor = (tensor - mean) / tf.sqrt(variance + 1e-6)
return tensor * self.scale + self.offset
else:
return tf_custom_ops_py.layer_norm(
tensor, self.scale, self.offset, axes=self.axes)
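# Editor's note (illustrative, not part of the original module): outside of
# TFLITE mode the layer normalization above reduces to the usual moment
# normalization; a standalone equivalent over the last axis:
def _example_layer_norm(tensor, scale=1.0, offset=0.0):
  mean, variance = tf.nn.moments(tensor, [-1], keepdims=True)
  return (tensor - mean) / tf.sqrt(variance + 1e-6) * scale + offset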
| 5,515 | 38.4 | 80 | py |
models | models-master/research/seq_flow_lite/layers/transformer_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for Transformer encoder."""
# pylint: disable=arguments-renamed
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import dense_layers # import seq_flow_lite module
from layers import embedding_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
from tf_ops import tf_custom_ops_py # import seq_flow_lite module
class SelfAttention(base_layers.BaseLayer):
"""Self attention encoder (not suitable for causal attention)."""
def __init__(self,
model_dimension,
num_heads,
attention_dropout_rate=0.0,
**kwargs):
self.model_dimension = model_dimension
self.num_heads = num_heads
self.filters = model_dimension // num_heads
self.dense_layers = [
dense_layers.BaseQDenseVarLen(
units=self.filters, activation=None, **kwargs)
for i in range(num_heads * 3)
]
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
self.attention_dropout_rate = attention_dropout_rate
self.qconcat = quantization_layers.ConcatQuantization(axis=2, **kwargs)
super(SelfAttention, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer, attn_mask=None):
batch_size = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
inputs_rank2 = tf.reshape(inputs, [-1, self.model_dimension])
mask_rank2 = tf.reshape(mask, [-1, 1])
tensors = [
layer(inputs_rank2, mask_rank2, inverse_normalizer)
for layer in self.dense_layers
]
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
tensors = [
tf.reshape(tensor, [batch_size, -1, self.filters])
for tensor in tensors
]
context = []
if attn_mask is None:
attn_mask = tf.matmul(mask, tf.transpose(mask, [0, 2, 1]))
if (self.attention_dropout_rate > 0.0 and
self.parameters.mode == base_layers.TRAIN):
attn_mask *= self.random_drop_to_zero(attn_mask,
self.attention_dropout_rate)
invalid_mask = (1 - attn_mask) * self.parameters.invalid_logit
for _ in range(self.num_heads):
keys = tensors.pop()
values = tensors.pop()
queries = tensors.pop()
      # Attention is not scaled dot-product attention; batch normalization
      # compensates for it.
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
queries = tf.transpose(queries, [0, 2, 1])
attn_logits = self.qactivation(tf.matmul(keys, queries))
attn_logits_masked = attn_logits * attn_mask + invalid_mask
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
context.append(tf.matmul(attention, values))
else:
queries = tf.transpose(queries)
attn_logits_masked = self.qactivation(tf.matmul(keys, queries))
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
ctx = tf.matmul(attention, values)
ctx = tf.reshape(ctx, [1, -1, self.filters])
context.append(ctx)
return self.qconcat(context)
class SelfAttentionV2(base_layers.BaseLayer):
"""Self attention encoder (not suitable for causal attention)."""
def __init__(self,
model_dimension,
num_heads,
attention_dropout_rate=0.0,
**kwargs):
self.model_dimension = model_dimension
self.num_heads = num_heads
self.filters = model_dimension // num_heads
self.dense_layers = dense_layers.BaseQDenseVarLen(
units=model_dimension * 3, activation=None, **kwargs)
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
self.attention_dropout_rate = attention_dropout_rate
self.qconcat = quantization_layers.ConcatQuantization(axis=1, **kwargs)
super(SelfAttentionV2, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer, attn_mask=None):
bsz = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
inputs_rank2 = tf.reshape(inputs, [-1, self.model_dimension])
mask_rank2 = tf.reshape(mask, [-1, 1])
tensors = self.dense_layers(inputs_rank2, mask_rank2, inverse_normalizer)
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
tensors = tf.reshape(tensors, [bsz, -1, 3, self.num_heads, self.filters])
tensors = tf.unstack(tensors, axis=2)
else:
tensors = tf.split(tensors, self.num_heads * 3, axis=1)
if attn_mask is None:
attn_mask = tf.matmul(mask, mask, transpose_b=True)
if (self.attention_dropout_rate > 0.0 and
self.parameters.mode == base_layers.TRAIN):
attn_mask *= self.random_drop_to_zero(attn_mask,
self.attention_dropout_rate)
attn_mask = tf.expand_dims(attn_mask, axis=1)
invalid_mask = (1 - attn_mask) * self.parameters.invalid_logit
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
queries = tf.transpose(tensors[0], [0, 2, 1, 3])
keys = tf.transpose(tensors[1], [0, 2, 1, 3])
values = tf.transpose(tensors[2], [0, 2, 1, 3])
attn_logits = self.qactivation(tf.matmul(queries, keys, transpose_b=True))
attn_logits_masked = attn_logits * attn_mask + invalid_mask
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
result = tf.matmul(attention, values)
result = tf.transpose(result, [0, 2, 1, 3])
result = tf.reshape(result, [bsz, -1, self.model_dimension])
return self.qconcat([result])
else:
context = []
for idx in range(self.num_heads):
queries = tensors[idx]
keys = tensors[idx + self.num_heads]
values = tensors[idx + self.num_heads * 2]
        # Attention is not scaled dot-product attention; batch normalization
        # compensates for it.
attn_logits_masked = self.qactivation(
tf.matmul(queries, keys, transpose_b=True))
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
context.append(tf.matmul(attention, values))
result = self.qconcat(context)
return tf.reshape(result, [1, -1, self.model_dimension])
class TransformerEncoder(base_layers.BaseLayer):
"""Transformer Encoder."""
def __init__(self,
model_dimension,
num_heads,
intermediate_size,
initializer_stddev=0.02,
activation_dropout_rate=0.0,
attention_dropout_rate=0.0,
**kwargs):
super(TransformerEncoder, self).__init__(**kwargs)
self.model_dimension = model_dimension
self.parameters.initializer = tf.keras.initializers.TruncatedNormal(
stddev=initializer_stddev)
self.self_attn = SelfAttentionV2(
model_dimension,
num_heads,
attention_dropout_rate=attention_dropout_rate,
parameters=self.parameters)
self.prx = dense_layers.BaseQDenseVarLen(
model_dimension, activation=None, parameters=self.parameters)
self.upprx = dense_layers.BaseQDenseVarLen(
intermediate_size, parameters=self.parameters)
self.downprx = dense_layers.BaseQDenseVarLen(
model_dimension, activation=None, parameters=self.parameters)
self.activation_dropout_rate = activation_dropout_rate
self.ln1 = normalization_layers.LayerNormalization(**kwargs)
self.ln2 = normalization_layers.LayerNormalization(**kwargs)
self.q1 = quantization_layers.ActivationQuantization(**kwargs)
self.q2 = quantization_layers.ActivationQuantization(**kwargs)
def call(self, inputs, mask, inverse_normalizer, attn_mask=None):
batch_size = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
mask_rank2 = tf.reshape(mask, [-1, 1])
assert inputs.get_shape().as_list()[-1] == self.model_dimension
tensor = self.self_attn(inputs, mask, inverse_normalizer, attn_mask)
inputs = tf.reshape(inputs, [-1, self.model_dimension])
tensor = tf.reshape(tensor, [-1, self.model_dimension])
tensor = self.prx(tensor, mask_rank2, inverse_normalizer)
if (self.parameters.mode == base_layers.TRAIN and
self.activation_dropout_rate > 0.0):
tensor = tf.nn.dropout(tensor, rate=self.activation_dropout_rate)
inputs_plus_selfattn = self.q1(self.ln1(inputs + tensor))
ffn_up = self.upprx(inputs_plus_selfattn, mask_rank2, inverse_normalizer)
ffn_down = self.downprx(ffn_up, mask_rank2, inverse_normalizer)
if (self.parameters.mode == base_layers.TRAIN and
self.activation_dropout_rate > 0.0):
ffn_down = tf.nn.dropout(ffn_down, rate=self.activation_dropout_rate)
inputs_plus_ffn = self.q2(self.ln2(inputs_plus_selfattn + ffn_down))
return tf.reshape(inputs_plus_ffn, [batch_size, -1, self.model_dimension])
class TransformerEncoderStack(base_layers.BaseLayer):
"""Transformer Encoder."""
def __init__(self, num_layers, max_time_step, vocabulary_size, embedding_size,
model_dimension, num_heads, intermediate_size, **kwargs):
self.max_time_step = max_time_step
self.vocabulary_size = vocabulary_size
self.embedding_size = embedding_size
activation_dropout_rate = kwargs.pop('activation_dropout_rate', 0.0)
attention_dropout_rate = kwargs.pop('attention_dropout_rate', 0.0)
self.layers = []
for _ in range(num_layers):
self.layers.append(
TransformerEncoder(
model_dimension=model_dimension,
num_heads=num_heads,
intermediate_size=intermediate_size,
activation_dropout_rate=activation_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
**kwargs))
self.embedding = embedding_layers.EmbeddingLayer(
shape=[self.vocabulary_size, self.embedding_size], **kwargs)
self.positional_embedding = embedding_layers.EmbeddingLayer(
shape=[self.max_time_step, self.embedding_size], **kwargs)
self.ln = normalization_layers.LayerNormalization(**kwargs)
self.qact = quantization_layers.ActivationQuantization(**kwargs)
super(TransformerEncoderStack, self).__init__(**kwargs)
def call(self, input_indices, sequence_length):
mask_rank2 = tf.sequence_mask(
sequence_length, tf.shape(input_indices)[1], dtype=tf.float32)
mask_rank3 = tf.expand_dims(mask_rank2, axis=2)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask_rank3))
if self.parameters.mode in [base_layers.PREDICT, base_layers.TFLITE]:
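      # `input_indices + 1 - input_indices` is a tensor of ones, so the
      # reduce_sum below counts the number of token positions (the batch
      # dimension is 1 in these modes), presumably to avoid a dynamic
      # tf.shape op in the exported graph. (Editorial comment, not in the
      # original file.)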
sequence_length = tf.reduce_sum(input_indices + 1 - input_indices)
pos_indices = tf.range(sequence_length, dtype=tf.int32)
pos_indices = tf.reshape(pos_indices, [1, -1])
else:
pos_indices = tf.cumsum(mask_rank2, axis=1, exclusive=True)
pos_indices = tf.cast(pos_indices, dtype=tf.int32)
input_values = self.embedding(input_indices)
pos_values = self.positional_embedding(pos_indices)
inputs = self.qact(self.ln(input_values + pos_values))
attn_mask = tf.matmul(mask_rank3, tf.transpose(mask_rank3, [0, 2, 1]))
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = inputs * mask_rank3
for layer in self.layers:
outputs = layer(inputs, mask_rank3, inverse_normalizer, attn_mask)
inputs = outputs
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
outputs = outputs * mask_rank3
return outputs
class TransformerEncoderStackWithInputEmbedding(TransformerEncoderStack):
"""Transformer Encoder."""
def call(self, inputs, sequence_length):
mask_rank2 = tf.sequence_mask(
sequence_length, tf.shape(inputs)[1], dtype=tf.float32)
mask_rank3 = tf.expand_dims(mask_rank2, axis=2)
inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(mask_rank3))
attn_mask = tf.matmul(mask_rank3, tf.transpose(mask_rank3, [0, 2, 1]))
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = inputs * mask_rank3
for layer in self.layers:
outputs = layer(inputs, mask_rank3, inverse_normalizer, attn_mask)
inputs = outputs
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
outputs = outputs * mask_rank3
return outputs
class FunnelAttention(base_layers.BaseLayer):
"""Self attention encoder (not suitable for causal attention)."""
def __init__(self,
model_dimension,
num_heads,
attention_dropout_rate=0.0,
**kwargs):
self.model_dimension = model_dimension
self.num_heads = num_heads
self.filters = model_dimension // num_heads
self.q_dense_layer = dense_layers.BaseQDenseVarLen(
units=model_dimension, activation=None, **kwargs)
self.kv_dense_layer = dense_layers.BaseQDenseVarLen(
units=model_dimension * 2, activation=None, **kwargs)
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
self.attention_dropout_rate = attention_dropout_rate
self.qconcat = quantization_layers.ConcatQuantization(axis=1, **kwargs)
super(FunnelAttention, self).__init__(**kwargs)
def call(self, inputs, mask, inverse_normalizer, memory, memory_mask,
memory_inverse_normalizer, attn_mask):
bsz = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
self._assert_rank_and_type(memory, 3)
self._assert_rank_and_type(memory_mask, 3)
assert memory.get_shape().as_list()[-1] == self.model_dimension
inputs_rank2 = tf.reshape(inputs, [-1, self.model_dimension])
mask_rank2 = tf.reshape(mask, [-1, 1])
q_tensor = self.q_dense_layer(inputs_rank2, mask_rank2, inverse_normalizer)
memory_rank2 = tf.reshape(memory, [-1, self.model_dimension])
memory_mask_rank2 = tf.reshape(memory_mask, [-1, 1])
kv_tensors = self.kv_dense_layer(memory_rank2, memory_mask_rank2,
inverse_normalizer)
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
q_tensor = tf.reshape(q_tensor, [bsz, -1, self.num_heads, self.filters])
kv_tensors = tf.reshape(kv_tensors,
[bsz, -1, 2, self.num_heads, self.filters])
kv_tensors = tf.unstack(kv_tensors, axis=2)
else:
q_tensor = tf.split(q_tensor, self.num_heads, axis=1)
kv_tensors = tf.split(kv_tensors, self.num_heads * 2, axis=1)
attn_mask = tf.expand_dims(attn_mask, axis=1)
invalid_mask = (1 - attn_mask) * self.parameters.invalid_logit
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
queries = tf.transpose(q_tensor, [0, 2, 1, 3])
keys = tf.transpose(kv_tensors[0], [0, 2, 1, 3])
values = tf.transpose(kv_tensors[1], [0, 2, 1, 3])
attn_logits = self.qactivation(tf.matmul(queries, keys, transpose_b=True))
attn_logits_masked = attn_logits * attn_mask + invalid_mask
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
result = tf.matmul(attention, values)
result = tf.transpose(result, [0, 2, 1, 3])
result = tf.reshape(result, [bsz, -1, self.model_dimension])
return self.qconcat([result])
else:
context = []
for idx in range(self.num_heads):
queries = q_tensor[idx]
keys = kv_tensors[idx]
values = kv_tensors[idx + self.num_heads]
        # Attention is not scaled dot-product attention; batch normalization
        # compensates for it.
attn_logits_masked = self.qactivation(
tf.matmul(queries, keys, transpose_b=True))
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
context.append(tf.matmul(attention, values))
result = self.qconcat(context)
return tf.reshape(result, [1, -1, self.model_dimension])
class FunnelTransformerEncoder(base_layers.BaseLayer):
"""Transformer Encoder."""
def __init__(self,
model_dimension,
num_heads,
intermediate_size,
initializer_stddev=0.02,
activation_dropout_rate=0.0,
attention_dropout_rate=0.0,
**kwargs):
super(FunnelTransformerEncoder, self).__init__(**kwargs)
self.model_dimension = model_dimension
self.parameters.initializer = tf.keras.initializers.TruncatedNormal(
stddev=initializer_stddev)
self.self_attn = FunnelAttention(
model_dimension,
num_heads,
attention_dropout_rate=attention_dropout_rate,
parameters=self.parameters)
self.prx = dense_layers.BaseQDenseVarLen(
model_dimension, activation=None, parameters=self.parameters)
self.upprx = dense_layers.BaseQDenseVarLen(
intermediate_size, parameters=self.parameters)
self.downprx = dense_layers.BaseQDenseVarLen(
model_dimension, activation=None, parameters=self.parameters)
self.activation_dropout_rate = activation_dropout_rate
self.ln1 = normalization_layers.LayerNormalization(**kwargs)
self.ln2 = normalization_layers.LayerNormalization(**kwargs)
self.q1 = quantization_layers.ActivationQuantization(**kwargs)
self.q2 = quantization_layers.ActivationQuantization(**kwargs)
def call(self, inputs, mask, inverse_normalizer, memory, memory_mask,
memory_inverse_normalizer, attn_mask):
batch_size = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
mask_rank2 = tf.reshape(mask, [-1, 1])
assert inputs.get_shape().as_list()[-1] == self.model_dimension
tensor = self.self_attn(inputs, mask, inverse_normalizer, memory,
memory_mask, memory_inverse_normalizer, attn_mask)
inputs = tf.reshape(inputs, [-1, self.model_dimension])
tensor = tf.reshape(tensor, [-1, self.model_dimension])
tensor = self.prx(tensor, mask_rank2, inverse_normalizer)
if (self.parameters.mode == base_layers.TRAIN and
self.activation_dropout_rate > 0.0):
tensor = tf.nn.dropout(tensor, rate=self.activation_dropout_rate)
inputs_plus_selfattn = self.q1(self.ln1(inputs + tensor))
ffn_up = self.upprx(inputs_plus_selfattn, mask_rank2, inverse_normalizer)
ffn_down = self.downprx(ffn_up, mask_rank2, inverse_normalizer)
if (self.parameters.mode == base_layers.TRAIN and
self.activation_dropout_rate > 0.0):
ffn_down = tf.nn.dropout(ffn_down, rate=self.activation_dropout_rate)
inputs_plus_ffn = self.q2(self.ln2(inputs_plus_selfattn + ffn_down))
return tf.reshape(inputs_plus_ffn, [batch_size, -1, self.model_dimension])
class FunnelTransformerEncoderStack(base_layers.BaseLayer):
"""Transformer Encoder."""
def __init__(self, num_layers, max_time_step, vocabulary_size, embedding_size,
model_dimension, num_heads, intermediate_size, **kwargs):
self.max_time_step = max_time_step
self.pool_windows = kwargs.pop('pool_windows', [])
assert len(self.pool_windows) == num_layers
self.vocabulary_size = vocabulary_size
activation_dropout_rate = kwargs.pop('activation_dropout_rate', 0.0)
attention_dropout_rate = kwargs.pop('attention_dropout_rate', 0.0)
self.layers = []
for _ in range(num_layers):
self.layers.append(
FunnelTransformerEncoder(
model_dimension=model_dimension,
num_heads=num_heads,
intermediate_size=intermediate_size,
activation_dropout_rate=activation_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
**kwargs))
super(FunnelTransformerEncoderStack, self).__init__(**kwargs)
def call(self, inputs, sequence_length):
mask_rank2 = tf.sequence_mask(
sequence_length, tf.shape(inputs)[1], dtype=tf.float32)
mask_rank3 = tf.expand_dims(mask_rank2, axis=2)
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
inputs = inputs * mask_rank3
pooled_inputs = inputs
pooled_mask = mask_rank3
pooled_inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(pooled_mask))
memory = pooled_inputs
memory_mask = pooled_mask
memory_inverse_normalizer = pooled_inverse_normalizer
for i, layer in enumerate(self.layers):
if self.pool_windows[i] > 1:
pooled_inputs = tf.nn.avg_pool(
pooled_inputs, [self.pool_windows[i]],
strides=[self.pool_windows[i]],
padding='SAME')
pooled_mask = pooled_mask[:, ::self.pool_windows[i], :]
pooled_inverse_normalizer = tf.math.reciprocal(
tf.reduce_sum(pooled_mask))
attn_mask = tf.matmul(pooled_mask, memory_mask, transpose_b=True)
pooled_outputs = layer(pooled_inputs, pooled_mask,
pooled_inverse_normalizer, memory, memory_mask,
memory_inverse_normalizer, attn_mask)
pooled_inputs = pooled_outputs
pooled_inverse_normalizer = tf.math.reciprocal(tf.reduce_sum(pooled_mask))
memory = pooled_inputs
memory_mask = pooled_mask
memory_inverse_normalizer = pooled_inverse_normalizer
if self.parameters.mode not in [base_layers.PREDICT, base_layers.TFLITE]:
pooled_outputs = pooled_outputs * pooled_mask
return pooled_outputs, pooled_mask
class DecoderMultiheadAttention(base_layers.BaseLayer):
"""Multihead attention for decoder."""
def __init__(self,
model_dimension,
num_heads,
attention_dropout_rate=0.0,
cached_kv=False,
**kwargs):
self.model_dimension = model_dimension
self.num_heads = num_heads
self.filters = model_dimension // num_heads
self.cached_kv = cached_kv
self.q_dense_layers = dense_layers.BaseQDense(
units=model_dimension,
activation=None,
normalize=False,
bias=False,
**kwargs)
self.kv_dense_layers = dense_layers.BaseQDenseVarLen(
units=model_dimension * 2, activation=None, **kwargs)
self.qactivation = quantization_layers.ActivationQuantization(**kwargs)
self.attention_dropout_rate = attention_dropout_rate
self.qconcat = quantization_layers.ConcatQuantization(axis=1, **kwargs)
super(DecoderMultiheadAttention, self).__init__(**kwargs)
def call(self,
inputs,
input_mask,
input_inverse_normalizer,
memory=None,
memory_mask=None,
memory_inverse_normalizer=None,
attn_mask=None):
bsz = self.get_batch_dimension(inputs)
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(input_mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
inputs_rank2 = tf.reshape(inputs, [-1, self.model_dimension])
q_tensor = self.q_dense_layers(inputs_rank2)
if memory is not None:
self._assert_rank_and_type(memory, 2)
self._assert_rank_and_type(memory_mask, 2)
if self.cached_kv:
# Keys and Values are cached and reused at each layer.
assert memory.get_shape().as_list()[1] == 2 * self.model_dimension
kv_tensors = memory
else:
kv_tensors = self.kv_dense_layers(memory, memory_mask,
memory_inverse_normalizer)
else:
kv_tensors = self.kv_dense_layers(inputs_rank2)
if self.parameters.mode not in [base_layers.TFLITE, base_layers.PREDICT]:
q_tensor = tf.reshape(q_tensor, [bsz, -1, self.num_heads, self.filters])
kv_tensors = tf.reshape(kv_tensors,
[bsz, -1, 2, self.num_heads, self.filters])
kv_tensors = tf.unstack(kv_tensors, axis=2)
else:
q_tensor = tf.split(q_tensor, self.num_heads, axis=1)
kv_tensors = tf.split(kv_tensors, self.num_heads * 2, axis=1)
if self.parameters.mode in [base_layers.TRAIN, base_layers.EVAL]:
assert attn_mask is not None
if (self.attention_dropout_rate > 0.0 and
self.parameters.mode == base_layers.TRAIN):
attn_mask *= self.random_drop_to_zero(attn_mask,
self.attention_dropout_rate)
attn_mask = tf.expand_dims(attn_mask, 1)
invalid_mask = (1 - attn_mask) * self.parameters.invalid_logit
queries = tf.transpose(q_tensor, [0, 2, 1, 3])
keys = tf.transpose(kv_tensors[0], [0, 2, 1, 3])
values = tf.transpose(kv_tensors[1], [0, 2, 1, 3])
attn_logits = self.qactivation(tf.matmul(queries, keys, transpose_b=True))
attn_logits_masked = attn_logits * attn_mask + invalid_mask
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
result = tf.matmul(attention, values)
result = tf.transpose(result, [0, 2, 1, 3])
result = tf.reshape(result, [bsz, -1, self.model_dimension])
return self.qconcat([result])
else:
# We need to invoke the keras layer before calling APIs that it provides
# such as quantize_using_range.
self.qconcat(None)
context = []
for head in range(self.num_heads):
queries = q_tensor[head]
if self.parameters.mode == base_layers.PREDICT:
# PREDICT mode assumes callers tile and merge beam size with batch
# size. Hence extracting the first entry in the tile to compute
# attention.
keys = tf.split(kv_tensors[head], bsz, axis=0)
keys = keys[0]
values = tf.split(kv_tensors[head + self.num_heads], bsz, axis=0)
values = values[0]
else:
keys = kv_tensors[head]
values = kv_tensors[head + self.num_heads]
attn_logits_masked = self.qactivation(
tf.matmul(queries, keys, transpose_b=True))
attention = tf.nn.softmax(attn_logits_masked)
attention = self.qrange_sigmoid(attention, tf_only=True)
context.append(
self.qconcat.quantize_using_range(tf.matmul(attention, values)))
# Concatenating heads along axis 1.
result = self.qconcat.quantize_using_range(tf.concat(context, axis=1))
return tf.reshape(result, [-1, 1, self.model_dimension])
class DecoderUniformAttention(base_layers.BaseLayer):
"""Decoder uniform attention."""
def __init__(self,
model_dimension,
max_time_step,
attention_dropout_rate=0.0,
beam_size=1,
**kwargs):
self.model_dimension = model_dimension
self.max_time_step = max_time_step
self.beam_size = beam_size
self.causal_mask = tf.expand_dims(
tf.linalg.band_part(tf.ones([max_time_step, max_time_step]), -1, 0), 0)
self.dense_layers = dense_layers.BaseQDenseVarLen(
units=model_dimension,
activation=None,
normalize=False,
bias=False,
rank=3,
**kwargs)
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
super(DecoderUniformAttention, self).__init__(**kwargs)
def get_uniform_attention(self, attn_mask=None):
"""Generates uniform attention matrix using `causal_mask`."""
mask = tf.math.divide_no_nan(
self.causal_mask,
tf.reduce_sum(self.causal_mask, axis=-1, keepdims=True))
if attn_mask is not None:
self._assert_rank_and_type(attn_mask, 3)
mask = mask * attn_mask
return mask
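  # Editorial worked example (not part of the original layer): with
  # max_time_step=3 and no attn_mask, causal_mask has shape [1, 3, 3] with
  # rows [1,0,0], [1,1,0], [1,1,1], so get_uniform_attention returns rows
  # [1, 0, 0], [1/2, 1/2, 0], [1/3, 1/3, 1/3]; decoding step t attends
  # uniformly over positions 0..t.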
def call(self,
inputs,
mask,
inverse_normalizer,
step=None,
beam_indices=None,
cache=None,
attn_mask=None):
self._assert_rank_and_type(inputs, 3)
self._assert_rank_and_type(mask, 3)
assert inputs.get_shape().as_list()[-1] == self.model_dimension
layer_out = self.dense_layers(inputs, mask, inverse_normalizer)
# TFLite mode is handled with a custom op.
if self.parameters.mode == base_layers.TFLITE:
assert beam_indices is not None
assert step is not None
layer_out = tf_custom_ops_py.uniform_causal_attn(
layer_out, step, beam_indices, self.model_dimension, self.beam_size)
else:
# Cache is used for TF Predict and Eval modes.
if cache is None:
attention_matrix = self.get_uniform_attention(attn_mask)
layer_out = tf.matmul(attention_matrix, layer_out)
else:
assert self.parameters.mode in [base_layers.PREDICT, base_layers.EVAL]
assert step is not None
cache['uniform_avg'] = layer_out + cache['uniform_avg']
layer_out = cache['uniform_avg'] / tf.cast(step, dtype=tf.float32)
return self.qoutput(layer_out)
| 30,161 | 43.817236 | 80 | py |
models | models-master/research/seq_flow_lite/layers/conv_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base layer for convolution."""
import copy
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class EncoderQConvolution(base_layers.BaseLayer):
"""Quantized encoder convolution layers."""
def __init__(self,
filters,
ksize,
stride=1,
padding="SAME",
dilations=None,
activation=tf.keras.layers.ReLU(),
bias=True,
rank=4,
normalization_fn=None,
**kwargs):
self.out_filters = filters
assert rank >= 3 and rank <= 4
self.rank = rank
self.ksize = self._unpack(ksize)
self.strides = self._unpack(stride)
self.dilations = [1] + self._unpack(dilations) + [1] if dilations else None
self.activation = activation
self.bias = bias
self.padding = padding
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
self._create_normalizer(normalization_fn=normalization_fn, **kwargs)
super(EncoderQConvolution, self).__init__(**kwargs)
def _unpack(self, value):
if not isinstance(value, list):
assert isinstance(value, int)
return [1 if self.rank == 3 else value, value]
else:
assert len(value) == 2 and self.rank == 4
assert isinstance(value[0], int) and isinstance(value[1], int)
return value
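  # Editorial worked examples (not part of the original layer): with rank=3,
  # _unpack(5) returns [1, 5] (a 1-D convolution over time); with rank=4 it
  # returns [5, 5]; an explicit pair such as [3, 5] is only accepted when
  # rank=4 and is returned unchanged.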
def build(self, input_shapes):
assert len(input_shapes) == self.rank
self.in_filters = input_shapes[-1]
shape = self.ksize + [self.in_filters, self.out_filters]
self.filters = self.add_weight_wrapper(shape=shape)
if self.bias:
self.b = self.add_bias(shape=[self.out_filters])
def _create_normalizer(self, normalization_fn, **kwargs):
if normalization_fn is None:
self.normalization = normalization_layers.BatchNormalization(**kwargs)
else:
self.normalization = copy.deepcopy(normalization_fn)
def _conv_r4(self, inputs, normalize_method):
outputs = tf.nn.conv2d(
inputs,
self.quantize_parameter(self.filters),
strides=self.strides,
padding=self.padding,
dilations=self.dilations)
if self.bias:
outputs = tf.nn.bias_add(outputs, self.b)
outputs = normalize_method(outputs)
if self.activation:
outputs = self.activation(outputs)
return self.qoutput(outputs)
def _conv_r3(self, inputs, normalize_method):
bsz = self.get_batch_dimension(inputs)
inputs_r4 = tf.reshape(inputs, [bsz, 1, -1, self.in_filters])
outputs = self._conv_r4(inputs_r4, normalize_method)
return tf.reshape(outputs, [bsz, -1, self.out_filters])
def call(self, inputs):
def normalize_method(tensor):
return self.normalization(tensor)
return self._do_call(inputs, normalize_method)
def _do_call(self, inputs, normalize_method):
if self.rank == 3:
return self._conv_r3(inputs, normalize_method)
return self._conv_r4(inputs, normalize_method)
def quantize_using_output_range(self, tensor):
return self.qoutput.quantize_using_range(tensor)
class EncoderQConvolutionVarLen(EncoderQConvolution):
"""Convolution on variable length sequence."""
def _create_normalizer(self, normalization_fn, **kwargs):
if normalization_fn is None:
self.normalization = normalization_layers.VarLenBatchNormalization(
rank=4, **kwargs)
else:
self.normalization = copy.deepcopy(normalization_fn)
def call(self, inputs, mask, inverse_normalizer):
def normalize_method(tensor):
return self.normalization(tensor, mask, inverse_normalizer)
return self._do_call(inputs, normalize_method)
| 4,468 | 34.468254 | 80 | py |
models | models-master/research/seq_flow_lite/layers/quantization_layers.py | # Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers for quantization."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
class ActivationQuantization(base_layers.BaseLayer):
"""A class that applies quantization to a activation tensor."""
def __init__(self, ema_decay=0.99, num_bits=8, **kwargs):
self.ema_decay = ema_decay
self.num_bits = num_bits
super(ActivationQuantization, self).__init__(**kwargs)
def build(self, input_shapes):
if self.parameters.quantize:
self.min_var = self.add_weight(
"min", initializer=tf.keras.initializers.Zeros(), trainable=False)
self.max_var = self.add_weight(
"max", initializer=tf.keras.initializers.Ones(), trainable=False)
def call(self, inputs):
if self.parameters.quantize:
if self.parameters.mode == base_layers.TRAIN:
# Toco expects 0.0 to be part of the quantization range.
batch_min = tf.minimum(tf.reduce_min(inputs), 0.0)
min_var = self.assign_moving_average(self.min_var, batch_min,
self.ema_decay)
batch_max = tf.maximum(tf.reduce_max(inputs), 0.0)
max_var = self.assign_moving_average(self.max_var, batch_max,
self.ema_decay)
with tf.control_dependencies([min_var, max_var]):
return tf.quantization.fake_quant_with_min_max_vars(
inputs, batch_min, batch_max, num_bits=self.num_bits)
else:
return tf.quantization.fake_quant_with_min_max_vars(
inputs, self.min_var, self.max_var, num_bits=self.num_bits)
return inputs
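  # Editorial worked example (not part of the original layer, assuming the
  # usual exponential-moving-average update in assign_moving_average): with
  # ema_decay=0.99, min_var=0.0 and a batch minimum of -2.0, the stored range
  # becomes 0.99 * 0.0 + 0.01 * (-2.0) = -0.02 after one training step.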
def quantize_using_range(self, inputs):
    # This method can only be called after a call to the "call" method of
    # this class.
if self.parameters.quantize:
return tf.quantization.fake_quant_with_min_max_vars(
inputs, self.min_var, self.max_var, num_bits=self.num_bits)
return inputs
class ConcatQuantization(ActivationQuantization):
"""A class that applies quantization to a activation tensor."""
def __init__(self, axis=2, **kwargs):
self.axis = axis
super(ConcatQuantization, self).__init__(**kwargs)
def _reduce_list(self, tensor_list, functor):
reduce_result = [functor(tensor) for tensor in tensor_list]
# Toco expects 0.0 to be part of the quantization range.
reduce_result.append(tf.constant(0.0))
return functor(tf.stack(reduce_result))
def call(self, tensors):
# Ignore empty invocations done to build the keras layer.
if tensors is None:
return
if self.parameters.quantize:
if self.parameters.mode == base_layers.TRAIN:
# Toco expects 0.0 to be part of the quantization range.
batch_min = self._reduce_list(tensors, tf.reduce_min)
min_var = self.assign_moving_average(self.min_var, batch_min,
self.ema_decay)
batch_max = self._reduce_list(tensors, tf.reduce_max)
max_var = self.assign_moving_average(self.max_var, batch_max,
self.ema_decay)
else:
min_var, max_var = self.min_var, self.max_var
tensors = [
tf.quantization.fake_quant_with_min_max_vars(
tensor, min_var, max_var, num_bits=self.num_bits)
for tensor in tensors
]
tensor = tf.concat(tensors, axis=self.axis)
return tf.quantization.fake_quant_with_min_max_vars(
tensor, min_var, max_var, num_bits=self.num_bits)
return tf.concat(tensors, axis=self.axis)
| 4,237 | 40.145631 | 80 | py |
models | models-master/research/attention_ocr/python/sequence_layers.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various implementations of sequence layers for character prediction.
A 'sequence layer' is a part of a computation graph which is responsible for
producing a sequence of characters using extracted image features. There are
many reasonable ways to implement such layers. All of them use RNNs.
This module provides implementations which uses 'attention' mechanism to
spatially 'pool' image features and also can use a previously predicted
character to predict the next (aka auto regression).
Usage:
Select one of available classes, e.g. Attention or use a wrapper function to
pick one based on your requirements:
layer_class = sequence_layers.get_layer_class(use_attention=True,
use_autoregression=True)
layer = layer_class(net, labels_one_hot, model_params, method_params)
char_logits = layer.create_logits()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import abc
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
def orthogonal_initializer(shape, dtype=tf.float32, *args, **kwargs):
"""Generates orthonormal matrices with random values.
Orthonormal initialization is important for RNNs:
http://arxiv.org/abs/1312.6120
http://smerity.com/articles/2016/orthogonal_init.html
For non-square shapes the returned matrix will be semi-orthonormal: if the
number of columns exceeds the number of rows, then the rows are orthonormal
vectors; but if the number of rows exceeds the number of columns, then the
columns are orthonormal vectors.
We use SVD decomposition to generate an orthonormal matrix with random
  values, the same way it is done in the Lasagne library for Theano. Note
that both u and v returned by the svd are orthogonal and random. We just need
to pick one with the right shape.
Args:
shape: a shape of the tensor matrix to initialize.
dtype: a dtype of the initialized tensor.
*args: not used.
**kwargs: not used.
Returns:
An initialized tensor.
"""
del args
del kwargs
flat_shape = (shape[0], np.prod(shape[1:]))
w = np.random.randn(*flat_shape)
u, _, v = np.linalg.svd(w, full_matrices=False)
w = u if u.shape == flat_shape else v
return tf.constant(w.reshape(shape), dtype=dtype)
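# Editorial example (not part of the original file): a numpy-only check of the
# semi-orthonormal property described above. It repeats the SVD trick so the
# row-orthonormality claim can be verified directly.
def _orthonormal_rows_example(num_rows=4, num_cols=16):
  """Returns True if the sampled matrix has (numerically) orthonormal rows."""
  w = np.random.randn(num_rows, num_cols)
  u, _, v = np.linalg.svd(w, full_matrices=False)
  w = u if u.shape == (num_rows, num_cols) else v
  return np.allclose(w.dot(w.T), np.eye(num_rows), atol=1e-6)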
SequenceLayerParams = collections.namedtuple('SequenceLogitsParams', [
'num_lstm_units', 'weight_decay', 'lstm_state_clip_value'
])
class SequenceLayerBase(object):
"""A base abstruct class for all sequence layers.
A child class has to define following methods:
get_train_input
get_eval_input
unroll_cell
"""
__metaclass__ = abc.ABCMeta
def __init__(self, net, labels_one_hot, model_params, method_params):
"""Stores argument in member variable for further use.
Args:
net: A tensor with shape [batch_size, num_features, feature_size] which
contains some extracted image features.
labels_one_hot: An optional (can be None) ground truth labels for the
input features. Is a tensor with shape
[batch_size, seq_length, num_char_classes]
model_params: A namedtuple with model parameters (model.ModelParams).
method_params: A SequenceLayerParams instance.
"""
self._params = model_params
self._mparams = method_params
self._net = net
self._labels_one_hot = labels_one_hot
self._batch_size = tf.shape(input=net)[0]
# Initialize parameters for char logits which will be computed on the fly
# inside an LSTM decoder.
self._char_logits = {}
regularizer = tf.keras.regularizers.l2(0.5 * (self._mparams.weight_decay))
self._softmax_w = slim.model_variable(
'softmax_w',
[self._mparams.num_lstm_units, self._params.num_char_classes],
initializer=orthogonal_initializer,
regularizer=regularizer)
self._softmax_b = slim.model_variable(
'softmax_b', [self._params.num_char_classes],
initializer=tf.compat.v1.zeros_initializer(),
regularizer=regularizer)
@abc.abstractmethod
def get_train_input(self, prev, i):
"""Returns a sample to be used to predict a character during training.
This function is used as a loop_function for an RNN decoder.
Args:
prev: output tensor from previous step of the RNN. A tensor with shape:
[batch_size, num_char_classes].
i: index of a character in the output sequence.
Returns:
A tensor with shape [batch_size, ?] - depth depends on implementation
details.
"""
pass
@abc.abstractmethod
def get_eval_input(self, prev, i):
"""Returns a sample to be used to predict a character during inference.
This function is used as a loop_function for an RNN decoder.
Args:
prev: output tensor from previous step of the RNN. A tensor with shape:
[batch_size, num_char_classes].
i: index of a character in the output sequence.
Returns:
A tensor with shape [batch_size, ?] - depth depends on implementation
details.
"""
raise AssertionError('Not implemented')
@abc.abstractmethod
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
"""Unrolls an RNN cell for all inputs.
This is a placeholder to call some RNN decoder. It has a similar to
tf.seq2seq.rnn_decode interface.
Args:
      decoder_inputs: A list of 2D Tensors [batch_size x input_size]. In fact,
most of existing decoders in presence of a loop_function use only the
first element to determine batch_size and length of the list to
determine number of steps.
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
loop_function: function will be applied to the i-th output in order to
generate the i+1-st input (see self.get_input).
cell: rnn_cell.RNNCell defining the cell function and size.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of character logits of the same length as
decoder_inputs of 2D Tensors with shape [batch_size x num_characters].
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
pass
def is_training(self):
"""Returns True if the layer is created for training stage."""
return self._labels_one_hot is not None
def char_logit(self, inputs, char_index):
"""Creates logits for a character if required.
Args:
inputs: A tensor with shape [batch_size, ?] (depth is implementation
dependent).
char_index: A integer index of a character in the output sequence.
Returns:
A tensor with shape [batch_size, num_char_classes]
"""
if char_index not in self._char_logits:
self._char_logits[char_index] = tf.compat.v1.nn.xw_plus_b(inputs, self._softmax_w,
self._softmax_b)
return self._char_logits[char_index]
def char_one_hot(self, logit):
"""Creates one hot encoding for a logit of a character.
Args:
logit: A tensor with shape [batch_size, num_char_classes].
Returns:
A tensor with shape [batch_size, num_char_classes]
"""
prediction = tf.argmax(input=logit, axis=1)
return slim.one_hot_encoding(prediction, self._params.num_char_classes)
def get_input(self, prev, i):
"""A wrapper for get_train_input and get_eval_input.
Args:
prev: output tensor from previous step of the RNN. A tensor with shape:
[batch_size, num_char_classes].
i: index of a character in the output sequence.
Returns:
A tensor with shape [batch_size, ?] - depth depends on implementation
details.
"""
if self.is_training():
return self.get_train_input(prev, i)
else:
return self.get_eval_input(prev, i)
def create_logits(self):
"""Creates character sequence logits for a net specified in the constructor.
A "main" method for the sequence layer which glues together all pieces.
Returns:
A tensor with shape [batch_size, seq_length, num_char_classes].
"""
with tf.compat.v1.variable_scope('LSTM'):
first_label = self.get_input(prev=None, i=0)
decoder_inputs = [first_label] + [None] * (self._params.seq_length - 1)
lstm_cell = tf.compat.v1.nn.rnn_cell.LSTMCell(
self._mparams.num_lstm_units,
use_peepholes=False,
cell_clip=self._mparams.lstm_state_clip_value,
state_is_tuple=True,
initializer=orthogonal_initializer)
lstm_outputs, _ = self.unroll_cell(
decoder_inputs=decoder_inputs,
initial_state=lstm_cell.zero_state(self._batch_size, tf.float32),
loop_function=self.get_input,
cell=lstm_cell)
with tf.compat.v1.variable_scope('logits'):
logits_list = [
tf.expand_dims(self.char_logit(logit, i), axis=1)
for i, logit in enumerate(lstm_outputs)
]
return tf.concat(logits_list, 1)
class NetSlice(SequenceLayerBase):
"""A layer which uses a subset of image features to predict each character.
"""
def __init__(self, *args, **kwargs):
super(NetSlice, self).__init__(*args, **kwargs)
self._zero_label = tf.zeros(
tf.stack([self._batch_size, self._params.num_char_classes]))
def get_image_feature(self, char_index):
"""Returns a subset of image features for a character.
Args:
char_index: an index of a character.
Returns:
A tensor with shape [batch_size, ?]. The output depth depends on the
depth of input net.
"""
batch_size, features_num, _ = [d.value for d in self._net.get_shape()]
slice_len = int(features_num / self._params.seq_length)
    # When features_num != seq_length, we just pick a subset of image
    # features; this choice is arbitrary and has no intuitive geometrical
    # interpretation. If features_num is not divisible by seq_length there
    # will be unused image features.
net_slice = self._net[:, char_index:char_index + slice_len, :]
feature = tf.reshape(net_slice, [batch_size, -1])
logging.debug('Image feature: %s', feature)
return feature
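  # Editorial worked example (not part of the original layer): with
  # features_num=8 and seq_length=4, slice_len=2 and character i reads feature
  # columns [i, i + 2), so consecutive characters see overlapping slices and
  # columns 5..7 are never used.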
def get_eval_input(self, prev, i):
"""See SequenceLayerBase.get_eval_input for details."""
del prev
return self.get_image_feature(i)
def get_train_input(self, prev, i):
"""See SequenceLayerBase.get_train_input for details."""
return self.get_eval_input(prev, i)
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
"""See SequenceLayerBase.unroll_cell for details."""
return tf.contrib.legacy_seq2seq.rnn_decoder(
decoder_inputs=decoder_inputs,
initial_state=initial_state,
cell=cell,
loop_function=self.get_input)
class NetSliceWithAutoregression(NetSlice):
"""A layer similar to NetSlice, but it also uses auto regression.
The "auto regression" means that we use network output for previous character
as a part of input for the current character.
"""
def __init__(self, *args, **kwargs):
super(NetSliceWithAutoregression, self).__init__(*args, **kwargs)
def get_eval_input(self, prev, i):
"""See SequenceLayerBase.get_eval_input for details."""
if i == 0:
prev = self._zero_label
else:
logit = self.char_logit(prev, char_index=i - 1)
prev = self.char_one_hot(logit)
image_feature = self.get_image_feature(char_index=i)
return tf.concat([image_feature, prev], 1)
def get_train_input(self, prev, i):
"""See SequenceLayerBase.get_train_input for details."""
if i == 0:
prev = self._zero_label
else:
prev = self._labels_one_hot[:, i - 1, :]
image_feature = self.get_image_feature(i)
return tf.concat([image_feature, prev], 1)
class Attention(SequenceLayerBase):
"""A layer which uses attention mechanism to select image features."""
def __init__(self, *args, **kwargs):
super(Attention, self).__init__(*args, **kwargs)
self._zero_label = tf.zeros(
tf.stack([self._batch_size, self._params.num_char_classes]))
def get_eval_input(self, prev, i):
"""See SequenceLayerBase.get_eval_input for details."""
del prev, i
# The attention_decoder will fetch image features from the net, no need for
# extra inputs.
return self._zero_label
def get_train_input(self, prev, i):
"""See SequenceLayerBase.get_train_input for details."""
return self.get_eval_input(prev, i)
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
return tf.contrib.legacy_seq2seq.attention_decoder(
decoder_inputs=decoder_inputs,
initial_state=initial_state,
attention_states=self._net,
cell=cell,
loop_function=self.get_input)
class AttentionWithAutoregression(Attention):
"""A layer which uses both attention and auto regression."""
def __init__(self, *args, **kwargs):
super(AttentionWithAutoregression, self).__init__(*args, **kwargs)
def get_train_input(self, prev, i):
"""See SequenceLayerBase.get_train_input for details."""
if i == 0:
return self._zero_label
else:
# TODO(gorban): update to gradually introduce gt labels.
return self._labels_one_hot[:, i - 1, :]
def get_eval_input(self, prev, i):
"""See SequenceLayerBase.get_eval_input for details."""
if i == 0:
return self._zero_label
else:
logit = self.char_logit(prev, char_index=i - 1)
return self.char_one_hot(logit)
def get_layer_class(use_attention, use_autoregression):
"""A convenience function to get a layer class based on requirements.
Args:
use_attention: if True a returned class will use attention.
use_autoregression: if True a returned class will use auto regression.
Returns:
One of available sequence layers (child classes for SequenceLayerBase).
"""
if use_attention and use_autoregression:
layer_class = AttentionWithAutoregression
elif use_attention and not use_autoregression:
layer_class = Attention
elif not use_attention and not use_autoregression:
layer_class = NetSlice
elif not use_attention and use_autoregression:
layer_class = NetSliceWithAutoregression
else:
raise AssertionError('Unsupported sequence layer class')
logging.debug('Use %s as a layer class', layer_class.__name__)
return layer_class
| 15,179 | 34.886525 | 88 | py |
models | models-master/research/deep_speech/deep_speech_model.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network structure for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Supported rnn cells.
SUPPORTED_RNNS = {
"lstm": tf.keras.layers.LSTMCell,
"rnn": tf.keras.layers.SimpleRNNCell,
"gru": tf.keras.layers.GRUCell,
}
# Parameters for batch normalization.
_BATCH_NORM_EPSILON = 1e-5
_BATCH_NORM_DECAY = 0.997
# Filters of convolution layer
_CONV_FILTERS = 32
def batch_norm(inputs, training):
"""Batch normalization layer.
Note that the momentum to use will affect validation accuracy over time.
Batch norm has different behaviors during training/evaluation. With a large
momentum, the model takes longer to get a near-accurate estimation of the
moving mean/variance over the entire training dataset, which means we need
more iterations to see good evaluation results. If the training data is evenly
distributed over the feature space, we can also try setting a smaller momentum
  (such as 0.1) to get good evaluation results sooner.
Args:
inputs: input data for batch norm layer.
training: a boolean to indicate if it is in training stage.
Returns:
tensor output from batch norm layer.
"""
return tf.keras.layers.BatchNormalization(
momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON)(inputs, training=training)
def _conv_bn_layer(inputs, padding, filters, kernel_size, strides, layer_id,
training):
"""Defines 2D convolutional + batch normalization layer.
Args:
inputs: input data for convolution layer.
padding: padding to be applied before convolution layer.
filters: an integer, number of output filters in the convolution.
kernel_size: a tuple specifying the height and width of the 2D convolution
window.
strides: a tuple specifying the stride length of the convolution.
layer_id: an integer specifying the layer index.
training: a boolean to indicate which stage we are in (training/eval).
Returns:
tensor output from the current layer.
"""
  # Perform symmetric padding on the time and feature (frequency) dimensions.
  # This step is required to avoid issues when the RNN output sequence is
  # shorter than the label length.
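  # Editorial worked example (not part of the original model): for layer_id=1
  # (kernel 41, stride 2, padding 20 per side) a T-step input yields
  # ceil(T / 2) output steps, so the two conv layers together shrink the time
  # dimension by roughly a factor of 4.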
inputs = tf.pad(
inputs,
[[0, 0], [padding[0], padding[0]], [padding[1], padding[1]], [0, 0]])
inputs = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size, strides=strides,
padding="valid", use_bias=False, activation=tf.nn.relu6,
name="cnn_{}".format(layer_id))(inputs)
return batch_norm(inputs, training)
def _rnn_layer(inputs, rnn_cell, rnn_hidden_size, layer_id, is_batch_norm,
is_bidirectional, training):
"""Defines a batch normalization + rnn layer.
Args:
inputs: input tensors for the current layer.
rnn_cell: RNN cell instance to use.
rnn_hidden_size: an integer for the dimensionality of the rnn output space.
layer_id: an integer for the index of current layer.
is_batch_norm: a boolean specifying whether to perform batch normalization
on input states.
is_bidirectional: a boolean specifying whether the rnn layer is
bi-directional.
training: a boolean to indicate which stage we are in (training/eval).
Returns:
tensor output for the current layer.
"""
if is_batch_norm:
inputs = batch_norm(inputs, training)
if is_bidirectional:
rnn_outputs = tf.keras.layers.Bidirectional(
tf.keras.layers.RNN(rnn_cell(rnn_hidden_size),
return_sequences=True))(inputs)
else:
rnn_outputs = tf.keras.layers.RNN(
rnn_cell(rnn_hidden_size), return_sequences=True)(inputs)
return rnn_outputs
class DeepSpeech2(object):
"""Define DeepSpeech2 model."""
def __init__(self, num_rnn_layers, rnn_type, is_bidirectional,
rnn_hidden_size, num_classes, use_bias):
"""Initialize DeepSpeech2 model.
Args:
num_rnn_layers: an integer, the number of rnn layers. By default, it's 5.
rnn_type: a string, one of the supported rnn cells: gru, rnn and lstm.
is_bidirectional: a boolean to indicate if the rnn layer is bidirectional.
rnn_hidden_size: an integer for the number of hidden states in each unit.
num_classes: an integer, the number of output classes/labels.
use_bias: a boolean specifying whether to use bias in the last fc layer.
"""
self.num_rnn_layers = num_rnn_layers
self.rnn_type = rnn_type
self.is_bidirectional = is_bidirectional
self.rnn_hidden_size = rnn_hidden_size
self.num_classes = num_classes
self.use_bias = use_bias
def __call__(self, inputs, training):
# Two cnn layers.
inputs = _conv_bn_layer(
inputs, padding=(20, 5), filters=_CONV_FILTERS, kernel_size=(41, 11),
strides=(2, 2), layer_id=1, training=training)
inputs = _conv_bn_layer(
inputs, padding=(10, 5), filters=_CONV_FILTERS, kernel_size=(21, 11),
strides=(2, 1), layer_id=2, training=training)
# output of conv_layer2 with the shape of
# [batch_size (N), times (T), features (F), channels (C)].
# Convert the conv output to rnn input.
batch_size = tf.shape(inputs)[0]
feat_size = inputs.get_shape().as_list()[2]
inputs = tf.reshape(
inputs,
[batch_size, -1, feat_size * _CONV_FILTERS])
# RNN layers.
rnn_cell = SUPPORTED_RNNS[self.rnn_type]
for layer_counter in xrange(self.num_rnn_layers):
# No batch normalization on the first layer.
is_batch_norm = (layer_counter != 0)
inputs = _rnn_layer(
inputs, rnn_cell, self.rnn_hidden_size, layer_counter + 1,
is_batch_norm, self.is_bidirectional, training)
# FC layer with batch norm.
inputs = batch_norm(inputs, training)
logits = tf.keras.layers.Dense(
self.num_classes, use_bias=self.use_bias, activation="softmax")(inputs)
return logits
| 6,717 | 36.741573 | 89 | py |
models | models-master/research/deep_speech/deep_speech.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main entry to train and evaluate DeepSpeech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# pylint: disable=g-bad-import-order
from absl import app as absl_app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order
import data.dataset as dataset
import decoder
import deep_speech_model
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
# Default vocabulary file
_VOCABULARY_FILE = os.path.join(
os.path.dirname(__file__), "data/vocabulary.txt")
# Evaluation metrics
_WER_KEY = "WER"
_CER_KEY = "CER"
def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):
"""Computes the time_steps/ctc_input_length after convolution.
Suppose that the original feature contains two parts:
1) Real spectrogram signals, spanning input_length steps.
2) Padded part with all 0s.
The total length of those two parts is denoted as max_time_steps, which is
the padded length of the current batch. After convolution layers, the time
  steps of a spectrogram feature will be decreased. Since we know what
  fraction of the padded length the original signal occupies, we can compute
  the time steps for the signal after the conv layers (ctc_input_length) as:
ctc_input_length = (input_length / max_time_steps) * output_length_of_conv.
  This length is then fed into the CTC loss function to compute the loss.
Args:
max_time_steps: max_time_steps for the batch, after padding.
ctc_time_steps: number of timesteps after convolution.
input_length: actual length of the original spectrogram, without padding.
Returns:
the ctc_input_length after convolution layer.
"""
ctc_input_length = tf.cast(tf.multiply(
input_length, ctc_time_steps), dtype=tf.float32)
return tf.cast(tf.math.floordiv(
ctc_input_length, tf.cast(max_time_steps, dtype=tf.float32)), dtype=tf.int32)
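# Editorial worked example (not part of the original script): with a padded
# batch of max_time_steps=200, ctc_time_steps=50 after the conv layers, and an
# unpadded input_length of 150, the formula above yields
# floor(150 * 50 / 200) = 37 valid CTC time steps for that example.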
def evaluate_model(estimator, speech_labels, entries, input_fn_eval):
"""Evaluate the model performance using WER anc CER as metrics.
WER: Word Error Rate
CER: Character Error Rate
Args:
estimator: estimator to evaluate.
speech_labels: a string specifying all the character in the vocabulary.
entries: a list of data entries (audio_file, file_size, transcript) for the
given dataset.
input_fn_eval: data input function for evaluation.
Returns:
Evaluation result containing 'wer' and 'cer' as two metrics.
"""
# Get predictions
predictions = estimator.predict(input_fn=input_fn_eval)
# Get probabilities of each predicted class
probs = [pred["probabilities"] for pred in predictions]
num_of_examples = len(probs)
targets = [entry[2] for entry in entries] # The ground truth transcript
total_wer, total_cer = 0, 0
greedy_decoder = decoder.DeepSpeechDecoder(speech_labels)
for i in range(num_of_examples):
# Decode string.
decoded_str = greedy_decoder.decode(probs[i])
# Compute CER.
total_cer += greedy_decoder.cer(decoded_str, targets[i]) / float(
len(targets[i]))
# Compute WER.
total_wer += greedy_decoder.wer(decoded_str, targets[i]) / float(
len(targets[i].split()))
# Get mean value
total_cer /= num_of_examples
total_wer /= num_of_examples
global_step = estimator.get_variable_value(tf.compat.v1.GraphKeys.GLOBAL_STEP)
eval_results = {
_WER_KEY: total_wer,
_CER_KEY: total_cer,
tf.compat.v1.GraphKeys.GLOBAL_STEP: global_step,
}
return eval_results
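# Editorial worked example (not part of the original script, assuming that
# decoder.cer/wer return raw edit distances, as the per-example normalization
# above suggests): for the target "the cat" decoded as "the bat", CER adds
# 1 / 7 ~= 0.14 and WER adds 1 / 2 = 0.5 before averaging over all examples.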
def model_fn(features, labels, mode, params):
"""Define model function for deep speech model.
Args:
features: a dictionary of input_data features. It includes the data
input_length, label_length and the spectrogram features.
labels: a list of labels for the input data.
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`.
params: a dict of hyper parameters to be passed to model_fn.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
num_classes = params["num_classes"]
input_length = features["input_length"]
label_length = features["label_length"]
features = features["features"]
# Create DeepSpeech2 model.
model = deep_speech_model.DeepSpeech2(
flags_obj.rnn_hidden_layers, flags_obj.rnn_type,
flags_obj.is_bidirectional, flags_obj.rnn_hidden_size,
num_classes, flags_obj.use_bias)
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(features, training=False)
predictions = {
"classes": tf.argmax(logits, axis=2),
"probabilities": logits,
"logits": logits
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions)
# In training mode.
logits = model(features, training=True)
ctc_input_length = compute_length_after_conv(
tf.shape(features)[1], tf.shape(logits)[1], input_length)
# Compute CTC loss
loss = tf.reduce_mean(tf.keras.backend.ctc_batch_cost(
labels, logits, ctc_input_length, label_length))
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=flags_obj.learning_rate)
global_step = tf.compat.v1.train.get_or_create_global_step()
minimize_op = optimizer.minimize(loss, global_step=global_step)
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
# Create the train_op that groups both minimize_ops and update_ops
train_op = tf.group(minimize_op, update_ops)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op)
def generate_dataset(data_dir):
"""Generate a speech dataset."""
audio_conf = dataset.AudioConfig(sample_rate=flags_obj.sample_rate,
window_ms=flags_obj.window_ms,
stride_ms=flags_obj.stride_ms,
normalize=True)
train_data_conf = dataset.DatasetConfig(
audio_conf,
data_dir,
flags_obj.vocabulary_file,
flags_obj.sortagrad
)
speech_dataset = dataset.DeepSpeechDataset(train_data_conf)
return speech_dataset
def per_device_batch_size(batch_size, num_gpus):
"""For multi-gpu, batch-size must be a multiple of the number of GPUs.
  Note that distribution strategy handles this automatically when used with
  Keras. When using Estimator, we need to compute the per-GPU batch size.
Args:
batch_size: Global batch size to be divided among devices. This should be
equal to num_gpus times the single-GPU batch_size for multi-gpu training.
num_gpus: How many GPUs are used with DistributionStrategies.
Returns:
Batch size per device.
Raises:
ValueError: if batch_size is not divisible by number of devices
"""
if num_gpus <= 1:
return batch_size
remainder = batch_size % num_gpus
if remainder:
err = ('When running with multiple GPUs, batch size '
'must be a multiple of the number of available GPUs. Found {} '
'GPUs with a batch size of {}; try --batch_size={} instead.'
).format(num_gpus, batch_size, batch_size - remainder)
raise ValueError(err)
return int(batch_size / num_gpus)
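# For instance, batch_size=128 with num_gpus=4 yields 32 examples per device,
# whereas batch_size=130 with num_gpus=4 raises a ValueError suggesting
# --batch_size=128.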
def run_deep_speech(_):
"""Run deep speech training and eval loop."""
tf.compat.v1.set_random_seed(flags_obj.seed)
# Data preprocessing
logging.info("Data preprocessing...")
train_speech_dataset = generate_dataset(flags_obj.train_data_dir)
eval_speech_dataset = generate_dataset(flags_obj.eval_data_dir)
# Number of label classes. Label string is "[a-z]' -"
num_classes = len(train_speech_dataset.speech_labels)
# Use distribution strategy for multi-gpu training
num_gpus = flags_core.get_num_gpus(flags_obj)
distribution_strategy = distribution_utils.get_distribution_strategy(num_gpus=num_gpus)
run_config = tf.estimator.RunConfig(
train_distribute=distribution_strategy)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=flags_obj.model_dir,
config=run_config,
params={
"num_classes": num_classes,
}
)
# Benchmark logging
run_params = {
"batch_size": flags_obj.batch_size,
"train_epochs": flags_obj.train_epochs,
"rnn_hidden_size": flags_obj.rnn_hidden_size,
"rnn_hidden_layers": flags_obj.rnn_hidden_layers,
"rnn_type": flags_obj.rnn_type,
"is_bidirectional": flags_obj.is_bidirectional,
"use_bias": flags_obj.use_bias
  }
  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info(
      "deep_speech", "LibriSpeech", run_params,
      test_id=flags_obj.benchmark_test_id)
per_replica_batch_size = per_device_batch_size(flags_obj.batch_size, num_gpus)
def input_fn_train():
return dataset.input_fn(
per_replica_batch_size, train_speech_dataset)
def input_fn_eval():
return dataset.input_fn(
per_replica_batch_size, eval_speech_dataset)
total_training_cycle = (flags_obj.train_epochs //
flags_obj.epochs_between_evals)
for cycle_index in range(total_training_cycle):
logging.info("Starting a training cycle: %d/%d",
cycle_index + 1, total_training_cycle)
# Perform batch_wise dataset shuffling
train_speech_dataset.entries = dataset.batch_wise_dataset_shuffle(
train_speech_dataset.entries, cycle_index, flags_obj.sortagrad,
flags_obj.batch_size)
estimator.train(input_fn=input_fn_train)
# Evaluation
logging.info("Starting to evaluate...")
eval_results = evaluate_model(
estimator, eval_speech_dataset.speech_labels,
eval_speech_dataset.entries, input_fn_eval)
# Log the WER and CER results.
benchmark_logger.log_evaluation_result(eval_results)
logging.info(
"Iteration {}: WER = {:.2f}, CER = {:.2f}".format(
cycle_index + 1, eval_results[_WER_KEY], eval_results[_CER_KEY]))
# If some evaluation threshold is met
if model_helpers.past_stop_threshold(
flags_obj.wer_threshold, eval_results[_WER_KEY]):
break
def define_deep_speech_flags():
"""Add flags for run_deep_speech."""
# Add common flags
flags_core.define_base(
data_dir=False, # we use train_data_dir and eval_data_dir instead
export_dir=True,
train_epochs=True,
hooks=True,
num_gpu=True,
epochs_between_evals=True
)
flags_core.define_performance(
num_parallel_calls=False,
inter_op=False,
intra_op=False,
synthetic_data=False,
max_train_steps=False,
dtype=False
)
flags_core.define_benchmark()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(
model_dir="/tmp/deep_speech_model/",
export_dir="/tmp/deep_speech_saved_model/",
train_epochs=10,
batch_size=128,
hooks="")
# Deep speech flags
flags.DEFINE_integer(
name="seed", default=1,
help=flags_core.help_wrap("The random seed."))
flags.DEFINE_string(
name="train_data_dir",
default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
help=flags_core.help_wrap("The csv file path of train dataset."))
flags.DEFINE_string(
name="eval_data_dir",
default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
help=flags_core.help_wrap("The csv file path of evaluation dataset."))
flags.DEFINE_bool(
name="sortagrad", default=True,
help=flags_core.help_wrap(
"If true, sort examples by audio length and perform no "
"batch_wise shuffling for the first epoch."))
flags.DEFINE_integer(
name="sample_rate", default=16000,
help=flags_core.help_wrap("The sample rate for audio."))
flags.DEFINE_integer(
name="window_ms", default=20,
help=flags_core.help_wrap("The frame length for spectrogram."))
flags.DEFINE_integer(
name="stride_ms", default=10,
help=flags_core.help_wrap("The frame step."))
flags.DEFINE_string(
name="vocabulary_file", default=_VOCABULARY_FILE,
help=flags_core.help_wrap("The file path of vocabulary file."))
# RNN related flags
flags.DEFINE_integer(
name="rnn_hidden_size", default=800,
help=flags_core.help_wrap("The hidden size of RNNs."))
flags.DEFINE_integer(
name="rnn_hidden_layers", default=5,
help=flags_core.help_wrap("The number of RNN layers."))
flags.DEFINE_bool(
name="use_bias", default=True,
help=flags_core.help_wrap("Use bias in the last fully-connected layer"))
flags.DEFINE_bool(
name="is_bidirectional", default=True,
help=flags_core.help_wrap("If rnn unit is bidirectional"))
flags.DEFINE_enum(
name="rnn_type", default="gru",
enum_values=deep_speech_model.SUPPORTED_RNNS.keys(),
case_sensitive=False,
help=flags_core.help_wrap("Type of RNN cell."))
# Training related flags
flags.DEFINE_float(
name="learning_rate", default=5e-4,
help=flags_core.help_wrap("The initial learning rate."))
# Evaluation metrics threshold
flags.DEFINE_float(
name="wer_threshold", default=None,
help=flags_core.help_wrap(
"If passed, training will stop when the evaluation metric WER is "
"greater than or equal to wer_threshold. For libri speech dataset "
"the desired wer_threshold is 0.23 which is the result achieved by "
"MLPerf implementation."))
def main(_):
run_deep_speech(flags_obj)
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
define_deep_speech_flags()
flags_obj = flags.FLAGS
absl_app.run(main)
models | models-master/research/slim/nets/resnet_utils.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
import tf_slim as slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
padding='SAME', scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
rate=rate, padding='VALID', scope=scope)
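# Illustrative arithmetic for the stride > 1 branch: with kernel_size=3 and
# rate=2, the effective kernel size is 3 + (3 - 1) * (2 - 1) = 5, so pad_total
# is 4 and the input is padded by 2 pixels on each side before the 'VALID'
# convolution.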
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None,
store_non_strided_activations=False,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
factor of 2 when transitioning between consecutive ResNet blocks. This results
  in a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then we compute
responses twice.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
      dense prediction problems but increases the computation and memory cost
      at the last unit of each block by 4x.
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]) as sc:
block_stride = 1
for i, unit in enumerate(block.args):
if store_non_strided_activations and i == len(block.args) - 1:
# Move stride from the block's last unit to the end of the block.
block_stride = unit.get('stride', 1)
unit = dict(unit, stride=1)
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
# Collect activations at the block's end before performing subsampling.
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
# Subsampling of the block's output activations.
if output_stride is not None and current_stride == output_stride:
rate *= block_stride
else:
net = subsample(net, block_stride)
current_stride *= block_stride
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
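# Illustrative trace of the stride/rate bookkeeping above: with three blocks
# whose last units have stride 2 and output_stride=4, the first two strided
# units execute normally (current_stride becomes 2, then 4); the third strided
# unit then runs with stride 1 and the atrous rate becomes 2, so the output
# keeps stride 4 instead of 8.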
def resnet_arg_scope(
weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=tf.nn.relu,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': batch_norm_updates_collections,
'fused': None, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
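# A rough usage sketch (assuming resnet_v1.resnet_v1_50 from the companion
# resnet_v1.py module):
#   with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
#     net, end_points = resnet_v1.resnet_v1_50(images, num_classes=1001)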
models | models-master/research/slim/datasets/build_imagenet_data.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one synset per line.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file containing mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
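# A written record can later be parsed back roughly as follows (eager/TF2-style
# reading shown purely for illustration; this script itself uses tf.compat.v1):
#   raw_dataset = tf.data.TFRecordDataset('train-00000-of-01024')
#   for raw_record in raw_dataset.take(1):
#     example = tf.train.Example.FromString(raw_record.numpy())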
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
  image_data = tf.gfile.GFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches, each covering [ranges[i][0], ranges[i][1]).
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [
l.strip() for l in tf.gfile.GFile(labels_file, 'r').readlines()
]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
  shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.GFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
      Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.GFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
models | models-master/research/vid2depth/project.py
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Geometry utilities for projecting frames based on depth and motion.
Modified from Spatial Transformer Networks:
https://github.com/tensorflow/models/blob/master/transformer/spatial_transformer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import tensorflow as tf
def inverse_warp(img, depth, egomotion, intrinsic_mat, intrinsic_mat_inv):
"""Inverse warp a source image to the target image plane.
Args:
img: The source image (to sample pixels from) -- [B, H, W, 3].
depth: Depth map of the target image -- [B, H, W].
egomotion: 6DoF egomotion vector from target to source -- [B, 6].
intrinsic_mat: Camera intrinsic matrix -- [B, 3, 3].
intrinsic_mat_inv: Inverse of the intrinsic matrix -- [B, 3, 3].
Returns:
Projected source image
"""
dims = tf.shape(img)
batch_size, img_height, img_width = dims[0], dims[1], dims[2]
depth = tf.reshape(depth, [batch_size, 1, img_height * img_width])
grid = _meshgrid_abs(img_height, img_width)
grid = tf.tile(tf.expand_dims(grid, 0), [batch_size, 1, 1])
cam_coords = _pixel2cam(depth, grid, intrinsic_mat_inv)
ones = tf.ones([batch_size, 1, img_height * img_width])
cam_coords_hom = tf.concat([cam_coords, ones], axis=1)
egomotion_mat = _egomotion_vec2mat(egomotion, batch_size)
# Get projection matrix for target camera frame to source pixel frame
hom_filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
hom_filler = tf.tile(hom_filler, [batch_size, 1, 1])
intrinsic_mat_hom = tf.concat(
[intrinsic_mat, tf.zeros([batch_size, 3, 1])], axis=2)
intrinsic_mat_hom = tf.concat([intrinsic_mat_hom, hom_filler], axis=1)
proj_target_cam_to_source_pixel = tf.matmul(intrinsic_mat_hom, egomotion_mat)
source_pixel_coords = _cam2pixel(cam_coords_hom,
proj_target_cam_to_source_pixel)
source_pixel_coords = tf.reshape(source_pixel_coords,
[batch_size, 2, img_height, img_width])
source_pixel_coords = tf.transpose(source_pixel_coords, perm=[0, 2, 3, 1])
projected_img, mask = _spatial_transformer(img, source_pixel_coords)
return projected_img, mask
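# A rough shape-level usage sketch of inverse_warp:
#   projected_img, mask = inverse_warp(
#       source_img,    # [B, H, W, 3] frame to sample pixels from
#       target_depth,  # [B, H, W] predicted depth of the target frame
#       egomotion,     # [B, 6] = [tx, ty, tz, rx, ry, rz], target -> source
#       intrinsics,    # [B, 3, 3] camera intrinsics
#       tf.linalg.inv(intrinsics))
# `mask` is 1.0 wherever all four bilinear sample points fall inside the image.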
def _pixel2cam(depth, pixel_coords, intrinsic_mat_inv):
"""Transform coordinates in the pixel frame to the camera frame."""
cam_coords = tf.matmul(intrinsic_mat_inv, pixel_coords) * depth
return cam_coords
def _cam2pixel(cam_coords, proj_c2p):
"""Transform coordinates in the camera frame to the pixel frame."""
pcoords = tf.matmul(proj_c2p, cam_coords)
x = tf.slice(pcoords, [0, 0, 0], [-1, 1, -1])
y = tf.slice(pcoords, [0, 1, 0], [-1, 1, -1])
z = tf.slice(pcoords, [0, 2, 0], [-1, 1, -1])
# Not tested if adding a small number is necessary
x_norm = x / (z + 1e-10)
y_norm = y / (z + 1e-10)
pixel_coords = tf.concat([x_norm, y_norm], axis=1)
return pixel_coords
def _meshgrid_abs(height, width):
"""Meshgrid in the absolute coordinates."""
x_t = tf.matmul(
tf.ones(shape=tf.stack([height, 1])),
tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
y_t = tf.matmul(
tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
tf.ones(shape=tf.stack([1, width])))
x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
ones = tf.ones_like(x_t_flat)
grid = tf.concat([x_t_flat, y_t_flat, ones], axis=0)
return grid
def _euler2mat(z, y, x):
"""Converts euler angles to rotation matrix.
From:
https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
TODO: Remove the dimension for 'N' (deprecated for converting all source
poses altogether).
Args:
z: rotation angle along z axis (in radians) -- size = [B, n]
y: rotation angle along y axis (in radians) -- size = [B, n]
x: rotation angle along x axis (in radians) -- size = [B, n]
Returns:
Rotation matrix corresponding to the euler angles, with shape [B, n, 3, 3].
"""
batch_size = tf.shape(z)[0]
n = 1
z = tf.clip_by_value(z, -np.pi, np.pi)
y = tf.clip_by_value(y, -np.pi, np.pi)
x = tf.clip_by_value(x, -np.pi, np.pi)
# Expand to B x N x 1 x 1
z = tf.expand_dims(tf.expand_dims(z, -1), -1)
y = tf.expand_dims(tf.expand_dims(y, -1), -1)
x = tf.expand_dims(tf.expand_dims(x, -1), -1)
zeros = tf.zeros([batch_size, n, 1, 1])
ones = tf.ones([batch_size, n, 1, 1])
cosz = tf.cos(z)
sinz = tf.sin(z)
rotz_1 = tf.concat([cosz, -sinz, zeros], axis=3)
rotz_2 = tf.concat([sinz, cosz, zeros], axis=3)
rotz_3 = tf.concat([zeros, zeros, ones], axis=3)
zmat = tf.concat([rotz_1, rotz_2, rotz_3], axis=2)
cosy = tf.cos(y)
siny = tf.sin(y)
roty_1 = tf.concat([cosy, zeros, siny], axis=3)
roty_2 = tf.concat([zeros, ones, zeros], axis=3)
roty_3 = tf.concat([-siny, zeros, cosy], axis=3)
ymat = tf.concat([roty_1, roty_2, roty_3], axis=2)
cosx = tf.cos(x)
sinx = tf.sin(x)
rotx_1 = tf.concat([ones, zeros, zeros], axis=3)
rotx_2 = tf.concat([zeros, cosx, -sinx], axis=3)
rotx_3 = tf.concat([zeros, sinx, cosx], axis=3)
xmat = tf.concat([rotx_1, rotx_2, rotx_3], axis=2)
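  # The full rotation is composed as R = Rx * Ry * Rz; applied to a column
  # vector, the z-rotation acts first, then y, then x.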
return tf.matmul(tf.matmul(xmat, ymat), zmat)
def _egomotion_vec2mat(vec, batch_size):
"""Converts 6DoF transform vector to transformation matrix.
Args:
vec: 6DoF parameters [tx, ty, tz, rx, ry, rz] -- [B, 6].
batch_size: Batch size.
Returns:
A transformation matrix -- [B, 4, 4].
"""
translation = tf.slice(vec, [0, 0], [-1, 3])
translation = tf.expand_dims(translation, -1)
rx = tf.slice(vec, [0, 3], [-1, 1])
ry = tf.slice(vec, [0, 4], [-1, 1])
rz = tf.slice(vec, [0, 5], [-1, 1])
rot_mat = _euler2mat(rz, ry, rx)
rot_mat = tf.squeeze(rot_mat, squeeze_dims=[1])
filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
filler = tf.tile(filler, [batch_size, 1, 1])
transform_mat = tf.concat([rot_mat, translation], axis=2)
transform_mat = tf.concat([transform_mat, filler], axis=1)
return transform_mat
def _bilinear_sampler(im, x, y, name='bilinear_sampler'):
"""Perform bilinear sampling on im given list of x, y coordinates.
Implements the differentiable sampling mechanism with bilinear kernel
in https://arxiv.org/abs/1506.02025.
x,y are tensors specifying normalized coordinates [-1, 1] to be sampled on im.
For example, (-1, -1) in (x, y) corresponds to pixel location (0, 0) in im,
and (1, 1) in (x, y) corresponds to the bottom right pixel in im.
Args:
im: Batch of images with shape [B, h, w, channels].
x: Tensor of normalized x coordinates in [-1, 1], with shape [B, h, w, 1].
y: Tensor of normalized y coordinates in [-1, 1], with shape [B, h, w, 1].
name: Name scope for ops.
Returns:
Sampled image with shape [B, h, w, channels].
Principled mask with shape [B, h, w, 1], dtype:float32. A value of 1.0
in the mask indicates that the corresponding coordinate in the sampled
image is valid.
"""
with tf.variable_scope(name):
x = tf.reshape(x, [-1])
y = tf.reshape(y, [-1])
# Constants.
batch_size = tf.shape(im)[0]
_, height, width, channels = im.get_shape().as_list()
x = tf.to_float(x)
y = tf.to_float(y)
height_f = tf.cast(height, 'float32')
width_f = tf.cast(width, 'float32')
zero = tf.constant(0, dtype=tf.int32)
max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
# Scale indices from [-1, 1] to [0, width - 1] or [0, height - 1].
x = (x + 1.0) * (width_f - 1.0) / 2.0
y = (y + 1.0) * (height_f - 1.0) / 2.0
# Compute the coordinates of the 4 pixels to sample from.
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
mask = tf.logical_and(
tf.logical_and(x0 >= zero, x1 <= max_x),
tf.logical_and(y0 >= zero, y1 <= max_y))
mask = tf.to_float(mask)
x0 = tf.clip_by_value(x0, zero, max_x)
x1 = tf.clip_by_value(x1, zero, max_x)
y0 = tf.clip_by_value(y0, zero, max_y)
y1 = tf.clip_by_value(y1, zero, max_y)
dim2 = width
dim1 = width * height
# Create base index.
base = tf.range(batch_size) * dim1
base = tf.reshape(base, [-1, 1])
base = tf.tile(base, [1, height * width])
base = tf.reshape(base, [-1])
base_y0 = base + y0 * dim2
base_y1 = base + y1 * dim2
idx_a = base_y0 + x0
idx_b = base_y1 + x0
idx_c = base_y0 + x1
idx_d = base_y1 + x1
# Use indices to lookup pixels in the flat image and restore channels dim.
im_flat = tf.reshape(im, tf.stack([-1, channels]))
im_flat = tf.to_float(im_flat)
pixel_a = tf.gather(im_flat, idx_a)
pixel_b = tf.gather(im_flat, idx_b)
pixel_c = tf.gather(im_flat, idx_c)
pixel_d = tf.gather(im_flat, idx_d)
x1_f = tf.to_float(x1)
y1_f = tf.to_float(y1)
# And finally calculate interpolated values.
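    # Each output value is a weighted sum of its four neighbouring pixels; the
    # weight of a corner equals the area of the rectangle spanned by the sample
    # point and the opposite corner, e.g. wa, the weight of pixel (x0, y0), is
    # (x1 - x) * (y1 - y).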
wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
wb = tf.expand_dims((x1_f - x) * (1.0 - (y1_f - y)), 1)
wc = tf.expand_dims(((1.0 - (x1_f - x)) * (y1_f - y)), 1)
wd = tf.expand_dims(((1.0 - (x1_f - x)) * (1.0 - (y1_f - y))), 1)
output = tf.add_n([wa * pixel_a, wb * pixel_b, wc * pixel_c, wd * pixel_d])
output = tf.reshape(output, tf.stack([batch_size, height, width, channels]))
mask = tf.reshape(mask, tf.stack([batch_size, height, width, 1]))
return output, mask
def _spatial_transformer(img, coords):
"""A wrapper over binlinear_sampler(), taking absolute coords as input."""
img_height = tf.cast(tf.shape(img)[1], tf.float32)
img_width = tf.cast(tf.shape(img)[2], tf.float32)
px = coords[:, :, :, :1]
py = coords[:, :, :, 1:]
# Normalize coordinates to [-1, 1] to send to _bilinear_sampler.
px = px / (img_width - 1) * 2.0 - 1.0
py = py / (img_height - 1) * 2.0 - 1.0
output_img, mask = _bilinear_sampler(img, px, py)
return output_img, mask
def get_cloud(depth, intrinsics_inv, name=None): # pylint: disable=unused-argument
"""Convert depth map to 3D point cloud."""
with tf.name_scope(name):
dims = depth.shape.as_list()
batch_size, img_height, img_width = dims[0], dims[1], dims[2]
depth = tf.reshape(depth, [batch_size, 1, img_height * img_width])
grid = _meshgrid_abs(img_height, img_width)
grid = tf.tile(tf.expand_dims(grid, 0), [batch_size, 1, 1])
cam_coords = _pixel2cam(depth, grid, intrinsics_inv)
cam_coords = tf.transpose(cam_coords, [0, 2, 1])
cam_coords = tf.reshape(cam_coords, [batch_size, img_height, img_width, 3])
logging.info('depth -> cloud: %s', cam_coords)
return cam_coords
models | models-master/orbit/controller_test.py
# Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.controller."""
import os
from absl import logging
from absl.testing import parameterized
import numpy as np
from orbit import controller
from orbit import runner
from orbit import standard_runner
import orbit.utils
import tensorflow as tf
def create_model():
x = tf.keras.layers.Input(shape=(3,), name="input")
y = tf.keras.layers.Dense(4, name="dense")(x)
model = tf.keras.Model(x, y)
return model
def summaries_with_matching_keyword(keyword, summary_dir):
"""Returns summary protos matching given keyword from event file."""
matches = []
event_paths = tf.io.gfile.glob(os.path.join(summary_dir, "events*"))
for event in tf.compat.v1.train.summary_iterator(event_paths[-1]):
if event.summary is not None:
for value in event.summary.value:
if keyword in value.tag:
matches.append(event.summary)
return matches
def dataset_fn(ctx):
del ctx
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.ones((10, 4), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10, drop_remainder=True)
return dataset
class TestRunner(standard_runner.StandardTrainer,
standard_runner.StandardEvaluator):
"""Implements the training and evaluation APIs for the test model."""
def __init__(self, return_numpy=False):
self.strategy = tf.distribute.get_strategy()
self.model = create_model()
self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1)
self.global_step = self.optimizer.iterations
self.train_loss = tf.keras.metrics.Mean("train_loss", dtype=tf.float32)
self.eval_loss = tf.keras.metrics.Mean("eval_loss", dtype=tf.float32)
self.return_numpy = return_numpy
train_dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
eval_dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
standard_runner.StandardTrainer.__init__(self, train_dataset)
standard_runner.StandardEvaluator.__init__(self, eval_dataset)
def train_step(self, iterator):
def _replicated_step(inputs):
"""Replicated training step."""
inputs, targets = inputs
with tf.GradientTape() as tape:
outputs = self.model(inputs)
loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
grads = tape.gradient(loss, self.model.variables)
self.optimizer.apply_gradients(zip(grads, self.model.variables))
self.train_loss.update_state(loss)
self.strategy.run(_replicated_step, args=(next(iterator),))
def train_loop_end(self):
train_loss = self.train_loss.result()
return {
"loss": train_loss.numpy() if self.return_numpy else train_loss,
}
def build_eval_dataset(self):
return self.strategy.distribute_datasets_from_function(dataset_fn)
def eval_begin(self):
self.eval_loss.reset_states()
def eval_step(self, iterator):
def _replicated_step(inputs):
"""Replicated evaluation step."""
inputs, targets = inputs
outputs = self.model(inputs)
loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
self.eval_loss.update_state(loss)
self.strategy.run(_replicated_step, args=(next(iterator),))
def eval_end(self):
eval_loss = self.eval_loss.result()
return {
"eval_loss": eval_loss.numpy() if self.return_numpy else eval_loss,
}
class TestEvaluator(standard_runner.StandardEvaluator):
"""Implements the training and evaluation APIs for the test model."""
def __init__(self):
self.strategy = tf.distribute.get_strategy()
self.model = create_model()
eval_dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
standard_runner.StandardEvaluator.__init__(self, eval_dataset)
def eval_reduce(self, state, output):
state.append(output)
return state
def eval_begin(self):
return []
def eval_step(self, iterator):
def _replicated_step(inputs):
"""Replicated evaluation step."""
inputs, targets = inputs
outputs = self.model(inputs)
loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
return loss
per_replica_losses = self.strategy.run(
_replicated_step, args=(next(iterator),))
mean_loss = self.strategy.reduce(
tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)
return mean_loss
def eval_end(self, outputs):
return {
"eval_loss": tf.reduce_mean(outputs),
}
class TestEvaluatorNoOutput(runner.AbstractEvaluator):
def evaluate(self, num_steps):
pass
class TestEvaluatorWithNestedSummary(standard_runner.StandardEvaluator):
"""Implements the training and evaluation APIs for the test model."""
def __init__(self):
self.strategy = tf.distribute.get_strategy()
self.model = create_model()
dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
dataset2 = self.strategy.distribute_datasets_from_function(dataset_fn)
self.loss = tf.keras.metrics.Mean("loss", dtype=tf.float32)
self.accuracy = tf.keras.metrics.CategoricalAccuracy(
"accuracy", dtype=tf.float32)
self.loss2 = tf.keras.metrics.Mean("loss", dtype=tf.float32)
self.accuracy2 = tf.keras.metrics.CategoricalAccuracy(
"accuracy", dtype=tf.float32)
standard_runner.StandardEvaluator.__init__(
self, eval_dataset={
"dataset": dataset,
"dataset2": dataset2
})
def eval_step(self, iterator):
def _replicated_step(loss, accuracy, inputs):
"""Replicated evaluation step."""
inputs, targets = inputs
outputs = self.model(inputs)
loss.update_state(tf.keras.losses.MSE(targets, outputs))
accuracy.update_state(targets, outputs)
self.strategy.run(
lambda inputs: _replicated_step(self.loss, self.accuracy, inputs),
args=(next(iterator["dataset"]),))
self.strategy.run(
lambda inputs: _replicated_step(self.loss2, self.accuracy2, inputs),
args=(next(iterator["dataset2"]),))
def eval_end(self):
return {
"dataset": {
"loss": self.loss.result(),
"accuracy": self.accuracy.result()
},
"dataset2": {
"loss": self.loss2.result(),
"accuracy": self.accuracy2.result()
},
}
class TestTrainerWithSummaries(standard_runner.StandardTrainer):
"""A Trainer model with summaries for testing purposes."""
def __init__(self):
self.strategy = tf.distribute.get_strategy()
self.model = create_model()
self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1)
self.global_step = self.optimizer.iterations
self.train_loss = tf.keras.metrics.Mean("train_loss", dtype=tf.float32)
train_dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
standard_runner.StandardTrainer.__init__(
self,
train_dataset,
options=standard_runner.StandardTrainerOptions(
use_tpu_summary_optimization=True))
def build_train_dataset(self):
return self.strategy.distribute_datasets_from_function(dataset_fn)
def train_step(self, iterator):
def _replicated_step(inputs):
"""Replicated training step."""
inputs, targets = inputs
with tf.GradientTape() as tape:
outputs = self.model(inputs)
loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
tf.summary.scalar("loss", loss)
grads = tape.gradient(loss, self.model.variables)
self.optimizer.apply_gradients(zip(grads, self.model.variables))
self.train_loss.update_state(loss)
self.strategy.run(_replicated_step, args=(next(iterator),))
class ControllerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self.model_dir = self.get_temp_dir()
def test_no_checkpoint(self):
test_runner = TestRunner()
# No checkpoint manager and no strategy.
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
# Loss and accuracy values should be written into summaries.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries/eval")))
# No checkpoint, so global step starts from 0.
test_runner.global_step.assign(0)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
def test_no_checkpoint_and_summaries(self):
test_runner = TestRunner()
# No checkpoint + summary directories.
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_has_checkpoint_no_summaries(self, enable_async_checkpoint_saving):
test_runner = TestRunner()
# Has checkpoint, but no summary directories.
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
steps_per_loop=2)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
# No summaries are saved.
self.assertEmpty(tf.io.gfile.glob(
os.path.join(checkpoint_manager.directory, "events.*")))
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_has_checkpoint_eval_summary_only(
self, enable_async_checkpoint_saving
):
test_runner = TestRunner()
# Has checkpoint, but no summary directories.
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"),
steps_per_loop=2)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
# Training summaries are not saved.
self.assertEmpty(tf.io.gfile.glob(
os.path.join(checkpoint_manager.directory, "events.*")))
# Evaluation summaries are saved.
self.assertNotEmpty(tf.io.gfile.glob(
os.path.join(self.model_dir, "summaries/eval/events.*")))
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_restore_from_most_recent_checkpoint(
self, enable_async_checkpoint_saving
):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=5)
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"),
steps_per_loop=5)
test_controller.train(20)
self.assertLen(checkpoint_manager.checkpoints, 4)
restored_path = test_controller.restore_checkpoint()
self.assertEqual(restored_path, checkpoint_manager.checkpoints[-1])
@parameterized.named_parameters(
("return_numpy_sync_checkpoint_saving", True, False),
("return_numpy_async_checkpoint_saving", True, True),
("return_tensor_sync_checkpoint_saving", False, False),
("return_tensor_async_checkpoint_saving", False, True),
)
def test_train_and_evaluate(
self, return_numpy, enable_async_checkpoint_saving
):
test_runner = TestRunner(return_numpy=return_numpy)
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
# Checkpoints are saved.
self.assertNotEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt*")))
# Loss and accuracy values should be written into summaries.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries/eval")))
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_train_only(self, enable_async_checkpoint_saving):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"),
)
test_controller.train(steps=10)
# Checkpoints are saved.
self.assertNotEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt*")))
# Only train summaries are written.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries/train")))
self.assertFalse(
tf.io.gfile.exists(os.path.join(self.model_dir, "summaries/eval")))
def test_evaluate_only(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint.save(os.path.join(self.model_dir, "ckpt"))
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
eval_results = test_controller.evaluate(steps=2)
# Only eval summaries are written
self.assertFalse(
tf.io.gfile.exists(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries/eval")))
self.assertIn("eval_loss", eval_results)
# Tests continuous eval with timeout and timeout_fn.
done_file = os.path.join(self.model_dir, "summaries/eval/Done")
def timeout_fn():
with tf.io.gfile.GFile(done_file, "w") as f:
f.write("DONE")
return True
test_controller = controller.Controller(
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.evaluate_continuously(
timeout=1, timeout_fn=timeout_fn, steps=2)
self.assertNotEmpty(tf.io.gfile.glob(done_file))
def test_no_eval_steps(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint.save(os.path.join(self.model_dir, "ckpt"))
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager)
test_controller.evaluate()
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_already_trained_model(self, enable_async_checkpoint_saving):
test_runner = TestRunner()
test_runner.global_step.assign(10)
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving)
# `global_step` is already `train_steps`.
test_controller.train(steps=10)
def test_summaries_inside_train_fn(self):
test_runner = TestTrainerWithSummaries()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
summary_interval=2,
checkpoint_manager=checkpoint_manager
)
test_controller.train(steps=10)
    # No checkpoint_interval is set on the manager, so no checkpoints are saved.
self.assertEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt*")))
# Only train summaries are written.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries/train")))
self.assertFalse(
tf.io.gfile.exists(os.path.join(self.model_dir, "summaries/eval")))
def test_train_and_evaluate_with_same_summary_dir(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries"),
checkpoint_manager=checkpoint_manager,
eval_summary_dir=os.path.join(self.model_dir, "summaries"))
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
# Loss and accuracy values should be written into summaries.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries")))
def test_early_stop_on_eval_loss(self):
test_runner = TestRunner()
class EarlyStopController(controller.Controller):
"""A subclass of Controller that supports early stopping."""
def train_and_evaluate(self,
train_steps: int = None,
eval_steps: int = None,
eval_interval: int = None):
while self.global_step.numpy() < train_steps:
interval = min(train_steps - self.global_step.numpy(), eval_interval)
num_steps = self.global_step.numpy() + interval
self.train(steps=num_steps, checkpoint_at_completion=False)
self._sync_on_async_checkpointing()
self.evaluate(steps=eval_steps)
# Early stop condition.
if test_runner.eval_loss.result() < 0.1:
logging.info(
"Training early stopped as eval_loss %s is less than 0.1",
test_runner.eval_loss.result())
return
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
test_controller = EarlyStopController(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
checkpoint_manager=checkpoint_manager)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=6, eval_interval=2)
self.assertLess(test_runner.global_step, 10)
def test_evaluate_with_loss_output(self):
test_evaluator = TestEvaluator()
checkpoint = tf.train.Checkpoint(model=test_evaluator.model)
checkpoint.save(os.path.join(self.model_dir, "ckpt"))
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, self.model_dir, max_to_keep=None)
test_controller = controller.Controller(
evaluator=test_evaluator,
global_step=tf.Variable(0, dtype=tf.int64),
checkpoint_manager=checkpoint_manager,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.evaluate(steps=5)
# Only eval summaries are written
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries/eval")))
def test_evaluate_with_no_output(self):
test_controller = controller.Controller(
evaluator=TestEvaluatorNoOutput(),
global_step=tf.Variable(0, dtype=tf.int64),
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
self.assertSameElements(["steps_per_second"],
test_controller.evaluate(steps=5).keys())
def test_train_and_evaluate_reset_datasets(self):
test_runner = TestRunner()
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
train_dataset = (
test_runner.strategy.distribute_datasets_from_function(dataset_fn))
eval_dataset = (
test_runner.strategy.distribute_datasets_from_function(dataset_fn))
test_runner.train_dataset = train_dataset
test_runner.eval_dataset = eval_dataset
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_eval_and_checkpoint_interval(self, enable_async_checkpoint_saving):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=5)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=10,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
summary_dir=self.model_dir)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=5)
    # Expect 2 checkpoints to be saved, at steps 5 and 10.
self.assertLen(
tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt-*.data*")), 2)
    # Expect evaluation to be performed 2 times, at steps 5 and 10.
self.assertLen(
summaries_with_matching_keyword("eval_loss", self.model_dir), 2)
@parameterized.named_parameters(("DefaultSummary", False),
("InjectSummary", True))
def test_evaluate_with_nested_summaries(self, inject_summary_manager):
test_evaluator = TestEvaluatorWithNestedSummary()
if inject_summary_manager:
summary_manager = orbit.utils.SummaryManager(
self.model_dir,
tf.summary.scalar,
global_step=tf.Variable(0, dtype=tf.int64))
else:
summary_manager = None
test_controller = controller.Controller(
evaluator=test_evaluator,
global_step=tf.Variable(0, dtype=tf.int64),
eval_summary_dir=self.model_dir,
summary_manager=summary_manager)
test_controller.evaluate(steps=5)
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "dataset")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "dataset")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"accuracy", os.path.join(self.model_dir, "dataset")))
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "dataset2")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "dataset2")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"accuracy", os.path.join(self.model_dir, "dataset2")))
def test_actions(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
class OutputRecorderAction:
"""Simple `Action` that just saves the outputs passed to `__call__`."""
def __init__(self):
self.outputs = []
def __call__(self, output):
self.outputs.append(output)
train_output_recorder = OutputRecorderAction()
eval_output_recorder = OutputRecorderAction()
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
train_actions=[train_output_recorder],
eval_actions=[eval_output_recorder],
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
checkpoint_manager=checkpoint_manager,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertLen(train_output_recorder.outputs, 5)
for output in train_output_recorder.outputs:
self.assertIn("loss", output)
self.assertGreaterEqual(output["loss"], 0)
self.assertLen(eval_output_recorder.outputs, 2)
for output in eval_output_recorder.outputs:
self.assertIn("eval_loss", output)
self.assertGreaterEqual(output["eval_loss"], 0)
def test_step_per_loop_callable(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
def steps_per_loop_fn(global_step):
if global_step > 4:
return 4
return 2
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
steps_per_loop=steps_per_loop_fn,
checkpoint_manager=checkpoint_manager
)
test_controller.train(steps=10)
self.assertEqual(test_runner.global_step, 10)
if __name__ == "__main__":
tf.test.main()
| 31,588 | 35.903037 | 80 | py |
models | models-master/orbit/examples/single_task/single_task_evaluator.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An evaluator object that can evaluate models with a single output."""
import orbit
import tensorflow as tf
class SingleTaskEvaluator(orbit.StandardEvaluator):
"""Evaluates a single-output model on a given dataset.
This evaluator will handle running a model with one output on a single
dataset, and will apply the output of that model to one or more
`tf.keras.metrics.Metric` objects.
"""
def __init__(self,
eval_dataset,
label_key,
model,
metrics,
evaluator_options=None):
"""Initializes a `SingleTaskEvaluator` instance.
If the `SingleTaskEvaluator` should run its model under a distribution
strategy, it should be created within that strategy's scope.
Arguments:
eval_dataset: A `tf.data.Dataset` or `DistributedDataset` that contains a
string-keyed dict of `Tensor`s.
label_key: The key corresponding to the label value in feature
dictionaries dequeued from `eval_dataset`. This key will be removed from
the dictionary before it is passed to the model.
model: A `tf.Module` or Keras `Model` object to evaluate.
metrics: A single `tf.keras.metrics.Metric` object, or a list of
`tf.keras.metrics.Metric` objects.
evaluator_options: An optional `orbit.StandardEvaluatorOptions` object.
"""
self.label_key = label_key
self.model = model
self.metrics = metrics if isinstance(metrics, list) else [metrics]
# Capture the strategy from the containing scope.
self.strategy = tf.distribute.get_strategy()
super(SingleTaskEvaluator, self).__init__(
eval_dataset=eval_dataset, options=evaluator_options)
def eval_begin(self):
"""Actions to take once before every eval loop."""
for metric in self.metrics:
metric.reset_states()
def eval_step(self, iterator):
"""One eval step. Called multiple times per eval loop by the superclass."""
def step_fn(inputs):
# Extract the target value and delete it from the input dict, so that
# the model never sees it.
target = inputs.pop(self.label_key)
output = self.model(inputs)
for metric in self.metrics:
metric.update_state(target, output)
# This is needed to handle distributed computation.
self.strategy.run(step_fn, args=(next(iterator),))
def eval_end(self):
"""Actions to take once after an eval loop."""
with self.strategy.scope():
# Export the metrics.
metrics = {metric.name: metric.result() for metric in self.metrics}
return metrics
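# A minimal usage sketch (hedged; it mirrors the pattern exercised in
# single_task_evaluator_test.py, and the dataset/model names below are
# placeholders, not part of this module):
#
# evaluator = SingleTaskEvaluator(
# eval_dataset=my_eval_dataset, # yields dicts containing a 'label' key
# label_key='label',
# model=my_keras_model,
# metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
# controller = orbit.Controller(
# evaluator=evaluator, global_step=tf.Variable(0, dtype=tf.int64))
# results = controller.evaluate(steps=10)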
| 3,195 | 35.735632 | 80 | py |
models | models-master/orbit/examples/single_task/single_task_trainer_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the single_task_trainer."""
import orbit
from orbit.examples.single_task import single_task_trainer
import tensorflow as tf
import tensorflow_datasets as tfds
class SingleTaskTrainerTest(tf.test.TestCase):
def test_single_task_training(self):
iris = tfds.load('iris')
train_ds = iris['train'].batch(32).repeat()
model = tf.keras.Sequential([
tf.keras.Input(shape=(4,), name='features'),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3),
tf.keras.layers.Softmax(),
])
trainer = single_task_trainer.SingleTaskTrainer(
train_ds,
label_key='label',
model=model,
loss_fn=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01))
controller = orbit.Controller(
trainer=trainer,
steps_per_loop=100,
global_step=trainer.optimizer.iterations)
controller.train(1)
start_loss = trainer.train_loss.result().numpy()
controller.train(500)
end_loss = trainer.train_loss.result().numpy()
# Assert that the model has trained 'significantly' - that the loss
# has dropped by over 50%.
self.assertLess(end_loss, start_loss / 2)
if __name__ == '__main__':
tf.test.main()
| 1,961 | 31.163934 | 74 | py |
models | models-master/orbit/examples/single_task/single_task_trainer.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A trainer object that can train models with a single output."""
import orbit
import tensorflow as tf
class SingleTaskTrainer(orbit.StandardTrainer):
"""Trains a single-output model on a given dataset.
This trainer will handle running a model with one output on a single
dataset. It will apply the provided loss function to the model's output
to calculate gradients and will apply them via the provided optimizer. It will
also supply the output of that model to one or more `tf.keras.metrics.Metric`
objects.
"""
def __init__(self,
train_dataset,
label_key,
model,
loss_fn,
optimizer,
metrics=None,
trainer_options=None):
"""Initializes a `SingleTaskTrainer` instance.
If the `SingleTaskTrainer` should run its model under a distribution
strategy, it should be created within that strategy's scope.
This trainer will also calculate metrics during training. The loss metric
is calculated by default, but other metrics can be passed to the `metrics`
arg.
Arguments:
train_dataset: A `tf.data.Dataset` or `DistributedDataset` that contains a
string-keyed dict of `Tensor`s.
label_key: The key corresponding to the label value in feature
dictionaries dequeued from `train_dataset`. This key will be removed
from the dictionary before it is passed to the model.
      model: A `tf.Module` or Keras `Model` object to train. It must accept a
        `training` kwarg.
loss_fn: A per-element loss function of the form (target, output). The
output of this loss function will be reduced via `tf.reduce_mean` to
create the final loss. We recommend using the functions in the
`tf.keras.losses` package or `tf.keras.losses.Loss` objects with
`reduction=tf.keras.losses.reduction.NONE`.
optimizer: A `tf.keras.optimizers.Optimizer` instance.
metrics: A single `tf.keras.metrics.Metric` object, or a list of
`tf.keras.metrics.Metric` objects.
trainer_options: An optional `orbit.utils.StandardTrainerOptions` object.
"""
self.label_key = label_key
self.model = model
self.loss_fn = loss_fn
self.optimizer = optimizer
# Capture the strategy from the containing scope.
self.strategy = tf.distribute.get_strategy()
# We always want to report training loss.
self.train_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
# We need self.metrics to be an iterable later, so we handle that here.
if metrics is None:
self.metrics = []
elif isinstance(metrics, list):
self.metrics = metrics
else:
self.metrics = [metrics]
super(SingleTaskTrainer, self).__init__(
train_dataset=train_dataset, options=trainer_options)
def train_loop_begin(self):
"""Actions to take once, at the beginning of each train loop."""
self.train_loss.reset_states()
for metric in self.metrics:
metric.reset_states()
def train_step(self, iterator):
"""A train step. Called multiple times per train loop by the superclass."""
def train_fn(inputs):
with tf.GradientTape() as tape:
# Extract the target value and delete it from the input dict, so that
# the model never sees it.
target = inputs.pop(self.label_key)
# Get the outputs of the model.
output = self.model(inputs, training=True)
# Get the average per-batch loss and scale it down by the number of
# replicas. This ensures that we don't end up multiplying our loss by
# the number of workers - gradients are summed, not averaged, across
# replicas during the apply_gradients call.
# Note, the reduction of loss is explicitly handled and scaled by
# num_replicas_in_sync. Recommend to use a plain loss function.
# If you're using tf.keras.losses.Loss object, you may need to set
# reduction argument explicitly.
loss = tf.reduce_mean(self.loss_fn(target, output))
scaled_loss = loss / self.strategy.num_replicas_in_sync
# Get the gradients by applying the loss to the model's trainable
# variables.
gradients = tape.gradient(scaled_loss, self.model.trainable_variables)
# Apply the gradients via the optimizer.
self.optimizer.apply_gradients(
list(zip(gradients, self.model.trainable_variables)))
# Update metrics.
self.train_loss.update_state(loss)
for metric in self.metrics:
metric.update_state(target, output)
# This is needed to handle distributed computation.
self.strategy.run(train_fn, args=(next(iterator),))
def train_loop_end(self):
"""Actions to take once after a training loop."""
with self.strategy.scope():
# Export the metrics.
metrics = {metric.name: metric.result() for metric in self.metrics}
metrics[self.train_loss.name] = self.train_loss.result()
return metrics
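# A minimal usage sketch (hedged; it mirrors single_task_trainer_test.py, with
# placeholder dataset/model names that are not part of this module):
#
# trainer = SingleTaskTrainer(
# my_train_dataset, # yields dicts containing a 'label' key
# label_key='label',
# model=my_keras_model,
# loss_fn=tf.keras.losses.sparse_categorical_crossentropy,
# optimizer=tf.keras.optimizers.SGD(learning_rate=0.01))
# controller = orbit.Controller(
# trainer=trainer,
# steps_per_loop=100,
# global_step=trainer.optimizer.iterations)
# controller.train(500)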
| 5,647 | 39.056738 | 80 | py |
models | models-master/orbit/examples/single_task/single_task_evaluator_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the single_task_evaluator."""
import orbit
from orbit.examples.single_task import single_task_evaluator
from orbit.examples.single_task import single_task_trainer
import tensorflow as tf
import tensorflow_datasets as tfds
class SingleTaskEvaluatorTest(tf.test.TestCase):
def test_single_task_evaluation(self):
iris = tfds.load('iris')
train_ds = iris['train'].batch(32)
model = tf.keras.Sequential([
tf.keras.Input(shape=(4,), name='features'),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3)
])
trainer = single_task_trainer.SingleTaskTrainer(
train_ds,
label_key='label',
model=model,
loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01))
evaluator = single_task_evaluator.SingleTaskEvaluator(
train_ds,
label_key='label',
model=model,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
controller = orbit.Controller(
trainer=trainer,
evaluator=evaluator,
steps_per_loop=100,
global_step=trainer.optimizer.iterations)
controller.train(train_ds.cardinality().numpy())
controller.evaluate()
accuracy = evaluator.metrics[0].result().numpy()
self.assertGreater(0.925, accuracy)
if __name__ == '__main__':
tf.test.main()
| 2,088 | 30.651515 | 80 | py |
cyphercat | cyphercat-master/scripts/test_train.py | from __future__ import print_function
try:
import os
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from cyphercat.models import get_predef_model, weights_init
from cyphercat.train import *
from cyphercat.metrics import *
from cyphercat.datadefs import CCATDataset
from cyphercat.datadefs.cifar10_dataset import Cifar10_preload_and_split
from cyphercat.load_data import prep_data
from cyphercat.utils import Configurator, ModelConfig, DataStruct
except ImportError as e:
print(e)
raise ImportError
def main():
global args
parser = argparse.ArgumentParser(description="Convolutional NN Testing Script")
parser.add_argument("-c", "--config", dest="configfile", default='scripts/configs/lfw.yml', help="Path to yaml")
args = parser.parse_args()
print("Python: %s" % sys.version)
print("Pytorch: %s" % torch.__version__)
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Get configuration file
configr = Configurator(args.configfile)
# Get dataset configuration
dataset_config = configr.dataset
train_model_config = configr.train_model
# Training model params
train_config = ModelConfig(train_model_config)
model_name = train_config.name
# Datastruct for prepping data
data_struct = DataStruct(dataset_config)
# Simple download / unpacker function
prep_data(data_struct)
# Hyperparameters
n_epochs = train_config.epochs
batch_size = train_config.batchsize
learnrate = train_config.learnrate
loss = nn.CrossEntropyLoss()
# Data augmentation
train_transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
splits = [0.4, 0.1, 0.25, 0.25]
data_name = data_struct.name
ccatset = CCATDataset(path=data_struct.save_path, name=data_name, splits=splits, transforms=[train_transform])
trainset = ccatset.get_split_n(0)
testset = ccatset.get_split_n(1)
n_classes = data_struct.n_classes
img_size = data_struct.height
n_in = data_struct.channels
## Define pyTorch ingestors for training and testing
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2)
#
## helper function to unnormalize and plot image
#def imshow(img):
# img = np.array(img)
# img = img / 2 + 0.5
# img = np.moveaxis(img, 0, -1)
# plt.imshow(img)
#
## display sample from dataset
#imgs,labels = iter(trainloader).next()
#imshow(torchvision.utils.make_grid(imgs))
# Prepare the model for training
model = get_predef_model(model_name)(n_in=n_in, n_classes=n_classes, n_filters=64, size=img_size)
model.to(device)
model.apply(weights_init)
model_optim = optim.Adam(model.parameters(), lr=learnrate/10)
# Train the model
train(model=model, data_loader=trainloader, test_loader=testloader,
optimizer=model_optim, criterion=loss, n_epochs=n_epochs, classes=None, verbose=False)
# Evaluate analytics on triaining and testing sets
print("\nPerformance on training set: ")
train_accuracy = eval_target_model(model, trainloader, classes=None)
print("\nPerformance on test set: ")
test_accuracy = eval_target_model(model, testloader, classes=None)
if __name__ == "__main__":
main()
| 4,030 | 32.31405 | 116 | py |
cyphercat | cyphercat-master/scripts/test_load.py | from __future__ import print_function
try:
import time
import os
import sys
import argparse
import torch
from cyphercat.load_data import prep_data
from cyphercat.utils import Configurator, DataStruct
except ImportError as e:
print(e)
raise ImportError
def main():
global args
parser = argparse.ArgumentParser(description="Convolutional NN Testing Script")
parser.add_argument("-c", "--config", dest="configfile", default='scripts/configs/config.yml', help="Path to yaml")
#model_parse = parser.add_mutually_exclusive_group()
#model_parse.add_argument("-r", "--rand_rot_angle", dest="rand_rot_angle", default=0., type=float, help="Random image rotation angle range [deg]")
#model_parse.add_argument("-f", "--fixed_rot_angle", dest="fixed_rot_angle", nargs=3, type=float, help="(low, high, spacing) fixed image rotation angle [deg]")
args = parser.parse_args()
print("Testing")
print("Python: %s" % sys.version)
print("Pytorch: %s" % torch.__version__)
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Get configuration file
configr = Configurator(args.configfile)
# Get dataset configuration
dataset_config = configr.dataset
# Directory structures for data and model saving
data_struct = DataStruct(dataset_config)
prep_data(dataset_config)
if __name__ == "__main__":
main()
| 1,500 | 26.290909 | 163 | py |
cyphercat | cyphercat-master/scripts/test_train_libri.py | from __future__ import print_function
try:
import os
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from cyphercat.models import get_predef_model, weights_init
from cyphercat.train import *
from cyphercat.metrics import *
from cyphercat.load_data import prep_data
from cyphercat.utils import Configurator, ModelConfig, DataStruct
except ImportError as e:
print(e)
raise ImportError
def main():
global args
parser = argparse.ArgumentParser(description="Convolutional NN Testing Script")
parser.add_argument("-c", "--config", dest="configfile", default='scripts/configs/librispeech.yml', help="Path to yaml")
args = parser.parse_args()
print("Python: %s" % sys.version)
print("Pytorch: %s" % torch.__version__)
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Get configuration file
configr = Configurator(args.configfile)
# Get dataset configuration
dataset_config = configr.dataset
train_model_config = configr.train_model
# Datastruct for prepping data
data_struct = DataStruct(dataset_config)
# Simple download / unpacker function
prep_data(data_struct)
## Training model params
#train_config = ModelConfig(train_model_config)
#model_name = train_config.name
## Hyperparameters
#n_epochs = train_config.epochs
#batch_size = train_config.batchsize
#learnrate = train_config.learnrate
#loss = nn.CrossEntropyLoss()
#
## Data augmentation
#train_transform = torchvision.transforms.Compose([
# #torchvision.transforms.RandomRotation(10),
# #torchvision.transforms.RandomHorizontalFlip(),
# #torchvision.transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),
#
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
#])
#
#test_transform = torchvision.transforms.Compose([
# #torchvision.transforms.Pad(2),
# torchvision.transforms.ToTensor(),
# #torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
#])
#
## Defined training and testing set splits
#trainset, testset = get_split_dataset(dataset_config=dataset_config, transforms=[train_transform, test_transform])
#n_classes = trainset.n_classes
#
## Define pyTorch ingestors for training and testing
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
#testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2)
#
## Prepare the model for training
#model = get_predef_model(model_name)(n_in=3, n_classes=n_classes, n_filters=64, size=250)
#model.to(device)
#model.apply(weights_init)
#model_optim = optim.Adam(model.parameters(), lr=learnrate/10)
## Train the model
#train(model=model, data_loader=trainloader, test_loader=testloader,
# optimizer=model_optim, criterion=loss, n_epochs=n_epochs, classes=None, verbose=False)
    ## Evaluate analytics on training and testing sets
#print("\nPerformance on training set: ")
#train_accuracy = eval_target_model(model, trainloader, classes=None)
#print("\nPerformance on test set: ")
#test_accuracy = eval_target_model(model, testloader, classes=None)
if __name__ == "__main__":
main()
| 3,781 | 33.697248 | 124 | py |
cyphercat | cyphercat-master/Utils/transformations.py | import torch
import librosa as libr
import numpy as np
class ToMFCC:
'''
Transformation to convert soundfile loaded via LibriSpeechDataset to Mel-
frequency cepstral coefficients (MFCCs)
Args:
number_of_mels: Number of bins to use for cepstral coefficients
Returns:
torch.float tensor
'''
def __init__(self, number_of_mels=128):
self.number_of_mels = number_of_mels
def __call__(self, y):
dims = y.shape
y = libr.feature.melspectrogram(np.reshape(y, (dims[1],)), 16000,
n_mels=self.number_of_mels, fmax=8000)
y = libr.feature.mfcc(S=libr.power_to_db(y))
y = torch.from_numpy(y)
return y.float()
class STFT:
'''
Short-time Fourier transform (STFT) for librosa dataset
Args:
phase: If true, will return the magnitude and phase of the transformation,
if false only returns magnitude
Returns:
torch.float tensor
'''
def __init__(self, phase=False):
self.phase = phase
def __call__(self, y):
dims = y.shape
y = libr.core.stft(np.reshape(y, (dims[1],)))
y, phase = np.abs(y), np.angle(y)
y = torch.from_numpy(y).permute(1, 0)
phase = torch.from_numpy(phase).permute(1, 0)
if self.phase:
return torch.cat( (y, phase), dim=0).float()
else:
return y.float()
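# A minimal, hedged usage sketch: both transforms expect a (1, num_samples)
# array (matching the np.reshape calls above); here 3 seconds of random 16 kHz
# audio stand in for a real clip loaded with soundfile.
if __name__ == '__main__':
    example_audio = np.random.randn(1, 3 * 16000)
    example_mfcc = ToMFCC(number_of_mels=128)(example_audio)
    example_stft = STFT(phase=True)(example_audio)
    print('MFCC shape:', example_mfcc.shape)  # (n_mfcc, time_frames)
    print('STFT shape:', example_stft.shape)  # (2 * time_frames, 1 + n_fft // 2)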
| 1,449 | 28.591837 | 79 | py |
cyphercat | cyphercat-master/Utils/datasets.py | from torch.utils.data import Dataset
from tqdm import tqdm
import soundfile as sf
import pandas as pd
import numpy as np
import os
LIBRISPEECH_SAMPLING_RATE = 16000
PATH = '/home/mlomnitz/mlomnitz/Datasets'
sex_to_label = {'M': False, 'F': True}
label_to_sex = {False: 'M', True: 'F'}
def to_categorical(y, num_classes):
"""Transforms an integer class label into a one-hot label (single integer to 1D vector)."""
if y >= num_classes:
        raise ValueError('Integer label is greater than the number of classes.')
one_hot = np.zeros(num_classes)
one_hot[y] = 1
return one_hot
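# Example (illustrative): to_categorical(2, num_classes=4) returns
# array([0., 0., 1., 0.]); passing y >= num_classes raises ValueError.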
def Libri_preload_and_split(path,subsets,seconds,pad=False,cache=True,splits = [.8,.2], attacking = False):
fragment_seconds = seconds
print('Initialising LibriSpeechDataset with minimum length = {}s and subsets = {}'.format(seconds, subsets))
# Convert subset to list if it is a string
# This allows to handle list of multiple subsets the same a single subset
if isinstance(subsets, str):
subsets = [subsets]
cached_df = []
found_cache = {s: False for s in subsets}
if cache:
# Check for cached files
for s in subsets:
subset_index_path = path + '/{}.index.csv'.format(s)
if os.path.exists(subset_index_path):
cached_df.append(pd.read_csv(subset_index_path))
found_cache[s] = True
# Index the remaining subsets if any
if all(found_cache.values()) and cache:
df = pd.concat(cached_df)
else:
df = pd.read_csv(path+'/LibriSpeech/SPEAKERS.TXT', skiprows=11, delimiter='|', error_bad_lines=False)
df.columns = [col.strip().replace(';', '').lower() for col in df.columns]
df = df.assign(
sex=df['sex'].apply(lambda x: x.strip()),
subset=df['subset'].apply(lambda x: x.strip()),
name=df['name'].apply(lambda x: x.strip()),
)
audio_files = []
for subset, found in found_cache.items():
if not found:
audio_files += index_subset(path, subset)
# Merge individual audio files with indexing dataframe
df = pd.merge(df, pd.DataFrame(audio_files))
# # Concatenate with already existing dataframe if any exist
df = pd.concat(cached_df+[df])
# Save index files to data folder
for s in subsets:
df[df['subset'] == s].to_csv(path + '/{}.index.csv'.format(s), index=False)
# Trim too-small files
if not pad:
df = df[df['seconds'] > fragment_seconds]
num_speakers = len(df['id'].unique())
# Renaming for clarity
df = df.rename(columns={'id': 'speaker_id', 'minutes': 'speaker_minutes'})
# Index of dataframe has direct correspondence to item in dataset
df = df.reset_index(drop=True)
df = df.assign(id=df.index.values)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
print('Finished indexing data. {} usable files found.'.format(len(df)))
dfs = {} #dictionary of dataframes
#split df into data-subsets
if attacking == 1: #adversary 1, which requires an additional split for a shadow network
#splits unique speakers into three unequal parts.
# num speakers for train & test is the same.
# the below was solved with a system of equations
        n = int(num_speakers//(2+2*splits[0]))  # amount of data depends on the amount of training data
#n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] #target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
dfs = splitter(dfs,df,unique_speakers1, splits,0)
dfs = splitter(dfs,df,unique_speakers2, splits,2)
dfs = splitter(dfs,df,unique_speakers3, splits=[0.5,0.5],N = 4) #split out data for attack train + test evenly
elif attacking == 3: #adversary 3, which just requires in & out data
#splits unique speakers into two unequal parts.
# the below was solved with a system of equations
        n = int(num_speakers//(1+splits[0]))  # amount of data depends on the amount of training data
#n is train data for target networks
unique_speakers1 = unique_speakers[:n] #target
unique_speakers2 = unique_speakers[n:] # out (target + shadow)
dfs = splitter(dfs,df,unique_speakers1, splits,0)
dfs = splitter(dfs,df,unique_speakers2, splits=[1,0],N=2) #split out data for just attack eval
else: # just split into train & test
dfs = splitter(dfs, df,unique_speakers, splits, 0)
#check that the splits were as desired:
for d in dfs:
print(len(dfs[d]))
print('Finished splitting data.')
return dfs
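# A hedged usage sketch (the root path and subset name are illustrative; `path`
# is expected to contain LibriSpeech/SPEAKERS.TXT plus the subset folders that
# the indexing code above walks):
#
# dfs = Libri_preload_and_split('/data/libri_root', ['train-clean-100'],
# seconds=3, splits=[0.8, 0.2])
# train_df, test_df = dfs[0], dfs[1] # ~80% / ~20% of each speaker's files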
def index_subset(path, subset):
    """
    Index a subset by looping through all of its files and recording their speaker ID, filepath and length.
:param subset: Name of the subset
:return: A list of dicts containing information about all the audio files in a particular subset of the
LibriSpeech dataset
"""
audio_files = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(path + '/LibriSpeech/{}/'.format(subset)):
subset_len += len([f for f in files if f.endswith('.flac')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(path + '/LibriSpeech/{}/'.format(subset)):
if len(files) == 0:
continue
librispeech_id = int(root.split('/')[-2])
for f in files:
# Skip non-sound files
if not f.endswith('.flac'):
continue
progress_bar.update(1)
instance, samplerate = sf.read(os.path.join(root, f))
audio_files.append({
'id': librispeech_id,
'filepath': os.path.join(root, f),
'length': len(instance),
'seconds': len(instance) * 1. / LIBRISPEECH_SAMPLING_RATE
})
progress_bar.close()
return audio_files
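# Each entry returned by index_subset is a dict of the form (hedged,
# illustrative values):
# {'id': 1089, 'filepath': '.../LibriSpeech/test-clean/1089/.../1089-134686-0000.flac',
# 'length': 52480, 'seconds': 3.28}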
def VOiCES_preload_and_split(path,subsets,seconds,pad=False,cache=True,splits = [.8,.2], attacking = False):
fragment_seconds = seconds
speakerF = 'Lab41-SRI-VOiCES-speaker-gender-dataset_SUBSET.csv'
print('Initialising VOiCES Dataset with minimum length = {}s and subsets = {}'.format(seconds, subsets))
for file in os.listdir(path):
if file.startswith("part"):
subsets.append(file)
# Convert subset to list if it is a string
# This allows to handle list of multiple subsets the same a single subset
if isinstance(subsets, str):
subsets = [subsets]
cached_df = []
found_cache = {s: False for s in subsets}
if cache:
# Check for cached files
# if data == 'Libri':
# for s in subsets:
# subset_index_path = path + '/{}.index.csv'.format(s)
# if os.path.exists(subset_index_path):
# print(subset_index_path)
# cached_df.append(pd.read_csv(subset_index_path))
# found_cache[s] = True
subset_index_path = path + '/index.csv'
if os.path.exists(subset_index_path):
cached_df.append(pd.read_csv(subset_index_path))
for s in subsets:
found_cache[s] = True
# Index the remaining subsets if any
if all(found_cache.values()) and cache:
df = pd.concat(cached_df)
else:
df = pd.read_csv(path+speakerF, skiprows=0, delimiter=',', error_bad_lines=False)
df.columns = [col.strip().replace(';', '').lower() for col in df.columns]
df = df.rename(columns={'speaker': 'id', 'gender': 'sex','dataset':'subset'})
df = df.assign(
sex=df['sex'].apply(lambda x: x.strip()),
subset=df['subset'].apply(lambda x: x.strip()),
)
audio_files = []
for subset, found in found_cache.items():
if not found:
audio_files += index_subset_VOiCES(path, subset)
# Merge individual audio files with indexing dataframe
df = pd.merge(df, pd.DataFrame(audio_files))
# # Concatenate with already existing dataframe if any exist
df = pd.concat(cached_df+[df])
# Save index files to data folder
# if data == 'Libri':
# for s in subsets:
# df[df['subset'] == s].to_csv(path + '/{}.index.csv'.format(s), index=False)
df.to_csv(path + '/index.csv', index=False)
# Trim too-small files
if not pad:
df = df[df['seconds'] > fragment_seconds]
num_speakers = len(df['id'].unique())
# Renaming for clarity
df = df.rename(columns={'id': 'speaker_id'})
# Index of dataframe has direct correspondence to item in dataset
df = df.reset_index(drop=True)
df = df.assign(id=df.index.values)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
print('Finished indexing data. {} usable files found.'.format(len(df)))
dfs = {} #dictionary of dataframes
#split df into data-subsets
if attacking == 1: #adversary 1, which requires an additional split for a shadow network
#splits unique speakers into three unequal parts.
# num speakers for train & test is the same.
# the below was solved with a system of equations
        n = int(num_speakers//(2+2*splits[0]))  # amount of data depends on the amount of training data
#n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] #target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
dfs = splitter(dfs,df,unique_speakers1, splits,0)
dfs = splitter(dfs,df,unique_speakers2, splits,2)
dfs = splitter(dfs,df,unique_speakers3, splits=[0.5,0.5],N = 4) #split out data for attack train + test evenly
elif attacking == 3: #adversary 3, which just requires in & out data
#splits unique speakers into two unequal parts.
# the below was solved with a system of equations
        n = int(num_speakers//(1+splits[0]))  # amount of data depends on the amount of training data
#n is train data for target networks
unique_speakers1 = unique_speakers[:n] #target
unique_speakers2 = unique_speakers[n:] # out (target + shadow)
dfs = splitter(dfs,df,unique_speakers1, splits,0)
dfs = splitter(dfs,df,unique_speakers2, splits=[1,0],N=2) #split out data for just attack eval
else: # just split into train & test
dfs = splitter(dfs, df,unique_speakers, splits, 0)
#check that the splits were as desired:
for d in dfs:
print(len(dfs[d]))
print('Finished splitting data.')
return dfs
def index_subset_VOiCES(path, subset):
    """
    Index a subset by looping through all of its files and recording their speaker ID, filepath and length.
    :param subset: Name of the subset
    :return: A list of dicts containing information about all the audio files in a particular subset of the
    VOiCES dataset
"""
audio_files = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
addpath = ''
ftype = '.wav'
for root, folders, files in os.walk(path + addpath +'{}/'.format(subset)):
subset_len += len([f for f in files if f.endswith(ftype)])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(path + addpath + '{}/'.format(subset)):
if len(files) == 0:
continue
for f in files:
# Skip non-sound files
if not f.endswith(ftype):
continue
librispeech_id = int(f[f.index('sp')+2:f.index('sp')+6])
progress_bar.update(1)
instance, samplerate = sf.read(os.path.join(root, f))
audio_files.append({
'id': librispeech_id,
'filepath': os.path.join(root, f),
'length': len(instance),
'seconds': len(instance) * 1. / LIBRISPEECH_SAMPLING_RATE
})
progress_bar.close()
return audio_files
def splitter(dfs,df,unique_speakers, splits,N):
#N is to keep track of the dataframe dict keys
n_splits = len(splits)
for speaker in unique_speakers: #for each speaker
# speaker = valid_sequence.unique_speakers[0]
tot_files = sum(df['speaker_id']==speaker)
mini_df = df[df['speaker_id']==speaker]
mini_df = mini_df.reset_index()
used_files = 0
start_file = 0
for idx, s in enumerate(splits): #for each split
if idx != n_splits-1:
n_files = int(s*tot_files)
used_files += n_files
else:
n_files = tot_files - used_files
#get stop index for the desired # of files:
stop_file = start_file + n_files
#initialize if first speaker, or append if later speaker
if speaker == unique_speakers[0]:
dfs[idx + N] = (mini_df.iloc[start_file:stop_file])
else:
dfs[idx + N] = dfs[idx + N].append(mini_df.iloc[start_file:stop_file])
#update start_file
start_file += n_files
for idx in range(n_splits): #for each dataframe
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
class LibriSpeechDataset(Dataset):
"""This class subclasses the torch.utils.data.Dataset object. The __getitem__ function will return a raw audio
    sample and its label.
This class also contains functionality to build verification tasks and n-shot, k-way classification tasks.
# Arguments
subsets: What LibriSpeech datasets to include.
seconds: Minimum length of audio to include in the dataset. Any files smaller than this will be ignored.
        downsampling: Factor by which the returned audio fragment is downsampled.
label: One of {speaker, sex}. Whether to use sex or speaker ID as a label.
stochastic: bool. If True then we will take a random fragment from each file of sufficient length. If False we
will always take a fragment starting at the beginning of a file.
pad: bool. Whether or not to pad samples with 0s to get them to the desired length. If `stochastic` is True
then a random number of 0s will be appended/prepended to each side to pad the sequence to the desired length.
cache: bool. Whether or not to use the cached index file
"""
def __init__(self, path, df, seconds, downsampling, label='speaker', stochastic=True, pad=False,
transform = None, cache=True):
if label not in ('sex', 'speaker'):
            raise ValueError('Label type must be one of (\'sex\', \'speaker\')')
if int(seconds * LIBRISPEECH_SAMPLING_RATE) % downsampling != 0:
            raise ValueError('Down sampling must be an integer divisor of the fragment length.')
self.fragment_seconds = seconds
self.downsampling = downsampling
self.fragment_length = int(seconds * LIBRISPEECH_SAMPLING_RATE)
self.stochastic = stochastic
self.pad = pad
self.label = label
self.transform = transform
# load df from splitting function
self.df = df
self.num_speakers = len(self.df['speaker_id'].unique())
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers - 1) labels
self.unique_speakers = sorted(self.df['speaker_id'].unique())
self.speaker_id_mapping = {self.unique_speakers[i]: i for i in range(self.num_classes())}
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath']
self.datasetid_to_speaker_id = self.df.to_dict()['speaker_id']
self.datasetid_to_sex = self.df.to_dict()['sex']
def __getitem__(self, index):
instance, samplerate = sf.read(self.datasetid_to_filepath[index])
# Choose a random sample of the file
if self.stochastic:
fragment_start_index = np.random.randint(0, max(len(instance)-self.fragment_length, 1))
else:
fragment_start_index = 0
instance = instance[fragment_start_index:fragment_start_index+self.fragment_length]
# Check for required length and pad if necessary
if self.pad and len(instance) < self.fragment_length:
less_timesteps = self.fragment_length - len(instance)
if self.stochastic:
# Stochastic padding, ensure instance length == self.fragment_length by appending a random number of 0s
# before and the appropriate number of 0s after the instance
less_timesteps = self.fragment_length - len(instance)
before_len = np.random.randint(0, less_timesteps)
after_len = less_timesteps - before_len
instance = np.pad(instance, (before_len, after_len), 'constant')
else:
# Deterministic padding. Append 0s to reach self.fragment_length
instance = np.pad(instance, (0, less_timesteps), 'constant')
if self.label == 'sex':
sex = self.datasetid_to_sex[index]
label = sex_to_label[sex]
elif self.label == 'speaker':
label = self.datasetid_to_speaker_id[index]
label = self.speaker_id_mapping[label]
else:
            raise ValueError('Label type must be one of (\'sex\', \'speaker\'), got {}'.format(self.label))
# Reindex to channels first format as supported by pytorch and downsample by desired amount
instance = instance[np.newaxis, ::self.downsampling]
# Add transforms
if self.transform is not None:
instance = self.transform(instance)
return instance, label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['speaker_id'].unique())
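# Hypothetical usage sketch (not part of the original file). The dataframe is
# assumed to come from the indexing/splitting helpers above and must contain
# 'filepath', 'speaker_id' and 'sex' columns pointing at real audio files.
def _librispeech_dataset_example(df):
    from torch.utils.data import DataLoader
    # 3-second fragments at LIBRISPEECH_SAMPLING_RATE, no downsampling
    train_set = LibriSpeechDataset(path='', df=df, seconds=3, downsampling=1,
                                   label='speaker', stochastic=True, pad=True)
    loader = DataLoader(train_set, batch_size=16, shuffle=True)
    return loader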
| 18,446 | 38.082627 | 119 | py |
cyphercat | cyphercat-master/Utils/visualize_object_survey.py | #!/usr/bin/python3
"""
Set of functions used to call a series of algorithms used to visualize the object localization of a pre-trained
network in PyTorch. The different algorithms are discussed in several papers, while the implementation is based,
roughly, on work in the following repository (https://github.com/sar-gupta/weakly-supervised-localization-survey)
"""
import numpy as np
import PIL
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
def saliency_map_general(model, input, label, plot = False):
"""
saliency_map_general: implementation to return the most general form of the saliency map, informing
on the regions of interest that activate a specific label.
Args:
- model: (PyTorch) Trained model trying to understand
    - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
return: numpy array with heatmap data
"""
input = Variable(input.unsqueeze_(0),requires_grad = True)
output = model.forward(input)
model.zero_grad()
output[0][label].backward()
grads = input.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0,1)
grads.transpose_(1,2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
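def _saliency_example():
    # Illustrative sketch, not part of the original file: a randomly
    # initialized torchvision ResNet-18 stands in for a trained classifier,
    # and the input image is a fake CPU tensor.
    model = torchvision.models.resnet18(num_classes=10).eval()
    img = torch.rand(3, 64, 64)
    heatmap = saliency_map_general(model, img, label=3)
    return heatmap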
def guided_saliency_map(model, input, label, plot = False):
"""
guided_saliency_map: implementation to return a guided saliency map, informing
on the regions of interest that activate a specific label.
Args:
- model: (PyTorch) Trained model trying to understand
    - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
return: numpy array with heatmap data
"""
input = Variable(input.unsqueeze_(0), requires_grad=True)
try:
h = [0]*len(list(model.modules()))
def hookfunc(module, gradInput, gradOutput):
return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
for j, i in enumerate(list(model.modules())):
h[j] = i.register_backward_hook(hookfunc)
output = model.forward(input)
model.zero_grad()
output[0][label].backward()
for i in range(len(list(model.modules()))):
h[i].remove()
except Exception as e:
print(e)
for i in range(len(list(model.modules()))):
h[i].remove()
grads = input.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0,1)
grads.transpose_(1,2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def gradcam(model, input, label, layer_name, plot=False):
"""
    gradcam: implementation to return a class activation map using the gradients of the class score with respect
    to each of the last conv layer's filters. Calculates a weighted sum of gradients and filters to finally obtain a map
of size equal to size of filters.
Args:
- model: (PyTorch) Trained model trying to understand
    - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
- layer_name: Name of the layer to target, should be the last CNN.
return:
    numpy array with the class activation map
"""
imgs_shape = (input.shape[1], input.shape[2])
rs = torchvision.transforms.Resize( imgs_shape )
#find the right layer
last_conv = None
for name, item in model._modules.items():
if name == layer_name:
last_conv = item
    if last_conv is None:
        print('Cannot find target layer')
return None
pre_image = input
global gcdata
global gcgrads
def bhook(module, gradInputs, gradOutputs):
global gcgrads
gcgrads = gradOutputs
def fhook(module, input, output):
global gcdata
gcdata = output
hb = last_conv.register_backward_hook(bhook)
hf = last_conv.register_forward_hook(fhook)
out = model(input.unsqueeze_(0))
model.zero_grad()
out[0, label].backward()
hb.remove()
hf.remove()
gcdata = gcdata[0]
gcgrads = gcgrads[0].squeeze()
gcgrads = gcgrads.mean(dim=2, keepdim=True)
gcgrads = gcgrads.mean(dim=1, keepdim=True)
#
gcdata = gcdata.mul(gcgrads)
gcdata = gcdata.sum(dim=0, keepdim=True)
gcdata = gcdata.clamp(min=0)
gcdata -= gcdata.min()
gcdata /= gcdata.max()
toi = torchvision.transforms.ToPILImage()
gcdata = np.array(rs(toi(gcdata.data.cpu())))
input.squeeze()
return gcdata
def guided_gradcam(model, input, label,layer_name, plot = False):
"""
    guided_gradcam: returns a combination of a guided saliency map and class activation map. This combines
    the sensitivity to different classes from gradcam together with the greater resolution of the
saliency map.
Args:
- model: (PyTorch) Trained model trying to understand
    - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
- layer_name: Name of the layer to target, should be the last CNN.
return:
    numpy array with the class activation map
"""
gc = gradcam(model, input, label, layer_name, plot=False)
guided = guided_saliency_map(model=model, input=input[0], label=label, plot=False)
gc = gc * guided
rs = torchvision.transforms.Resize((32,32))
gc -= gc.min()
gc = np.divide(gc, gc.max())
gc *= 255
gc = gc.astype(int)
return gc
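def _gradcam_example():
    # Illustrative sketch, not part of the original file: localize class 3 for
    # a randomly initialized torchvision ResNet-18. 'layer4' is the name of the
    # last convolutional block in model._modules, which is what gradcam (and
    # guided_gradcam) expect as layer_name.
    model = torchvision.models.resnet18(num_classes=10).eval()
    img = torch.rand(3, 64, 64)
    cam = gradcam(model, img, label=3, layer_name='layer4')
    return cam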
def smooth_guided_saliency_map(model, input, label, transform,x=10, percent_noise=10, plot = True):
"""
    smooth_guided_saliency_map: Implementation of a guided saliency map accounting for the fact that
    small, local variations in the derivatives lead to the apparent noise one sees. This implementation smooths
    these out by averaging gradients over several noisy copies of the input.
Args:
- model: (PyTorch) Trained model trying to understand
    - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
    - x: Number of times to sample for the smoothing
    - percent_noise: Percentage of noise to be introduced during sampling for smoothing
return:
    numpy array with the activation map data
"""
tensor_input = input
final_grad = torch.zeros(input.shape).cuda()
final_grad = final_grad.unsqueeze(0)
h = [0]*len(list(model.modules()))
def hookfunc(module, gradInput, gradOutput):
return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
for j, i in enumerate(list(model.modules())):
h[j] = i.register_backward_hook(hookfunc)
for i in range(x):
temp_input = tensor_input
noise = torch.from_numpy(np.random.normal(loc=0, scale=(percent_noise/100) *
(tensor_input.max() - tensor_input.min()),
size=temp_input.shape)).type(torch.cuda.FloatTensor)
temp_input = (temp_input.cuda() + noise).cpu().numpy()
temp_input = np.transpose(temp_input, (1,2,0) )
temp_input = PIL.Image.fromarray(temp_input.astype(np.uint8))
temp_input = Variable(transform(temp_input).unsqueeze(0).cuda(), requires_grad=True)
output = model.forward(temp_input)
model.zero_grad()
output[0][label].backward()
final_grad += temp_input.grad.data
for i in range(len(list(model.modules()))):
h[i].remove()
grads = final_grad/x
grads = grads.clamp(min=0)
grads.squeeze_()
grads.transpose_(0,1)
grads.transpose_(1,2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def smooth_guided_gradcam(model, input, label, transform, layer_name, plot = False ):
guided = smooth_guided_saliency_map(model, input, label,transform = transform, plot = False)
gc = gradcam(model, input, label, layer_name = layer_name, plot=False)
gc = gc * guided
rs = torchvision.transforms.Resize((32,32))
gc -= gc.min()
gc = np.divide(gc, gc.max())
gc *= 255
gc = gc.astype(int)
return gc
| 8,615 | 31.513208 | 117 | py |
cyphercat | cyphercat-master/Utils/models.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import os.path
def new_size_conv(size, kernel, stride=1, padding=0):
return np.floor((size + 2*padding - (kernel -1)-1)/stride +1)
def new_size_max_pool(size, kernel, stride=None, padding=0):
    if stride is None:
stride = kernel
return np.floor((size + 2*padding - (kernel -1)-1)/stride +1)
def calc_alexnet_size(size):
x = new_size_conv(size, 6,3,2)
x = new_size_max_pool(x,3,2)
x = new_size_conv(x,5,1,2)
x = new_size_max_pool(x,3,2)
x = new_size_conv(x,3,1,1)
x = new_size_conv(x,3,1,1)
x = new_size_conv(x,3,1,1)
out = new_size_max_pool(x,2,2)
return out
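# Worked example (illustrative only): for a 32 x 32 input the chain above gives
# conv(32, k=6, s=3, p=2) -> 11, maxpool(11, k=3, s=2) -> 5, conv(5, k=5, p=2) -> 5,
# maxpool(5, k=3, s=2) -> 2, three 3x3/pad-1 convs keep 2, maxpool(2, k=2, s=2) -> 1,
# so the AlexNet classifier below sees 256 * 1 * 1 = 256 input features.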
class AlexNet(nn.Module):
def __init__(self, n_classes, size=32):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=6, stride=3, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
out_feat_size = calc_alexnet_size(size)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * out_feat_size * out_feat_size, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, n_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class tiny_cnn(nn.Module):
def __init__(self, n_in=3, n_out=10, n_hidden=64, size=64):
super(tiny_cnn, self).__init__()
self.size = size
self.n_hidden = n_hidden
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_hidden, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(n_hidden),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_hidden, 2*n_hidden, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(2*n_hidden),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(2*n_hidden * (self.size//4) * (self.size//4), 2*n_hidden)
self.output = nn.Linear(2*n_hidden, n_out)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(x.size(0), -1)
#x = x.view(-1, 2*self.n_hidden * (self.size//4) * (self.size//4))
x = self.fc(x)
out = self.output(x)
return out
def calc_mlleaks_cnn_size(size):
x = new_size_conv(size, 5,1,2)
x = new_size_max_pool(x,2,2)
x = new_size_conv(x,5,1,2)
out = new_size_max_pool(x,2,2)
return out
class mlleaks_cnn(nn.Module):
def __init__(self, n_in=3, n_out=10, n_hidden=64, size=32):
super(mlleaks_cnn, self).__init__()
self.n_hidden = n_hidden
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_hidden, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(n_hidden),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_hidden, 2*n_hidden, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(2*n_hidden),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
fc_feature_size = calc_mlleaks_cnn_size(size)
self.fc = nn.Linear(int(2*n_hidden * fc_feature_size * fc_feature_size), 128)
self.output = nn.Linear(2*n_hidden, n_out)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
out = self.output(x)
return out
class ConvBlock(nn.Module):
#for audio_CNN_classifier
def __init__(self, n_input, n_out, kernel_size):
super(ConvBlock, self).__init__()
self.cnn_block = nn.Sequential(
nn.Conv1d(n_input, n_out, kernel_size, padding=1),
nn.BatchNorm1d(n_out),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=4)
)
def forward(self, x):
return self.cnn_block(x)
class audio_CNN_classifier(nn.Module):
def __init__(self, in_size, n_hidden, n_classes):
super(audio_CNN_classifier, self).__init__()
self.down_path = nn.ModuleList()
self.down_path.append(ConvBlock(in_size, 2*in_size, 3))
self.down_path.append(ConvBlock(2*in_size, 4*in_size, 3))
self.down_path.append(ConvBlock(4*in_size, 8*in_size, 3))
self.fc = nn.Sequential(
nn.Linear(8*in_size, n_hidden),
nn.ReLU()
)
self.out = nn.Linear(n_hidden, n_classes)
def forward(self, x):
for down in self.down_path:
x = down(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return self.out(x)
class STFT_CNN_classifier(nn.Module):
def __init__(self, in_size, n_hidden, n_classes):
super(STFT_CNN_classifier, self).__init__()
self.down_path = nn.ModuleList()
self.down_path.append(ConvBlock(in_size, in_size, 7))
self.down_path.append(ConvBlock(in_size, in_size*2, 7))
self.down_path.append(ConvBlock(in_size*2, in_size*4, 7))
self.fc = nn.Sequential(
nn.Linear(5264, n_hidden),
nn.ReLU()
)
self.out = nn.Linear(n_hidden, n_classes)
def forward(self, x):
for down in self.down_path:
x = down(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return self.out(x)
class mlleaks_mlp(nn.Module):
def __init__(self, n_in=3, n_out=1, n_hidden=64):
super(mlleaks_mlp, self).__init__()
self.hidden = nn.Linear(n_in, n_hidden)
#self.bn = nn.BatchNorm1d(n_hidden)
self.output = nn.Linear(n_hidden, n_out)
def forward(self, x):
x = F.sigmoid(self.hidden(x))
#x = self.bn(x)
out = self.output(x)
#out = F.sigmoid(self.output(x))
return out
class cnn(nn.Module):
def __init__(self, in_channels, out_channels, n_filters):
super(cnn, self).__init__()
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(in_channels, n_filters, kernel_size=3, padding=1),
nn.BatchNorm2d(n_filters),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
# shape = [Batch_size, n_filters, height/2, width/2]
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, n_filters*2, kernel_size=3, padding=1),
nn.BatchNorm2d(n_filters*2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
# shape = [Batch_size, n_filters*2, height/4, width/4]
self.dense_block_1 = nn.Sequential(
##nn.Linear(n_filters * 2 * 8 * 8, 64),
            nn.Linear(n_filters*2 * 8 * 8, 64),  # output must match dense_block_2 input size (64)
##nn.BatchNorm1d(64),
##nn.ReLU(inplace=True)
)
# shape = [Batch_size, 64]
self.dense_block_2 = nn.Sequential(
nn.Linear(64, 32),
nn.BatchNorm1d(32),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, 32]
self.dense_block_3 = nn.Sequential(
nn.Linear(32, out_channels),
nn.BatchNorm1d(out_channels)
)
# shape = [Batch_size, 10]
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(-1, self.n_filters*2 * 8 * 8)
x = self.dense_block_1(x)
x = self.dense_block_2(x)
out = self.dense_block_3(x)
return out
class mlp(nn.Module):
def __init__(self, in_channels, out_channels, n_filters):
super(mlp, self).__init__()
self.n_filters = n_filters
# shape = [Batch_size, k (top k posteriors)]
self.dense_block_1 = nn.Sequential(
nn.Linear(in_channels, n_filters*2),
#nn.BatchNorm1d(n_filters*2),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, n_filters*2]
self.dense_block_2 = nn.Sequential(
nn.Linear(n_filters*2, n_filters*2),
#nn.BatchNorm1d(n_filters*2),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, 32]
self.dense_block_3 = nn.Sequential(
nn.Linear(n_filters*2, out_channels),
#nn.BatchNorm1d(out_channels),
nn.Sigmoid()
)
# shape = [Batch_size, 10]
def forward(self, x):
x = self.dense_block_1(x)
x = self.dense_block_2(x)
out = self.dense_block_3(x)
return out
class audio_cnn_block(nn.Module):
'''
1D convolution block used to build audio cnn classifiers
Args:
input: input channels
output: output channels
kernel_size: convolution kernel size
'''
def __init__(self, n_input, n_out, kernel_size):
super(audio_cnn_block, self).__init__()
self.cnn_block = nn.Sequential(
nn.Conv1d(n_input, n_out, kernel_size, padding=1),
nn.BatchNorm1d(n_out),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=4)
)
def forward(self, x):
return self.cnn_block(x)
class audio_tiny_cnn(nn.Module):
'''
Template for convolutional audio classifiers.
'''
def __init__(self, cnn_sizes, n_hidden, kernel_size, n_classes):
'''
Init
Args:
cnn_sizes: List of sizes for the convolution blocks
n_hidden: number of hidden units in the first fully connected layer
kernel_size: convolution kernel size
n_classes: number of speakers to classify
'''
super(audio_tiny_cnn, self).__init__()
self.down_path = nn.ModuleList()
self.down_path.append(audio_cnn_block(cnn_sizes[0], cnn_sizes[1],
kernel_size,))
self.down_path.append(audio_cnn_block(cnn_sizes[1], cnn_sizes[2],
kernel_size,))
self.down_path.append(audio_cnn_block(cnn_sizes[2], cnn_sizes[3],
kernel_size,))
self.fc = nn.Sequential(
nn.Linear(cnn_sizes[4], n_hidden),
nn.ReLU()
)
self.out = nn.Linear(n_hidden, n_classes)
def forward(self, x):
for down in self.down_path:
x = down(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return self.out(x)
def MFCC_cnn_classifier(n_classes):
'''
Builds speaker classifier that ingests MFCC's
'''
in_size = 20
n_hidden = 512
sizes_list = [in_size, 2*in_size, 4*in_size, 8*in_size, 8*in_size]
return audio_tiny_cnn(cnn_sizes=sizes_list, n_hidden=n_hidden,
                          kernel_size=3, n_classes=n_classes)
def ft_cnn_classifer(n_classes):
'''
Builds speaker classifier that ingests the abs value of fourier transforms
'''
in_size = 94
n_hidden = 512
sizes_list = [in_size, in_size, 2*in_size, 4*in_size, 14*4*in_size]
return audio_tiny_cnn(cnn_sizes=sizes_list, n_hidden=n_hidden,
                          kernel_size=7, n_classes=n_classes)
class RNN(torch.nn.Module):
'''
Bidirectional LSTM for sentiment analysis
'''
def __init__(self, vocab_size, embedding_size, hidden_size, output_size, n_layers=2, bidirectional=True, dropout=0.5):
super(RNN, self).__init__()
self.embedding = torch.nn.Embedding(vocab_size, embedding_size)
self.rnn = torch.nn.LSTM(embedding_size, hidden_size, num_layers=n_layers, bidirectional=bidirectional, dropout=dropout)
self.fc = torch.nn.Linear(hidden_size*2, output_size)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, x):
embedded = self.dropout(self.embedding(x))
output, (hidden, cell) = self.rnn(embedded)
hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1))
return self.fc(hidden.squeeze(0))
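def _rnn_sentiment_example():
    # Illustrative shape check, not part of the original file: a batch of 4
    # token sequences of length 50, sequence-first as nn.LSTM expects by
    # default; vocabulary size and dimensions are made up.
    model = RNN(vocab_size=5000, embedding_size=100, hidden_size=256,
                output_size=1)
    tokens = torch.randint(0, 5000, (50, 4))
    scores = model(tokens)  # -> shape [4, 1]
    return scores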
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
nn.init.constant_(m.bias, 0)
def save_checkpoint(model=None, optimizer=None, epoch=None,
data_descriptor=None, loss=None, accuracy=None, path='./',
filename='checkpoint', ext='.pth.tar'):
state = {
'epoch': epoch,
'arch': str(model.type),
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'loss': loss,
'accuracy': accuracy,
'dataset': data_descriptor
}
torch.save(state, path+filename+ext)
def load_checkpoint(model=None, optimizer=None, checkpoint=None):
assert os.path.isfile(checkpoint), 'Checkpoint not found, aborting load'
chpt = torch.load(checkpoint)
    assert str(model.type) == chpt['arch'], 'Model architecture mismatch,\
aborting load'
model.load_state_dict(chpt['state_dict'])
if optimizer is not None:
        optimizer.load_state_dict(chpt['optimizer'])
    print('Successfully loaded checkpoint \nDataset: %s \nEpoch: %s \nLoss: %s\
\nAccuracy: %s' % (chpt['dataset'], chpt['epoch'], chpt['loss'],
chpt['accuracy']))
| 14,560 | 32.018141 | 128 | py |
cyphercat | cyphercat-master/Utils/metrics.py | import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from SVC_Utils import *
from sklearn.metrics import roc_curve, auc
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def eval_target_net(net, testloader, classes=None):
if classes is not None:
        class_correct = np.zeros(len(classes))
        class_total = np.zeros(len(classes))
total = 0
correct = 0
with torch.no_grad():
net.eval()
for i, (imgs, lbls) in enumerate(testloader):
imgs, lbls = imgs.to(device), lbls.to(device)
output = net(imgs)
predicted = output.argmax(dim=1)
total += imgs.size(0)
correct += predicted.eq(lbls).sum().item()
if classes is not None:
for prediction, lbl in zip(predicted, lbls):
class_correct[lbl] += prediction == lbl
class_total[lbl] += 1
accuracy = 100*(correct/total)
if classes is not None:
for i in range(len(classes)):
print('Accuracy of %s : %.2f %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
print("\nAccuracy = %.2f %%\n\n" % (accuracy) )
return accuracy
def eval_attack_net(attack_net, target, target_train, target_out, k):
"""Assess accuracy, precision, and recall of attack model for in training set/out of training set classification.
Edited for use with SVCs."""
in_predicts=[]
out_predicts=[]
losses = []
if type(target) is not Pipeline:
target_net=target
target_net.eval()
attack_net.eval()
precisions = []
recalls = []
accuracies = []
#for threshold in np.arange(0.5, 1, 0.005):
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
train_top = np.empty((0,2))
out_top = np.empty((0,2))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
mini_batch_size = train_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
#[mini_batch_size x num_classes] tensors, (0,1) probabilities for each class for each sample)
if type(target) is Pipeline:
traininputs=train_imgs.view(train_imgs.shape[0], -1)
outinputs=out_imgs.view(out_imgs.shape[0], -1)
train_posteriors=torch.from_numpy(target.predict_proba(traininputs)).float()
out_posteriors=torch.from_numpy(target.predict_proba(outinputs)).float()
else:
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
#[k x mini_batch_size] tensors, (0,1) probabilities for top k probable classes
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:,:k].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:,:k].clone().to(device)
#Collects probabilities for predicted class.
for p in train_top_k:
in_predicts.append((p.max()).item())
for p in out_top_k:
out_predicts.append((p.max()).item())
if type(target) is not Pipeline:
train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
#print("train_top_k = ",train_top_k)
#print("out_top_k = ",out_top_k)
#print(train_top.shape)
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(mini_batch_size).to(device)
#Takes in probabilities for top k most likely classes, outputs ~1 (in training set) or ~0 (out of training set)
train_predictions = F.sigmoid(torch.squeeze(attack_net(train_top_k)))
out_predictions = F.sigmoid(torch.squeeze(attack_net(out_top_k)))
for j, t in enumerate(thresholds):
true_positives[j] += (train_predictions >= t).sum().item()
false_positives[j] += (out_predictions >= t).sum().item()
false_negatives[j] += (train_predictions < t).sum().item()
#print(train_top >= threshold)
#print((train_top >= threshold).sum().item(),',',(out_top >= threshold).sum().item())
correct[j] += (train_predictions >= t).sum().item()
correct[j] += (out_predictions < t).sum().item()
total[j] += train_predictions.size(0) + out_predictions.size(0)
#print(true_positives,',',false_positives,',',false_negatives)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = true_positives[j] / (true_positives[j] + false_positives[j]) if true_positives[j] + false_positives[j] != 0 else 0
recall = true_positives[j] / (true_positives[j] + false_negatives[j]) if true_positives[j] + false_negatives[j] !=0 else 0
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
print("threshold = %.4f, accuracy = %.2f, precision = %.2f, recall = %.2f" % (t, accuracy, precision, recall))
plt.plot(recalls, precisions)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
def eval_attack_roc(attack_net, target_net, target_train, target_out, k):
losses = []
target_net.eval()
attack_net.eval()
total = 0
correct = 0
train_top = np.empty((0,2))
out_top = np.empty((0,2))
true_positives = 0
false_positives = 0
false_negatives = 0
predictions = np.array([])
labels = np.array([])
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
train_size = train_imgs.shape[0]
out_size = out_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:,:k].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:,:k].clone().to(device)
train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
#print("train_top_k = ",train_top_k)
#print("out_top_k = ",out_top_k)
train_lbl = torch.ones(train_size).to(device)
out_lbl = torch.zeros(out_size).to(device)
train_predictions = F.sigmoid(torch.squeeze(attack_net(train_top_k)))
out_predictions = F.sigmoid(torch.squeeze(attack_net(out_top_k)))
predictions = np.concatenate((predictions, train_predictions.detach().cpu().numpy()), axis=0)
labels = np.concatenate((labels, np.ones(train_size)), axis=0)
predictions = np.concatenate((predictions, out_predictions.detach().cpu().numpy()), axis=0)
labels = np.concatenate((labels, np.zeros(out_size)), axis=0)
#print("train_predictions = ",train_predictions)
#print("out_predictions = ",out_predictions)
true_positives += (train_predictions >= 0.5).sum().item()
false_positives += (out_predictions >= 0.5).sum().item()
false_negatives += (train_predictions < 0.5).sum().item()
correct += (train_predictions>=0.5).sum().item()
correct += (out_predictions<0.5).sum().item()
total += train_predictions.size(0) + out_predictions.size(0)
accuracy = 100 * correct / total
precision = true_positives / (true_positives + false_positives) if true_positives + false_positives != 0 else 0
recall = true_positives / (true_positives + false_negatives) if true_positives + false_negatives !=0 else 0
print("Membership Inference Performance")
print("Accuracy = %.2f%%, Precision = %.2f, Recall = %.2f" % (accuracy, precision, recall))
fpr, tpr, thresholds = roc_curve(labels, predictions, pos_label=1)
roc_auc = auc(fpr, tpr)
return fpr, tpr, roc_auc
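def _plot_attack_roc_example(attack_net, target_net, target_train, target_out):
    # Illustrative sketch, not part of the original file: plot the ROC curve
    # returned by eval_attack_roc for a trained attack/target pair.
    fpr, tpr, roc_auc = eval_attack_roc(attack_net, target_net, target_train,
                                        target_out, k=3)
    plt.plot(fpr, tpr, label='AUC = %.3f' % roc_auc)
    plt.plot([0, 1], [0, 1], linestyle='--')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.legend()
    plt.show()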
def eval_membership_inference(target_net, target_train, target_out):
target_net.eval()
precisions = []
recalls = []
accuracies = []
#for threshold in np.arange(0.5, 1, 0.005):
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
mini_batch_size = train_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top = train_sort[:,0].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top = out_sort[:,0].clone().to(device)
#print(train_top.shape)
for j, t in enumerate(thresholds):
true_positives[j] += (train_top >= t).sum().item()
false_positives[j] += (out_top >= t).sum().item()
false_negatives[j] += (train_top < t).sum().item()
#print(train_top >= threshold)
#print((train_top >= threshold).sum().item(),',',(out_top >= threshold).sum().item())
correct[j] += (train_top >= t).sum().item()
correct[j] += (out_top < t).sum().item()
total[j] += train_top.size(0) + out_top.size(0)
#print(true_positives,',',false_positives,',',false_negatives)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = true_positives[j] / (true_positives[j] + false_positives[j]) if true_positives[j] + false_positives[j] != 0 else 0
recall = true_positives[j] / (true_positives[j] + false_negatives[j]) if true_positives[j] + false_negatives[j] !=0 else 0
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
print("threshold = %.4f, accuracy = %.2f, precision = %.2f, recall = %.2f" % (t, accuracy, precision, recall))
plt.plot(recalls, precisions)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
| 11,084 | 35.22549 | 134 | py |
cyphercat | cyphercat-master/Utils/SVC_Utils.py | import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
import torch
import torchvision
def load(dataloader):
"""Loads/flattens inputs and targets for use in SVM. Returns inputs and targets."""
for data in dataloader:
x,y=data
x=x.view(x.shape[0],-1)
return x,y
def hp_grid(n_components, C_range, gamma_range):
"""Creates and returns list of classifiers with grid of hyperparameters given by C_range and gamma_range."""
clfs=[]
pca=PCA(n_components=n_components)
scaling = MinMaxScaler(feature_range=(-1,1))
for i in C_range:
for j in gamma_range:
svc=svm.SVC(C=i, gamma=j)
clf=make_pipeline(pca, scaling, svc)
clfs.append(clf)
return clfs
def train_grid(clfs, inputs, targets):
"""Trains classifiers in a list; returns list of trained classifiers."""
fitted_clfs=[]
for i in range(len(clfs)):
x=clfs[i].fit(inputs, targets)
fitted_clfs.append(x)
print('Fitted: ', i+1, '/', len(clfs))
return fitted_clfs
def predict_eval(clf, inputs, targets, training=False):
"""Given a classifier and inputs, returns predictions and evaluated classifier accuracy."""
preds=clf.predict(inputs)
num_correct=torch.eq(torch.from_numpy(preds), targets).sum().item()
acc=(num_correct/len(targets))*100
if training:
print('C: ', clf.get_params(deep=True)['svc__C'], 'gamma: ', clf.get_params(deep=True)['svc__gamma'])
print('Training Accuracy: ', acc)
else:
print('Testing Accuracy: ', acc)
return preds, acc
def maxacc_gen(test_accs, train_accs, clfs):
"""Finds and returns model with highest test accuracy and model with train/test accuracy ratio closest to 1."""
test=np.array(test_accs)
train=np.array(train_accs)
maxacc=clfs[np.argmax(test)]
gen=clfs[np.argmin(train-test)]
return maxacc, gen
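def _svc_workflow_example(trainloader, testloader):
    # Illustrative end-to-end sketch, not part of the original file, tying the
    # helpers above together. The loaders are assumed to yield a single full
    # batch each, since load() keeps only the last batch it sees.
    x_train, y_train = load(trainloader)
    x_test, y_test = load(testloader)
    clfs = hp_grid(n_components=180, C_range=[1, 10], gamma_range=[0.001, 0.01])
    fitted = train_grid(clfs, x_train, y_train)
    train_accs, test_accs = [], []
    for clf in fitted:
        _, tr_acc = predict_eval(clf, x_train, y_train, training=True)
        _, te_acc = predict_eval(clf, x_test, y_test)
        train_accs.append(tr_acc)
        test_accs.append(te_acc)
    return maxacc_gen(test_accs, train_accs, fitted)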
def save_proba(fn, pipe, inputs, targets):
"""Fits svm with probabilities and saves to disk."""
params=pipe.get_params(deep=True)
pca=PCA(n_components=180)
scaling = MinMaxScaler(feature_range=(-1,1))
pipe_prob=make_pipeline(pca, scaling, svm.SVC(C=params['svc__C'], gamma=params['svc__gamma'], probability=True))
pipe_prob.fit(inputs, targets)
joblib.dump(pipe_prob, fn)
def load_svm(directory, gen=True):
"""Returns loaded SVM saved with classification baselines.
'gen' : Model with train/test accuracy ratio closest to 1.
'maxacc' : Model with highest test accuracy."""
if gen:
clf='gen'
if not gen:
clf='maxacc'
dataset=directory.split('/')[-1]
path='SVM' + dataset + '_' + clf + '_proba.pkl'
svm=joblib.load(os.path.join(directory, path))
return svm
def class_acc(preds, targets, classes):
"Returns classifier accuracy for each class."
correct=0
class_correct=np.zeros(len(classes))
class_total=np.zeros(len(classes))
for j in range(len(targets)):
class_total[targets[j]]+=1
if np.argmax(preds[j])==targets[j]:
class_correct[targets[j]]+=1
correct+=1
class_accuracies=(class_correct/class_total)*100
accuracy=(correct/len(targets))*100
for i in range(len(class_accuracies)):
print('Accuracy of', classes[i], ': ', class_accuracies[i], '%')
print('Total Accuracy: ', accuracy, '%')
| 3,655 | 30.791304 | 116 | py |
cyphercat | cyphercat-master/Utils/train.py | import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from metrics import *
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train(net, data_loader, test_loader, optimizer, criterion, n_epochs, classes=None, verbose=False):
losses = []
for epoch in range(n_epochs):
net.train()
for i, batch in enumerate(data_loader):
imgs, labels = batch
imgs, labels = imgs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = net(imgs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[%d/%d][%d/%d] loss = %f" % (epoch, n_epochs, i, len(data_loader), loss.item()))
# evaluate performance on testset at the end of each epoch
print("[%d/%d]" %(epoch, n_epochs))
print("Training:")
train_accuracy = eval_target_net(net, data_loader, classes=classes)
print("Test:")
test_accuracy = eval_target_net(net, test_loader, classes=classes)
#plt.plot(losses)
#plt.show()
return train_accuracy, test_accuracy
def train_attacker(attack_net, shadow, shadow_train, shadow_out, optimizer, criterion, n_epochs, k):
"""
Trains attack model (classifies a sample as in or out of training set) using
shadow model outputs (probabilities for sample class predictions).
The type of shadow model used can vary.
"""
in_predicts=[]
out_predicts=[]
losses = []
if type(shadow) is not Pipeline:
shadow_net=shadow
shadow_net.eval()
for epoch in range(n_epochs):
total = 0
correct = 0
#train_top = np.array([])
#train_top = []
train_top = np.empty((0,2))
out_top = np.empty((0,2))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(shadow_train, shadow_out)):
if train_imgs.shape[0] != out_imgs.shape[0]:
break
#######out_imgs = torch.randn(out_imgs.shape)
mini_batch_size = train_imgs.shape[0]
if type(shadow) is not Pipeline:
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = F.softmax(shadow_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(shadow_net(out_imgs.detach()), dim=1)
else:
traininputs= train_imgs.view(train_imgs.shape[0],-1)
outinputs=out_imgs.view(out_imgs.shape[0], -1)
in_preds=shadow.predict_proba(traininputs)
train_posteriors=torch.from_numpy(in_preds).float()
#for p in in_preds:
# in_predicts.append(p.max())
out_preds=shadow.predict_proba(outinputs)
out_posteriors=torch.from_numpy(out_preds).float()
#for p in out_preds:
# out_predicts.append(p.max())
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:,:k].clone().to(device)
for p in train_top_k:
in_predicts.append((p.max()).item())
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:,:k].clone().to(device)
for p in out_top_k:
out_predicts.append((p.max()).item())
train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(mini_batch_size).to(device)
optimizer.zero_grad()
train_predictions = torch.squeeze(attack_net(train_top_k))
out_predictions = torch.squeeze(attack_net(out_top_k))
loss_train = criterion(train_predictions, train_lbl)
loss_out = criterion(out_predictions, out_lbl)
loss = (loss_train + loss_out) / 2
if type(shadow) is not Pipeline:
loss.backward()
optimizer.step()
correct += (F.sigmoid(train_predictions)>=0.5).sum().item()
correct += (F.sigmoid(out_predictions)<0.5).sum().item()
total += train_predictions.size(0) + out_predictions.size(0)
print("[%d/%d][%d/%d] loss = %.2f, accuracy = %.2f" % (epoch, n_epochs, i, len(shadow_train), loss.item(), 100 * correct / total))
#Plot distributions for target predictions in training set and out of training set
"""
fig, ax = plt.subplots(2,1)
plt.subplot(2,1,1)
plt.hist(in_predicts, bins='auto')
plt.title('In')
plt.subplot(2,1,2)
plt.hist(out_predicts, bins='auto')
plt.title('Out')
"""
'''
plt.scatter(out_top.T[0,:], out_top.T[1,:], c='b')
plt.scatter(train_top.T[0,:], train_top.T[1,:], c='r')
plt.show()
'''
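def _train_attacker_example(shadow_net, shadow_train_loader, shadow_out_loader):
    # Illustrative sketch, not part of the original file: train a 3-input
    # ML-Leaks style attack MLP against a shadow network's in/out splits.
    # mlleaks_mlp lives in models.py in this directory (assumed importable).
    from models import mlleaks_mlp
    attack_net = mlleaks_mlp(n_in=3).to(device)
    optimizer = torch.optim.Adam(attack_net.parameters(), lr=1e-3)
    criterion = torch.nn.BCEWithLogitsLoss()
    train_attacker(attack_net, shadow_net, shadow_train_loader,
                   shadow_out_loader, optimizer, criterion, n_epochs=5, k=3)
    return attack_net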
class softCrossEntropy(torch.nn.Module):
def __init__(self, alpha=0.95):
"""
:param alpha: Strength (0-1) of influence from soft labels in training
"""
super(softCrossEntropy, self).__init__()
self.alpha = alpha
return
def forward(self, inputs, target, true_labels):
"""
:param inputs: predictions
:param target: target (soft) labels
:param true_labels: true (hard) labels
:return: loss
"""
        # The hard-label term must be part of the same expression; a dangling
        # `+ ...` on its own line would be a no-op statement.
        KD_loss = self.alpha*torch.nn.KLDivLoss(size_average=False)(F.log_softmax(inputs, dim=1),
                                                                    F.softmax(target, dim=1)) \
            + (1-self.alpha)*F.cross_entropy(inputs, true_labels)
return KD_loss
def distill_training(teacher, learner, data_loader, test_loader, optimizer,
criterion, n_epochs, verbose=False):
"""
:param teacher: network to provide soft labels in training
:param learner: network to distill knowledge into
:param data_loader: data loader for training data set
    :param test_loader: data loader for validation data
:param optimizer: optimizer for training
:param criterion: objective function, should allow for soft labels.
We suggest softCrossEntropy
:param n_epochs: epochs for training
:param verbose: verbose == True will print loss at each batch
    :return: None, learner model is trained in place
"""
losses = []
for epoch in range(n_epochs):
teacher.eval()
learner.train()
for i, batch in enumerate(data_loader):
with torch.set_grad_enabled(False):
imgs, labels = batch
imgs, labels = imgs.to(device), labels.to(device)
                soft_labels = teacher(imgs)
with torch.set_grad_enabled(True):
optimizer.zero_grad()
outputs = learner(imgs)
                loss = criterion(outputs, soft_labels, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[%d/%d][%d/%d] loss = %f" % (epoch, n_epochs, i,
len(data_loader),
loss.item()))
# evaluate performance on testset at the end of each epoch
print("[%d/%d]" %(epoch, n_epochs))
print("Training:")
eval_target_net(learner, data_loader, classes=None)
print("Test:")
eval_target_net(learner, test_loader, classes=None)
# plt.plot(losses)
# plt.show()
| 8,087 | 35.432432 | 142 | py |
cyphercat | cyphercat-master/cyphercat/defenses.py | import copy
# Torch imports
import torch
import torch.optim as optim
# Local imports
from .train import train, softCrossEntropy
from .metrics import eval_target_model
def transfer_learn(model=None, data_loader=None, test_loader=None,
optimizer=None, criterion=None, lr=0, n_epochs=0,
unfreeze_layers=None, fine_tune=True, verbose=False):
"""Routine to perform transfer learning given a model and a new dataset.
Args:
model (nn.Module): Pretrained model.
data_loader (Dataloader): Dataloader pointing to training dataset.
test_loader (Dataloader): Dataloader poinitng to validation dataset.
optimizer (config): This remains to be implemented
criterion (nn.Module): Criterion for loss calculation.
lr (float): Learning rate for training.
n_epochs (int): Maximum number of epochs during training of last
            layers and fine-tuning step.
        unfreeze_layers ((int, int)): Tuple with indices (first, last) of
layers to unfreeze during first stage of training.
fine_tune (bool): If true will do a second stage of training with all
of the layers unfrozen and 1/10th of original learning rate.
verbose (bool): If True will print loss at each training step.
Returns:
Todos:
        Implement generalized optimizer, loaded from configuration. Currently
hardcoded to SGD.
"""
unfrozen = []
param_list = list()
for idx, mod in enumerate(model._modules.items()):
if unfreeze_layers[0] <= idx <= unfreeze_layers[1]:
param_list += list(mod[1].parameters())
unfrozen.append(mod[0])
for param in mod[1].parameters():
param.requires_grad = True
else:
for param in mod[1].parameters():
param.requires_grad = False
print('Training parameters in modules:')
for x in unfrozen:
print('\t %s' % x)
optimizer = optim.SGD(params=param_list, lr=lr, momentum=0.9)
train(model=model, data_loader=data_loader, test_loader=test_loader,
optimizer=optimizer, criterion=criterion, n_epochs=n_epochs,
verbose=verbose)
print('Finished training last layers, performance: \n'
'Training: %lf \nTest: %lf' % (
eval_target_model(model=model, data_loader=data_loader),
eval_target_model(model=model, data_loader=test_loader)))
if fine_tune is False:
return
    print('Moving on to fine-tuning the entire network')
for param in model.parameters():
param.requires_grad = True
optimizer = optim.SGD(params=model.parameters(), lr=lr/10., momentum=0.9)
train(model=model, data_loader=data_loader, test_loader=test_loader,
optimizer=optimizer, criterion=criterion, n_epochs=n_epochs,
verbose=verbose)
    print('Finished fine-tuning entire network, performance: \n'
'Training: %lf \nTest: %lf' % (
eval_target_model(model=model, data_loader=data_loader),
eval_target_model(model=model, data_loader=test_loader)))
return
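def _transfer_learn_example(new_train_loader, new_test_loader):
    # Illustrative sketch, not part of the original repo: adapt a pretrained
    # torchvision ResNet-18 to a hypothetical 10-class task. Indices (6, 9)
    # unfreeze layer3, layer4, avgpool and fc (modules 6-9 of resnet18) before
    # the optional fine-tuning pass over the whole network.
    import torchvision
    from torch import nn
    model = torchvision.models.resnet18(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, 10)
    transfer_learn(model=model, data_loader=new_train_loader,
                   test_loader=new_test_loader, criterion=nn.CrossEntropyLoss(),
                   lr=0.01, n_epochs=5, unfreeze_layers=(6, 9), fine_tune=True)
    return model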
class dimensionality_reduction():
"""Returns a wrapped model that will return only the top-n_dim most
probable classes during inference.
"""
def __init__(self, model=None, n_top=1, break_posterior=False):
""" Initializes the wrapped model.
Args:
model (nn.Module): Original, trained model to defend.
            n_top (int): New dimensionality, i.e. the number of top ranked
labels to return.
break_posterior (bool): If true, will return fixed posterior
                values instead of model-calculated values.
Returns:
"""
self.model = copy.deepcopy(model)
self.n_top = n_top
self.in_eval = False
self.break_posterior = break_posterior
def __call__(self, x):
"""Calls the model on input x and returns the reduced (n_top) output
Args:
x (torch.tensor): Same as any model input
Returns:
(torch.tensor): Returns (n_top,) dimensional torch.tensor with
scores on top classes.
"""
output = self.model(x)
if self.in_eval is False:
return output
reduced = torch.zeros(output.shape)
arr = output.detach().cpu().numpy()
to_del = arr.argsort(axis=1)[:, -self.n_top:]
for idx, img in enumerate(to_del):
for idy, label in enumerate(img[::-1]):
if self.break_posterior:
reduced[idx][label] = 1./(idy+1)
else:
reduced[idx][label] = output[idx][label]
return reduced
def eval(self):
"""Sets the model and wrapper to eval mode
"""
self.in_eval = True
self.model.eval()
def train(self):
"""Sets the model and wrapped to train mode
"""
self.in_eval = False
self.model.train()
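def _dimensionality_reduction_example(trained_model, imgs):
    # Illustrative sketch, not part of the original repo: wrap a trained model
    # so that, in eval mode, only its top-3 class scores are exposed to a
    # membership inference adversary; every other entry is zeroed out.
    defended = dimensionality_reduction(model=trained_model, n_top=3,
                                        break_posterior=False)
    defended.eval()
    reduced_scores = defended(imgs)  # same shape as trained_model(imgs)
    return reduced_scores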
def distill_model(teacher=None, student=None, data_loader=None,
test_loader=None, optimizer=None,
criterion=softCrossEntropy(), n_epochs=0, T=1.,
verbose=False):
"""Performs defensive distillation at desired temperature
Args:
teacher (nn.Module): Teacher model used to in distillation.
student (nn.Module): Student model into which to distill. If left as
None will copy and randomly initialize the teacher.
data_loader (Dataloader): Dataloader pointing to training dataset.
test_loader (Dataloader): Dataloader poinitng to validation dataset.
optimizer (nn.optim): Optimizer for distillation.
criterion (nn.Module): Criterion for loss calculation. Default is
softCrossEntropy(alpha = 0.95)
n_epochs (int): Maximum number of epochs during distillation.
T (int): Distillation temperature. Assumes the teacher was trained at
the same temperature.
verbose (bool): If True will output loss at each training step.
"""
| 6,149 | 37.198758 | 78 | py |
cyphercat | cyphercat-master/cyphercat/models.py | import os
import torch
import numpy as np
from torch import nn
import torch.nn.functional as fcnal
def new_size_conv(size, kernel, stride=1, padding=0):
"""Calculates the output size of a convolutional layer
Args:
size (int): Size of input (assumed square).
kernel (int): kernel size for convolution (assumed square).
stride (int): Convolution stride.
padding (int): Padding used in convolution.
Returns:
(int): Returns the output size of a theoritical convolution.
"""
return np.floor((size + 2*padding - (kernel - 1)-1)/stride + 1)
def new_size_max_pool(size, kernel, stride=None, padding=0):
"""Calculates the output size of a maxpool operation.
Args:
size (int): Input size (assumed square).
kernel (int): Maxpool kernel size (assumed square).
stride (int): Maxpool stride.
padding (int): Maxpool padding.
Returns:
(int): Returns the output size of a theoritical maxpool layer.
"""
if stride is None:
stride = kernel
return np.floor((size + 2*padding - (kernel - 1)-1)/stride + 1)
class AlexNet(nn.Module):
def __init__(self, n_in=3, n_classes=10, n_filters=64, size=32):
super(AlexNet, self).__init__()
n_h1 = 3 * n_filters
n_h2 = 2 * n_h1
self.features = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=6, stride=3, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(n_filters, n_h1, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(n_h1, n_h2, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(n_h2, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
out_feat_size = self.calc_alexnet_size(size)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * out_feat_size * out_feat_size, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, n_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def calc_alexnet_size(self, size):
x = new_size_conv(size, 6, 3, 2)
x = new_size_max_pool(x, 3, 2)
x = new_size_conv(x, 5, 1, 2)
x = new_size_max_pool(x, 3, 2)
x = new_size_conv(x, 3, 1, 1)
x = new_size_conv(x, 3, 1, 1)
x = new_size_conv(x, 3, 1, 1)
out = new_size_max_pool(x, 2, 2)
return out
class tiny_cnn(nn.Module):
def __init__(self, n_in=3, n_classes=10, n_filters=64, size=64):
super(tiny_cnn, self).__init__()
self.size = size
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(n_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, 2*n_filters, kernel_size=5, stride=1,
padding=2),
nn.BatchNorm2d(2*n_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(2*n_filters * (self.size//4) * (self.size//4),
2*n_filters)
self.output = nn.Linear(2*n_filters, n_classes)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(x.size(0), -1)
# x = x.view(-1, 2*self.n_filters * (self.size//4) * (self.size//4))
x = self.fc(x)
out = self.output(x)
return out
class mlleaks_cnn(nn.Module):
def __init__(self, n_in=3, n_classes=10, n_filters=64, size=128):
super(mlleaks_cnn, self).__init__()
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(n_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, 2*n_filters, kernel_size=5, stride=1,
padding=2),
nn.BatchNorm2d(2*n_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(2*n_filters * 8 * 8, size)
self.output = nn.Linear(2*n_filters, n_classes)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(-1, 2*self.n_filters * 8 * 8)
x = self.fc(x)
out = self.output(x)
return out
class mlleaks_mlp(nn.Module):
def __init__(self, n_in=3, n_classes=1, n_filters=64, size=64):
super(mlleaks_mlp, self).__init__()
self.hidden = nn.Linear(n_in, n_filters)
# self.bn = nn.BatchNorm1d(n_filters)
self.output = nn.Linear(n_filters, n_classes)
def forward(self, x):
x = fcnal.sigmoid(self.hidden(x))
# x = self.bn(x)
        out = fcnal.sigmoid(self.output(x))
return out
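def _mlleaks_attack_input_example():
    # Illustrative sketch, not part of the original repo: the ML-Leaks attack
    # MLP ingests the top-3 sorted softmax posteriors of a target/shadow model
    # and emits a membership score per sample.
    attack_net = mlleaks_mlp(n_in=3, n_classes=1)
    posteriors = fcnal.softmax(torch.randn(4, 10), dim=1)
    top_k, _ = torch.sort(posteriors, descending=True)
    scores = attack_net(top_k[:, :3])  # -> shape [4, 1]
    return scores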
class cnn(nn.Module):
def __init__(self, n_in, n_classes, n_filters, size):
super(cnn, self).__init__()
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=3, padding=1),
nn.BatchNorm2d(n_filters),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
# shape = [Batch_size, n_filters, height/2, width/2]
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, n_filters*2, kernel_size=3, padding=1),
nn.BatchNorm2d(n_filters*2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
# shape = [Batch_size, n_filters*2, height/4, width/4]
self.dense_block_1 = nn.Sequential(
# nn.Linear(n_filters * 2 * 8 * 8, 64),
            nn.Linear(n_filters*2 * 8 * 8, 64),  # output must match dense_block_2 input size (64)
# nn.BatchNorm1d(64),
# nn.ReLU(inplace=True)
)
# shape = [Batch_size, 64]
self.dense_block_2 = nn.Sequential(
nn.Linear(64, 32),
nn.BatchNorm1d(32),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, 32]
self.dense_block_3 = nn.Sequential(
nn.Linear(32, n_classes),
nn.BatchNorm1d(n_classes)
)
# shape = [Batch_size, 10]
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(-1, self.n_filters*2 * 8 * 8)
x = self.dense_block_1(x)
x = self.dense_block_2(x)
out = self.dense_block_3(x)
return out
class mlp(nn.Module):
def __init__(self, n_in, n_classes, n_filters, size):
super(mlp, self).__init__()
self.n_filters = n_filters
# shape = [Batch_size, k (top k posteriors)]
self.dense_block_1 = nn.Sequential(
nn.Linear(n_in, n_filters*2),
# nn.BatchNorm1d(n_filters*2),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, n_filters*2]
self.dense_block_2 = nn.Sequential(
nn.Linear(n_filters*2, n_filters*2),
# nn.BatchNorm1d(n_filters*2),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, 32]
self.dense_block_3 = nn.Sequential(
nn.Linear(n_filters*2, n_classes),
# nn.BatchNorm1d(n_classes),
nn.Sigmoid()
)
# shape = [Batch_size, 10]
def forward(self, x):
x = self.dense_block_1(x)
x = self.dense_block_2(x)
out = self.dense_block_3(x)
return out
class audio_cnn_block(nn.Module):
'''
1D convolution block used to build audio cnn classifiers
Args:
input: input channels
output: output channels
kernel_size: convolution kernel size
'''
def __init__(self, n_input, n_out, kernel_size):
super(audio_cnn_block, self).__init__()
self.cnn_block = nn.Sequential(
nn.Conv1d(n_input, n_out, kernel_size, padding=1),
nn.BatchNorm1d(n_out),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=4)
)
def forward(self, x):
return self.cnn_block(x)
class audio_tiny_cnn(nn.Module):
'''
Template for convolutional audio classifiers.
'''
def __init__(self, cnn_sizes, n_hidden, kernel_size, n_classes):
'''
Init
Args:
cnn_sizes: List of sizes for the convolution blocks
n_hidden: number of hidden units in the first fully connected layer
kernel_size: convolution kernel size
n_classes: number of speakers to classify
'''
super(audio_tiny_cnn, self).__init__()
self.down_path = nn.ModuleList()
self.down_path.append(audio_cnn_block(cnn_sizes[0], cnn_sizes[1],
kernel_size,))
self.down_path.append(audio_cnn_block(cnn_sizes[1], cnn_sizes[2],
kernel_size,))
self.down_path.append(audio_cnn_block(cnn_sizes[2], cnn_sizes[3],
kernel_size,))
self.fc = nn.Sequential(
nn.Linear(cnn_sizes[4], n_hidden),
nn.ReLU()
)
self.out = nn.Linear(n_hidden, n_classes)
def forward(self, x):
for down in self.down_path:
x = down(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return self.out(x)
def MFCC_cnn_classifier(n_classes=125):
'''
Builds speaker classifier that ingests MFCC's
'''
in_size = 20
n_hidden = 512
sizes_list = [in_size, 2*in_size, 4*in_size, 8*in_size, 8*in_size]
return audio_tiny_cnn(cnn_sizes=sizes_list, n_hidden=n_hidden,
kernel_size=3, n_classes=n_classes)
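def _mfcc_classifier_example():
    # Illustrative shape check, not part of the original repo: the MFCC
    # classifier expects 20 coefficients over 64 frames so that the three /4
    # max-poolings collapse the time axis to 1 before the fully connected head.
    model = MFCC_cnn_classifier(n_classes=125)
    dummy = torch.randn(8, 20, 64)  # (batch, n_mfcc, frames)
    logits = model(dummy)           # -> shape [8, 125]
    return logits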
def ft_cnn_classifer(n_classes=125):
'''
Builds speaker classifier that ingests the abs value of fourier transforms
'''
in_size = 94
n_hidden = 512
sizes_list = [in_size, in_size, 2*in_size, 4*in_size, 14*4*in_size]
return audio_tiny_cnn(cnn_sizes=sizes_list, n_hidden=n_hidden,
kernel_size=7, n_classes=n_classes)
def weights_init(m):
"""
Initializes weights of layers of model m
"""
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
nn.init.constant_(m.bias, 0)
# Dictionary for access of models defined above
PREDEF_MODELS = {"alexnet" : AlexNet,
"cnn" : cnn,
"tiny_cnn" : tiny_cnn,
"mlleaks_cnn" : mlleaks_cnn,
"mlp" : mlp,
"mlleaks_mlp" : mlleaks_mlp}
def get_predef_model(name=""):
"""
Convenience function for retreiving predefined model arch
Parameters
----------
name : {'alexnet', 'cnn', 'tiny_cnn', 'mlleaks_cnn', 'mlp', 'mlleaks_mlp'}
Name of model
Returns
-------
model : Model
Predefined model arch
"""
name = name.lower()
if name in PREDEF_MODELS:
model = PREDEF_MODELS[name]
return model
else:
raise ValueError('Invalid predefined model, {}, requested.'
' Must be in {}'.format(name, PREDEF_MODELS.keys()))
def save_checkpoint(model=None, optimizer=None, epoch=None,
data_descriptor=None, loss=None, accuracy=None, path='./',
filename='checkpoint', ext='.pth.tar'):
"""Saves model and optimizer state to a desired checkpoint file.
Args:
model (nn.Module): Model to save.
optimizer (nn.optim): Optimizer used to train the model.
epoch (int): Training epoch of current model and optimizer state.
data_descriptor (str): Description of the data used to train the model.
loss (int): Model loss at last training step.
accuracy (list(int)): List of model training, validation... accuracy.
path (str): Path to desired directory for checkpoint.
filename (str): Checkpoint name.
ext (str): Extension for checkpoint file (suggested 'pth.tar' or 'pth')
Returns:
"""
state = {
'epoch': epoch,
'arch': str(model.type),
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'loss': loss,
'accuracy': accuracy,
'dataset': data_descriptor
}
torch.save(state, path+filename+ext)
def load_checkpoint(model=None, optimizer=None, checkpoint=None):
"""Loads a checkpoint into a model saved using save_chekcpoint function.
Args:
model (nn.Module): Model into which to load the weights (should match
            the saved architecture).
optimizer (nn.optim): Optimizer into which to load the saved optimizer
state.
checkpoint (str): Path to the checkpoint file.
Returns:
        (dict): Returns the loaded dictionary.
"""
assert os.path.isfile(checkpoint), 'Checkpoint not found, aborting load'
chpt = torch.load(checkpoint)
    assert str(model.type) == chpt['arch'], ('Model architecture mismatch, '
                                             'aborting load')
    model.load_state_dict(chpt['state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(chpt['optimizer'])
    print('Successfully loaded checkpoint\nDataset: {}\nEpoch: {}\nLoss: {}'
          '\nAccuracy: {}'.format(chpt['dataset'], chpt['epoch'], chpt['loss'],
                                  chpt['accuracy']))
return chpt
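# Minimal usage sketch for the checkpoint helpers above. The tiny model,
# optimizer, file name and metadata below are placeholders chosen only for
# illustration; any nn.Module/optimizer pair from this module works the same.
def _example_checkpoint_roundtrip(path='./'):
    from torch import optim
    model = nn.Sequential(nn.Linear(8, 2))
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    save_checkpoint(model=model, optimizer=optimizer, epoch=1,
                    data_descriptor='toy data', loss=0.5, accuracy=[50.0],
                    path=path, filename='demo_checkpoint')
    # Reload into the same architecture; returns the saved state dictionary.
    return load_checkpoint(model=model, optimizer=optimizer,
                           checkpoint=path + 'demo_checkpoint.pth.tar')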
| 14,856 | 31.509847 | 79 | py |
cyphercat | cyphercat-master/cyphercat/metrics.py | import torch
import torch.nn.functional as fcnal
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def eval_target_model(model=None, data_loader=None, classes=None):
"""
    Function to evaluate a target model on the
    provided data set.
Parameters
----------
model : Module
PyTorch conforming nn.Module function
data_loader : DataLoader
PyTorch dataloader function
classes : list
list of classes
Returns
-------
accuracy : float
accuracy of target model
"""
if classes is not None:
n_classes = len(classes)
class_correct = np.zeros(n_classes)
class_total = np.zeros(n_classes)
total = 0
correct = 0
with torch.no_grad():
model.eval()
for i, (imgs, lbls) in enumerate(data_loader):
imgs, lbls = imgs.to(device), lbls.to(device)
output = model(imgs)
predicted = output.argmax(dim=1)
total += imgs.size(0)
correct += predicted.eq(lbls).sum().item()
if classes is not None:
for prediction, lbl in zip(predicted, lbls):
class_correct[lbl] += prediction == lbl
class_total[lbl] += 1
accuracy = 100*(correct/total)
if classes is not None:
for i in range(len(classes)):
            print('Accuracy of {} : {:.2f} %'
                  .format(classes[i],
                          100 * class_correct[i] / class_total[i]))
    print("\nAccuracy = {:.2f} %\n\n".format(accuracy))
return accuracy
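# Minimal usage sketch for eval_target_model. The linear model and random
# tensors below are stand-in placeholders for a trained classifier and a real
# DataLoader; only the module-level imports above are assumed.
def _example_eval_target_model():
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset
    model = nn.Linear(8, 3).to(device)
    data = TensorDataset(torch.randn(32, 8), torch.randint(0, 3, (32,)))
    loader = DataLoader(data, batch_size=8)
    return eval_target_model(model=model, data_loader=loader)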
def eval_attack_model(attack_model=None, target=None,
target_train=None, target_out=None, k=0, verbose=False):
"""
Assess accuracy, precision, and recall of attack model
for in training set/out of training set classification.
Edited for use with SVCs.
Parameters
----------
attack_model : Module
PyTorch conforming nn.Module function
target : Module
PyTorch conforming nn.Module function
target_train : DataLoader
PyTorch dataloader function
target_out : DataLoader
PyTorch dataloader function
k : int
        Number of top posteriors fed to the attack model
"""
in_predicts = []
out_predicts = []
if type(target) is not Pipeline:
target_model = target
target_model.eval()
attack_model.eval()
precisions = []
recalls = []
accuracies = []
# For threshold in np.arange(0.5, 1, 0.005):
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
train_top = np.empty((0, 2))
out_top = np.empty((0, 2))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train,
target_out)):
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
# [mini_batch_size x num_classes] tensors,
# (0,1) probabilities for each class for each sample)
if type(target) is Pipeline:
traininputs = train_imgs.view(train_imgs.shape[0], -1)
outinputs = out_imgs.view(out_imgs.shape[0], -1)
train_posteriors = torch.from_numpy(
target.predict_proba(traininputs)).float()
out_posteriors = torch.from_numpy(
target.predict_proba(outinputs)).float()
else:
train_posteriors = fcnal.softmax(target_model(
train_imgs.detach()), dim=1)
out_posteriors = fcnal.softmax(target_model(
out_imgs.detach()), dim=1)
# [k x mini_batch_size] tensors,
# (0,1) probabilities for top k probable classes
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:, :k].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:, :k].clone().to(device)
# Collects probabilities for predicted class.
for p in train_top_k:
in_predicts.append((p.max()).item())
for p in out_top_k:
out_predicts.append((p.max()).item())
if type(target) is not Pipeline:
train_top = np.vstack((train_top,
train_top_k[:, :2].cpu().detach().numpy()))
out_top = np.vstack((out_top,
out_top_k[:, :2].cpu().detach().numpy()))
# Takes in probabilities for top k most likely classes,
# outputs ~1 (in training set) or ~0 (out of training set)
train_predictions = torch.squeeze(attack_model(train_top_k))
out_predictions = torch.squeeze(attack_model(out_top_k))
for j, t in enumerate(thresholds):
true_positives[j] += (train_predictions >= t).sum().item()
false_positives[j] += (out_predictions >= t).sum().item()
false_negatives[j] += (train_predictions < t).sum().item()
correct[j] += (train_predictions >= t).sum().item()
correct[j] += (out_predictions < t).sum().item()
total[j] += train_predictions.size(0) + out_predictions.size(0)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = (true_positives[j] / (true_positives[j] +
false_positives[j])
if true_positives[j] + false_positives[j] != 0 else 0)
recall = (true_positives[j] / (true_positives[j] + false_negatives[j])
if true_positives[j] + false_negatives[j] != 0 else 0)
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
if verbose:
            print("threshold = %.4f, acc. = %.2f, precision = %.2f, "
                  "recall = %.2f" % (t, accuracy, precision, recall))
# Make a dataframe of precision & recall results
data = np.transpose([thresholds, accuracies, precisions, recalls])
df_pr = pd.DataFrame(columns=['Thresholds', 'Accuracy', 'Precision',
'Recall'], data=data)
return df_pr
def eval_membership_inference(target_model=None,
target_train=None, target_out=None):
"""
Function to evaluate a target model for
membership inference.
Parameters
----------
target_model : Module
PyTorch conforming nn.Module function
target_train : DataLoader
PyTorch dataloader function
target_out : DataLoader
PyTorch dataloader function
"""
target_model.eval()
precisions = []
recalls = []
accuracies = []
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train,
target_out)):
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = fcnal.softmax(
target_model(train_imgs.detach()), dim=1)
out_posteriors = fcnal.softmax(
target_model(out_imgs.detach()), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top = train_sort[:, 0].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top = out_sort[:, 0].clone().to(device)
for j, t in enumerate(thresholds):
true_positives[j] += (train_top >= t).sum().item()
false_positives[j] += (out_top >= t).sum().item()
false_negatives[j] += (train_top < t).sum().item()
correct[j] += (train_top >= t).sum().item()
correct[j] += (out_top < t).sum().item()
total[j] += train_top.size(0) + out_top.size(0)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = 0
if true_positives[j] + false_positives[j] != 0:
precision = (true_positives[j] / (true_positives[j]
+ false_positives[j]))
recall = 0
if true_positives[j] + false_negatives[j] != 0:
recall = (true_positives[j] / (true_positives[j]
+ false_negatives[j]))
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
print("threshold = {:.4f}, accuracy = {:.2f},"
"precision = {:.2f}, recall = {:.2f}"
.format(t, accuracy, precision, recall))
| 9,273 | 33.221402 | 78 | py |
cyphercat | cyphercat-master/cyphercat/train.py |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as fcnal
from sklearn.pipeline import Pipeline
from .metrics import eval_target_model
# Determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def label_to_onehot(labels, num_classes=10):
""" Converts label into a vector.
Args:
labels (int): Class label to convert to tensor.
num_classes (int): Number of classes for the model.
Returns:
(torch.tensor): Torch tensor with 0's everywhere except for 1 in
correct class.
"""
one_hot = torch.eye(num_classes)
return one_hot[labels.long()]
def train(model=None, data_loader=None, test_loader=None,
optimizer=None, criterion=None, n_epochs=0,
classes=None, verbose=False):
"""
Function to train a model provided
specified train/test sets and associated
training parameters.
Parameters
----------
model : Module
PyTorch conforming nn.Module function
data_loader : DataLoader
PyTorch dataloader function
test_loader : DataLoader
PyTorch dataloader function
optimizer : opt object
PyTorch conforming optimizer function
criterion : loss object
PyTorch conforming loss function
n_epochs : int
number of training epochs
classes : list
list of classes
verbose : boolean
flag for verbose print statements
"""
losses = []
for epoch in range(n_epochs):
model.train()
for i, batch in enumerate(data_loader):
data, labels = batch
data, labels = data.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[{}/{}][{}/{}] loss = {}"
.format(epoch, n_epochs, i,
len(data_loader), loss.item()))
# evaluate performance on testset at the end of each epoch
print("[{}/{}]".format(epoch, n_epochs))
print("Training:")
train_acc = eval_target_model(model, data_loader, classes=classes)
print("Test:")
test_acc = eval_target_model(model, test_loader, classes=classes)
# plt.plot(losses)
# plt.show()
return train_acc, test_acc
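# Minimal usage sketch for train(), using a throwaway linear classifier and a
# random synthetic dataset as placeholders for a real model and DataLoaders.
def _example_train():
    from torch import optim
    from torch.utils.data import DataLoader, TensorDataset
    model = nn.Linear(8, 3).to(device)
    data = TensorDataset(torch.randn(64, 8), torch.randint(0, 3, (64,)))
    loader = DataLoader(data, batch_size=16)
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    return train(model=model, data_loader=loader, test_loader=loader,
                 optimizer=optimizer, criterion=nn.CrossEntropyLoss(),
                 n_epochs=1)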
def train_attacker(attack_model=None, shadow_model=None,
shadow_train=None, shadow_out=None,
optimizer=None, criterion=None, n_epochs=0, k=0,
verbose=False):
"""
Trains attack model (classifies a sample as in or
out of training set) using shadow model outputs
(probabilities for sample class predictions).
The type of shadow model used can vary.
Parameters
----------
attack_model : Module
PyTorch conforming nn.Module function
shadow_model : Module
PyTorch conforming nn.Module function
shadow_train : DataLoader
PyTorch dataloader function
shadow_out : DataLoader
PyTorch dataloader function
optimizer : opt object
PyTorch conforming optimizer function
criterion : loss object
PyTorch conforming loss function
n_epochs : int
number of training epochs
k : int
        Number of top shadow-model posteriors used to train the attacker
"""
in_predicts = []
out_predicts = []
if type(shadow_model) is not Pipeline:
shadow_model = shadow_model
shadow_model.eval()
for epoch in range(n_epochs):
total = 0
correct = 0
train_top = np.empty((0, 2))
out_top = np.empty((0, 2))
for i, ((train_data, train_lbls),
(out_data, out_lbls)) in enumerate(zip(shadow_train,
shadow_out)):
# out_data = torch.randn(out_data.shape)
mini_batch_size = train_data.shape[0]
out_mini_batch_size = out_data.shape[0]
if mini_batch_size != out_mini_batch_size:
continue
'''if mini_batch_size != out_mini_batch_size:
break'''
if type(shadow_model) is not Pipeline:
train_data = train_data.to(device).detach()
out_data = out_data.to(device).detach()
train_posteriors = fcnal.softmax(shadow_model(train_data),
dim=1)
out_posteriors = fcnal.softmax(shadow_model(out_data),
dim=1)
else:
traininputs = train_data.view(train_data.shape[0], -1)
outinputs = out_data.view(out_data.shape[0], -1)
in_preds = shadow_model.predict_proba(traininputs)
train_posteriors = torch.from_numpy(in_preds).float()
# for p in in_preds:
# in_predicts.append(p.max())
out_preds = shadow_model.predict_proba(outinputs)
out_posteriors = torch.from_numpy(out_preds).float()
# for p in out_preds:
# out_predicts.append(p.max())
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:, :k].clone().to(device)
for p in train_top_k:
in_predicts.append((p.max()).item())
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:, :k].clone().to(device)
for p in out_top_k:
out_predicts.append((p.max()).item())
train_top = np.vstack((train_top,
train_top_k[:, :2].cpu().detach().numpy()))
out_top = np.vstack((out_top,
out_top_k[:, :2].cpu().detach().numpy()))
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(out_mini_batch_size).to(device)
optimizer.zero_grad()
train_predictions = torch.squeeze(attack_model(train_top_k))
out_predictions = torch.squeeze(attack_model(out_top_k))
loss_train = criterion(train_predictions, train_lbl)
loss_out = criterion(out_predictions, out_lbl)
loss = (loss_train + loss_out) / 2
if type(shadow_model) is not Pipeline:
loss.backward()
optimizer.step()
correct += (train_predictions >= 0.5).sum().item()
correct += (out_predictions < 0.5).sum().item()
total += train_predictions.size(0) + out_predictions.size(0)
if verbose:
print("[{}/{}][{}/{}] loss = {:.2f}, accuracy = {:.2f}"
.format(epoch, n_epochs, i, len(shadow_train),
loss.item(), 100 * correct / total))
# Plot distributions for target predictions
# in training set and out of training set
"""
fig, ax = plt.subplots(2,1)
plt.subplot(2,1,1)
plt.hist(in_predicts, bins='auto')
plt.title('In')
plt.subplot(2,1,2)
plt.hist(out_predicts, bins='auto')
plt.title('Out')
"""
'''
plt.scatter(out_top.T[0,:], out_top.T[1,:], c='b')
plt.scatter(train_top.T[0,:], train_top.T[1,:], c='r')
plt.show()
'''
class softCrossEntropy(torch.nn.Module):
def __init__(self, alpha=0.95):
"""
:param alpha: Strength (0-1) of influence from soft labels in training
"""
super(softCrossEntropy, self).__init__()
self.alpha = alpha
return
def forward(self, inputs, target, true_labels):
"""
:param inputs: predictions
:param target: target (soft) labels
:param true_labels: true (hard) labels
:return: loss
"""
KD_loss = self.alpha
KD_loss *= nn.KLDivLoss(size_average=False)(
fcnal.log_softmax(inputs, dim=1),
fcnal.softmax(target, dim=1)
)
KD_loss += (1-self.alpha)*fcnal.cross_entropy(inputs, true_labels)
return KD_loss
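# Minimal usage sketch for softCrossEntropy: student logits, teacher logits
# (soft targets) and hard labels. All tensors are random placeholders.
def _example_soft_cross_entropy():
    criterion = softCrossEntropy(alpha=0.9)
    student_logits = torch.randn(4, 10)
    teacher_logits = torch.randn(4, 10)
    hard_labels = torch.randint(0, 10, (4,))
    return criterion(student_logits, teacher_logits, hard_labels)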
def distill_training(teacher=None, learner=None, data_loader=None,
test_loader=None, optimizer=None,
criterion=None, n_epochs=0, verbose=False):
"""
:param teacher: network to provide soft labels in training
:param learner: network to distill knowledge into
:param data_loader: data loader for training data set
    :param test_loader: data loader for validation data
:param optimizer: optimizer for training
:param criterion: objective function, should allow for soft labels.
We suggest softCrossEntropy
:param n_epochs: epochs for training
:param verbose: verbose == True will print loss at each batch
    :return: (train accuracy, test accuracy); the learner is trained in place
"""
losses = []
for epoch in range(n_epochs):
teacher.eval()
learner.train()
for i, batch in enumerate(data_loader):
with torch.set_grad_enabled(False):
data, labels = batch
data, labels = data.to(device), labels.to(device)
                soft_labels = teacher(data)
with torch.set_grad_enabled(True):
optimizer.zero_grad()
outputs = learner(data)
                loss = criterion(outputs, soft_labels, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[{}/{}][{}/{}] loss = {}"
.format(epoch, n_epochs, i,
len(data_loader), loss.item()))
# evaluate performance on testset at the end of each epoch
print("[{}/{}]".format(epoch, n_epochs))
print("Training:")
train_acc = eval_target_model(learner, data_loader, classes=None)
print("Testing:")
test_acc = eval_target_model(learner, test_loader, classes=None)
return train_acc, test_acc
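# Minimal usage sketch for distill_training, pairing a throwaway "teacher"
# with an identically shaped "learner" on random data. Models, data and the
# single training epoch are illustrative placeholders.
def _example_distill_training():
    from torch import optim
    from torch.utils.data import DataLoader, TensorDataset
    teacher = nn.Linear(8, 3).to(device)
    learner = nn.Linear(8, 3).to(device)
    data = TensorDataset(torch.randn(64, 8), torch.randint(0, 3, (64,)))
    loader = DataLoader(data, batch_size=16)
    optimizer = optim.SGD(learner.parameters(), lr=0.1)
    return distill_training(teacher=teacher, learner=learner,
                            data_loader=loader, test_loader=loader,
                            optimizer=optimizer,
                            criterion=softCrossEntropy(alpha=0.9), n_epochs=1)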
def inf_adv_train(target_model=None, inf_model=None, train_set=None,
test_set=None, inf_in_set=None, target_optim=None,
target_criterion=None, inf_optim=None, inf_criterion=None,
n_epochs=0, privacy_theta=0, verbose=False):
"""Method to run adversarial training during membership inference
Args:
target_model (nn.Module): Target classifier to adversarially train.
inf_model (nn.Module): Adversary attacking the target during training.
        train_set (DataLoader): DataLoader pointing to the classifier training
            set (split[0]).
        test_set (DataLoader): DataLoader pointing to the validation set. Also
used as out-of-set for the inference (split[1]).
inf_in_set (DataLoader): Data loader pointing to a subset of the
train_set used for inference in-set (split[4])
target_optim (torch.optim): Target optimizer.
target_criterion (nn.Module): Target loss criterion.
inf_optim (torch.optim): Adversary optimizer.
inf_criterion (nn.Module): Adversary loss criterion.
privacy_theta (float): Regularization constant. Sets relative
importance of classification loss vs. adversarial loss.
        verbose (bool): If True will print the loss at each step in training.
Returns:
Example:
Todos:
Include example.
"""
# inf_losses = []
# losses = []
inf_model.train()
target_model.train()
for epoch in range(n_epochs):
train_top = np.array([])
out_top = np.array([])
train_p = np.array([])
out_p = np.array([])
total_inference = 0
total_correct_inference = 0
for k_count, ((in_data, _), (out_data, _)) in enumerate(zip(inf_in_set,
test_set)):
# train inference network
in_data, out_data = in_data.to(device), out_data.to(device)
mini_batch_size = in_data.shape[0]
out_mini_batch_size = out_data.shape[0]
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(out_mini_batch_size).to(device)
train_posteriors = fcnal.softmax(target_model(in_data), dim=1)
out_posteriors = fcnal.softmax(target_model(out_data), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
out_sort, _ = torch.sort(out_posteriors, descending=True)
t_p = train_sort[:, :4].cpu().detach().numpy().flatten()
o_p = out_sort[:, :4].cpu().detach().numpy().flatten()
train_p = np.concatenate((train_p, t_p))
out_p = np.concatenate((out_p, o_p))
train_top = np.concatenate((train_top,
train_sort[:, 0].cpu().
detach().numpy()))
out_top = np.concatenate((out_top,
out_sort[:, 0].cpu().detach().numpy()))
inf_optim.zero_grad()
train_inference = inf_model(train_posteriors,
label_to_onehot(train_lbl).to(device))
train_inference = torch.squeeze(train_inference)
#
out_inference = inf_model(out_posteriors,
label_to_onehot(out_lbl).to(device))
out_inference = torch.squeeze(out_inference)
#
total_inference += 2*mini_batch_size
total_correct_inference += torch.sum(train_inference > 0.5).item()
total_correct_inference += torch.sum(out_inference < 0.5).item()
loss_train = inf_criterion(train_inference, train_lbl)
loss_out = inf_criterion(out_inference, out_lbl)
loss = privacy_theta * (loss_train + loss_out)/2
loss.backward()
inf_optim.step()
            # train classification network
            train_imgs, train_lbls = next(iter(train_set))
train_imgs, train_lbls = train_imgs.to(device), train_lbls.to(device)
target_optim.zero_grad()
outputs = target_model(train_imgs)
train_posteriors = fcnal.softmax(outputs, dim=1)
loss_classification = target_criterion(outputs, train_lbls)
train_lbl = torch.ones(mini_batch_size).to(device)
train_inference = inf_model(train_posteriors,
label_to_onehot(train_lbls).to(device))
train_inference = torch.squeeze(train_inference)
loss_infer = inf_criterion(train_inference, train_lbl)
loss = loss_classification - privacy_theta * loss_infer
loss.backward()
target_optim.step()
if verbose:
print("[{}/{}] loss = {}"
.format(epoch, n_epochs, loss.item()))
| 15,691 | 35.749415 | 79 | py |
cyphercat | cyphercat-master/cyphercat/attacks.py | # Pytorch imports
import torch
# Cyphercat imports
from .train import train, train_attacker
from .metrics import eval_membership_inference, eval_attack_model
# Device to run on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def ml_leaks1(target=None, shadow_model=None, attacker_model=None,
target_in_loader=None, target_out_loader=None,
shadow_train_loader=None, shadow_out_loader=None,
shadow_optim=None, attack_optim=None, shadow_criterion=None,
attack_criterion=None, shadow_epochs=0, attack_epochs=0,
classes=None, n_max_posteriors=3, retrain=True, verbose=False):
'''Implementation of ml_leaks 1 membership inference attack
    Trains a shadow network on an independent data set and then trains the
    attacker to infer membership on this shadow net. Finally, the attacker is
    used to run membership inference on the target.
Args:
target (nn.Module): Trained target network.
shadow_model (nn.Module): Shadow network to help train the attacker in
membership inference task.
attacker_model (nn.Module): Network to be trained in membership
inference task.
target_in_loader (DataLoader): DataLoader pointing to target in-data
used for testing the attack (split[4])
target_out_loader (DataLoader): Loads data pointing to target out-of-
training dataset (split[1]) used for attack evaluation.
shadow_train_loader (DataLoader): Loader for shadow_model training
(split[2]).
shadow_out_loader: Out-of-sample from shadow net, used to train the
attacker (split[3]).
shadow_optim (torch.optim): Optimizer for shadow_model training.
attack_optim (torch.optim): Optimizer for attacker_model training.
shadow_criterion (torch.nn): Loss function for shadow_model training.
attack_criterion (torch.nn): Loss function for attacker_model
training.
shadow_epochs (int): Number of epochs used to train the shadow network.
attack_epochs (int): Number of epochs used to train the attack network.
classes (list): Classes for membership inference task.
n_max_posteriors (int): Number of maximal posteriors to use in
membership inference attack.
retrain (bool): If True will retrain the shadow and attack network,
otherwise will simply use the provided attacker model as is fed.
verbose (bool): If True will print the loss at each batch during all
training steps.
Example:
To-do:
Add example to docstring.
'''
if retrain:
print('---- Training shadow network ----')
train(model=shadow_model, data_loader=shadow_train_loader,
test_loader=shadow_out_loader, optimizer=shadow_optim,
criterion=shadow_criterion, n_epochs=shadow_epochs,
classes=classes, verbose=verbose)
#
print('---- Training attack network ----')
train_attacker(attack_model=attacker_model, shadow_model=shadow_model,
shadow_train=shadow_train_loader,
shadow_out=shadow_out_loader, optimizer=attack_optim,
criterion=attack_criterion, n_epochs=attack_epochs,
k=n_max_posteriors)
#
print('---- Evaluate attack ----')
df_pr = eval_attack_model(attack_model=attacker_model, target=target,
target_train=target_in_loader,
target_out=target_out_loader, k=n_max_posteriors)
return df_pr
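# Minimal usage sketch for ml_leaks1 on random synthetic data: a linear
# "target", a linear shadow model and a tiny attack net over the top-3
# posteriors. Every model, loader and hyperparameter here is a toy placeholder.
def _example_ml_leaks1():
    import torch.nn as nn
    from torch import optim
    from torch.utils.data import DataLoader, TensorDataset
    def toy_loader(n=32):
        data = TensorDataset(torch.randn(n, 8), torch.randint(0, 3, (n,)))
        return DataLoader(data, batch_size=8)
    target = nn.Linear(8, 3).to(device)
    shadow = nn.Linear(8, 3).to(device)
    attacker = nn.Sequential(nn.Linear(3, 1), nn.Sigmoid()).to(device)
    return ml_leaks1(target=target, shadow_model=shadow,
                     attacker_model=attacker,
                     target_in_loader=toy_loader(),
                     target_out_loader=toy_loader(),
                     shadow_train_loader=toy_loader(),
                     shadow_out_loader=toy_loader(),
                     shadow_optim=optim.SGD(shadow.parameters(), lr=0.1),
                     attack_optim=optim.SGD(attacker.parameters(), lr=0.1),
                     shadow_criterion=nn.CrossEntropyLoss(),
                     attack_criterion=nn.BCELoss(),
                     shadow_epochs=1, attack_epochs=1, n_max_posteriors=3)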
def ml_leaks3(target=None, target_in_loader=None, target_out_loader=None):
''' Implementation of ml_leaks 3 membership inference attack
Args:
target (nn.Module): Trained target network to attack
target_in_loader (DataLoader): Loader pointing to data used to
train target (split[4]). Used here to evaluate attack
performance.
target_out_loader: Loader pointing to the target out-of-training data
(split[1])
Example:
To-do:
Add example to docstring.
'''
eval_membership_inference(target_model=target,
target_train=target_in_loader,
target_out=target_out_loader)
def mi_gradient_ascent(input_sample=None, target_model=None, optimizer=None,
category=None, iterations=0, verbose=False):
""" Implementation of gradient based model inversion attack
Args:
input_sample (torch.tensor): Initialized input sample, usually
randomly generated. Size should match the model input.
target_model (nn.Module): Pretrained model to attack.
optimizer (nn.optim): Optimizer (initialized on image parameters) used
in attack.
category (int): Category to invert.
iterations (int): Query iterations in the attack.
verbose (bool): If True will print the loss at each step in attack.
Returns:
(list(float)): Returns a list of the losses at each iteration.
Example:
Todos:
Write example
"""
    category = torch.tensor([category], dtype=torch.long, device=device)
losses = []
for i_step in range(iterations):
target_model.zero_grad()
out = target_model(input_sample)
loss = -out.take(category)
loss.backward()
#
optimizer.step()
input_sample.grad.zero_()
losses.append(loss.data)
#
return losses
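# Minimal usage sketch for mi_gradient_ascent: invert class 1 of a throwaway
# linear "target" starting from a zero-initialised input. The model, shapes
# and iteration count are illustrative placeholders.
def _example_mi_gradient_ascent():
    import torch.nn as nn
    from torch import optim
    target = nn.Linear(8, 3).to(device)
    sample = torch.zeros(1, 8, device=device, requires_grad=True)
    optimizer = optim.SGD([sample], lr=0.1)
    return mi_gradient_ascent(input_sample=sample, target_model=target,
                              optimizer=optimizer, category=1, iterations=5)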
| 5,614 | 40.286765 | 79 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/lfw_dataset.py | import os
import shutil
import numpy as np
from skimage import io
from torch.utils.data.dataset import Dataset
class LFWDataset(Dataset):
"""
    Labeled Faces in the Wild (LFW) specific dataset class.
    Includes indexing functionality.
    Inherits from the PyTorch Dataset class.
"""
def __init__(self, data_struct=None, train_set=True, transform=None):
self.data_struct = data_struct
self.custom_prep_data()
self.test_train_split = 0.8
self.transform = transform
n_classes, file_list, class_to_label = self.index(train_set)
self.n_classes = n_classes
self.file_list = file_list
self.people_to_idx = class_to_label
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
img_path = self.file_list[idx]
image = io.imread(img_path)
label = self.people_to_idx[img_path.split('/')[-2]]
if self.transform is not None:
image = self.transform(image)
return image, label
def index(self, is_train_set):
data_dir = self.data_struct.save_path
img_paths = []
for p in os.listdir(data_dir):
for i in os.listdir(os.path.join(data_dir, p)):
img_paths.append(os.path.join(data_dir, p, i))
class_list = []
class_to_idx = {}
k = 0
for i in img_paths:
name = i.split('/')[-2]
if name not in class_to_idx:
class_list.append(name)
class_to_idx[name] = k
k += 1
n_classes = len(class_list)
img_paths = np.random.permutation(img_paths)
dataset_size = len(img_paths)
trainset_size = int(self.test_train_split * dataset_size)
if is_train_set:
file_list = img_paths[:trainset_size]
else:
file_list = img_paths[trainset_size:]
return n_classes, file_list, class_to_idx
def custom_prep_data(self):
data_name = self.data_struct.name
out_dir = self.data_struct.save_path
# LFW specific prep steps
lfw_dir = out_dir + '_original/'
# If dataset already downloaded an unpacked, do nothing
if os.path.isdir(lfw_dir):
return
os.rename(out_dir, lfw_dir)
people_dir = os.listdir(lfw_dir)
num_per_class = 20
for p in people_dir:
imgs = os.listdir(os.path.join(lfw_dir, p))
if len(imgs) >= num_per_class:
shutil.copytree(os.path.join(lfw_dir, p),
os.path.join(out_dir, p))
print('{} successfully downloaded and preprocessed.'.format(data_name))
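# Minimal usage sketch showing the `data_struct` interface LFWDataset expects:
# any object exposing `name` and `save_path` attributes. The path below is a
# placeholder and the LFW images are assumed to already live there.
def _example_lfw_dataset():
    from types import SimpleNamespace
    data_struct = SimpleNamespace(name='lfw', save_path='path/to/lfw')
    return LFWDataset(data_struct=data_struct, train_set=True)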
| 2,692 | 27.956989 | 79 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/cyphercat_dataset.py | from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from .cifar10_dataset import Cifar10_preload_and_split
from .libri_dataset import Libri_preload_and_split
class CCATDataset():
"""
This is a generic cyphercat dataset class for accessing the various
datasets accessible to the package.
# Args
name (string): dataset name
    splits (list): list of fractional splits to build
    transforms (list(torch)): list of torch transform functions
        Must be None or have length in {1, len(splits)}
cache: bool. Whether or not to use the cached index file
"""
def __init__(self, path='', name='', splits=[1], transforms=None):
self.path = path
self.name = name
self.splits = splits
self.nsplits = len(splits)
self.transforms = transforms
self.datasplits = self.prep_dataset_splits()
def prep_dataset_splits(self):
# Check that there is either 1 transform fn,
# or the same as the number of requested splits
if self.transforms:
tlen = len(self.transforms)
slen = self.nsplits
assert tlen==1 or tlen == slen, "Error: transform list incorrect. "\
"Must be 1 element or same length as splits. "\
"len(transforms) == {}".format(tlen)
# Grab appropriate preloader_splitter function
presplit_fn = get_preload_split_fn(self.name)
# Do the splits preloading...
return presplit_fn(path=self.path, splits=self.splits, transform=self.transforms)
def get_dataset_all_splits(self):
return self.datasplits
def get_split_n(self, n=0):
assert n >= 0 and n < self.nsplits, "Error: requesting invalid split."\
"Choose split btw 0 and {}".format(self.nsplits-1)
return self.datasplits[n]
# Functions
PRELOAD_SPLIT_FN_DICT = {'cifar-10': Cifar10_preload_and_split,
'librispeech': Libri_preload_and_split,
}
def get_preload_split_fn(name=''):
"""
Convenience function for retrieving allowed
cyphercat split dataset functions.
Parameters
----------
name : {'cifar-10', 'librispeech'}
Name of dataset
Returns
-------
fn : function
Dataset specific splitter function
"""
if name in PRELOAD_SPLIT_FN_DICT:
fn = PRELOAD_SPLIT_FN_DICT[name]
return fn
else:
raise ValueError('Invalid dataset, {}, entered. Must be '
'in {}'.format(name, PRELOAD_SPLIT_FN_DICT.keys()))
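# Minimal usage sketch for CCATDataset with CIFAR-10. It assumes the CIFAR-10
# archive is already present under DATASETS_DIR and that torchvision is
# installed; the single ToTensor transform is only a placeholder.
def _example_ccat_cifar10():
    import torchvision.transforms as transforms
    transform = transforms.Compose([transforms.ToTensor()])
    ccat = CCATDataset(path=DATASETS_DIR, name='cifar-10',
                       splits=[0.4, 0.1, 0.25, 0.25], transforms=[transform])
    return ccat.get_split_n(0)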
| 2,715 | 33.820513 | 93 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/cifar10_dataset.py | import torch
from torch.utils.data import Dataset, ConcatDataset
import torchvision
from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from tqdm import tqdm
import pandas as pd
import numpy as np
import pickle
import os
from .splitter import splitter, dataset_split
def Cifar10_preload_and_split(path=None, splits=[0.4, 0.1, 0.25, 0.25], transform=None):
"""Index and split CIFAR10 dataset.
Args:
path (string): Path to location containing dataset. If left as None
will search default location 'DATASETS_DIR' specified in
definitions.
splits (list): list of fractional splits
Returns:
dict(Dataframes): Dictionary containing the dataframes corresponding
to each split inclduing metadata.
Example:
Todo:
- Write Example.
- More work on user specified splits.
"""
if path is None:
path = DATASETS_DIR
index_file = os.path.join(path, 'cifar10.index.csv')
indices = None
if os.path.exists(index_file):
index_csv = np.loadtxt(index_file)
        indices = torch.tensor(index_csv, dtype=torch.long)
print('Found predefined indexing file {}'.format(index_file))
trainset = torchvision.datasets.CIFAR10(path, train=True, transform=transform[0], download=False)
testset = torchvision.datasets.CIFAR10(path, train=False, transform=transform[0], download=False)
fullset = ConcatDataset([trainset, testset])
print('Initializing CIFAR10Dataset splits')
    # Convert the fractional splits into integer sample counts
dset_size = fullset.cumulative_sizes[-1]
int_splits = []
for i in range(len(splits)):
int_splits.append(int(dset_size * splits[i]))
if sum(int_splits) < dset_size:
rem = dset_size - sum(int_splits)
int_splits[-1] += rem
indices, splitsets = dataset_split(fullset, int_splits, indices=indices)
if not os.path.exists(index_file):
        print('No predefined indexing file found; saving index permutations to {}'.format(index_file))
np.savetxt(index_file, indices.numpy(), fmt='%i', delimiter=',')
print('Finished splitting data.')
return splitsets
| 2,161 | 31.757576 | 105 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/splitter.py | from torch import randperm
from torch._utils import _accumulate
from torch.utils.data.dataset import Subset
import pandas as pd
def dataset_split(dataset=None, lengths=None, indices=None):
"""
Split a dataset into non-overlapping new datasets of given lengths.
If indices is undefined, then a random permutation of dataset
is generated. Slight modification of torch.utils.data.random_split
to gain access to permuted indices.
Arguments:
dataset (Dataset): Dataset to be split
lengths (sequence): lengths of splits to be produced
indices (tensor): permutations of instances
Returns:
        indices (tensor): permutations of instances
"""
if sum(lengths) != len(dataset):
        raise ValueError('Sum of input lengths does not equal the length of '
                         'the input dataset!')
# If requested a random split of dataset
if indices is None:
indices = randperm(sum(lengths))
indices = (indices).long()
return indices, [Subset(dataset, indices[offset - length:offset])
for offset, length in zip(_accumulate(lengths), lengths)]
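# Minimal usage sketch for dataset_split: a 7/3 partition of a 10-element
# synthetic dataset. The returned indices allow the split to be reproduced.
def _example_dataset_split():
    import torch
    from torch.utils.data import TensorDataset
    data = TensorDataset(torch.arange(10).float().unsqueeze(1),
                         torch.arange(10))
    indices, (train_subset, test_subset) = dataset_split(data, [7, 3])
    return indices, len(train_subset), len(test_subset)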
def splitter(dfs={}, df=None, unique_categories=[], category_id='', splits=[],
N=-1, split_by_class=False):
""" Splits the data for given unqie categories according to specified
fractions.
Args:
dfs (dict(Dataframe): Current dictionary of dataframes. New splits
will be concatenated to this dict.
        df (Dataframe): Dataframe containing all of the data and metadata.
unique_categories (list(int)): List containing the indices of
categories to include in these splits.
category_id (string): Defining category for dataset in Dataframe
object.
splits (list(float)): List containing the fraction of the data to be
included in each split.
N (int): index to assign new splits when appending to dfs.
        split_by_class (bool): If True, will split by class; if False, will
            split by data.
Returns:
(dict(Dataframe)): Updated dictionary of data splits.
Example:
Todo:
- Add example.
"""
# N is to keep track of the dataframe dict keys
n_splits = len(splits)
tot_categories = len(unique_categories)
# This if statement is terminated by a return to avoid else
if split_by_class:
start_category = 0
used_categories = 0
for idx, s in enumerate(splits):
if idx != n_splits-1:
n_categories = int(s*tot_categories)
used_categories += n_categories
else:
n_categories = tot_categories - used_categories
stop_category = start_category + n_categories
for i_cat, category in enumerate(unique_categories[start_category:
stop_category]):
if i_cat == 0:
dfs[idx + N] = df[df['speaker_id'] == category]
else:
dfs[idx + N] = dfs[idx + N].append(df[df['speaker_id'] ==
category])
start_category += n_categories
for idx in range(n_splits):
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
for category in unique_categories: # for each category
# category = valid_sequence.unique_categories[0]
tot_files = sum(df[category_id] == category)
mini_df = df[df[category_id] == category]
mini_df = mini_df.reset_index()
used_files = 0
start_file = 0
for idx, s in enumerate(splits): # for each split
if idx != n_splits-1:
n_files = int(s*tot_files)
used_files += n_files
else:
n_files = tot_files - used_files
# get stop index for the desired # of files:
stop_file = start_file + n_files
# initialize if first category, or append if later category
if category == unique_categories[0]:
dfs[idx + N] = (mini_df.iloc[start_file:stop_file])
else:
dfs[idx + N] = dfs[idx + N].append(mini_df.iloc[start_file:
stop_file])
# update start_file
start_file += n_files
for idx in range(n_splits): # for each dataframe
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
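# Minimal usage sketch for splitter: a 50/50 per-speaker split of a toy
# metadata dataframe. Speaker ids and file paths are placeholders.
def _example_splitter():
    df = pd.DataFrame({'speaker_id': [0, 0, 0, 0, 1, 1, 1, 1],
                       'filepath': ['f{}.wav'.format(i) for i in range(8)]})
    return splitter(dfs={}, df=df, unique_categories=[0, 1],
                    category_id='speaker_id', splits=[0.5, 0.5], N=0)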
def splitter2(dfs={}, df=None, unique_categories=[], category_id='', splits=[],
N=-1, split_by_class=False):
""" Splits the data for given unqie categories according to specified
fractions.
Args:
dfs (dict(Dataframe): Current dictionary of dataframes. New splits
will be concatenated to this dict.
        df (Dataframe): Dataframe containing all of the data and metadata.
unique_categories (list(int)): List containing the indices of
categories to include in these splits.
category_id (string): Defining category for dataset in Dataframe
object.
splits (list(float)): List containing the fraction of the data to be
included in each split.
N (int): index to assign new splits when appending to dfs.
        split_by_class (bool): If True, will split by class; if False, will
            split by data.
Returns:
(dict(Dataframe)): Updated dictionary of data splits.
Example:
Todo:
- Add example.
"""
# N is to keep track of the dataframe dict keys
n_splits = len(splits)
dfs[N] = pd.DataFrame(columns=df.columns)
dfs[N+1] = pd.DataFrame(columns=df.columns)
tot_categories = len(unique_categories)
# This if statement is terminated by a return to avoid else
if split_by_class:
start_category = 0
used_categories = 0
for idx, s in enumerate(splits):
if idx != n_splits-1:
n_categories = int(s*tot_categories)
used_categories += n_categories
else:
n_categories = tot_categories - used_categories
stop_category = start_category + n_categories
for i_cat, category in enumerate(unique_categories[start_category:
stop_category]):
if i_cat == 0:
dfs[idx + N] = df[df['speaker_id'] == category]
else:
dfs[idx + N] = dfs[idx + N].append(df[df['speaker_id'] ==
category])
start_category += n_categories
for idx in range(n_splits):
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
for category in unique_categories: # for each category
mini_df = df[df[category_id] == category]
mini_df = mini_df.reset_index()
# Identify segments:
n_seg = len(mini_df.Section.unique())
seg1 = round(splits[0]*n_seg)
# Segments are not ordered, so just pick the first few for seg1
seg1s = mini_df.Section.unique()[:seg1]
dfs[N] = dfs[N].append(mini_df[mini_df['Section'].isin(seg1s)])
dfs[N+1] = dfs[N+1].append(mini_df[~mini_df['Section'].isin(seg1s)])
for idx in range(n_splits): # for each dataframe
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
| 7,516 | 35.848039 | 79 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/voices_dataset.py | from torch.utils.data import Dataset
from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from tqdm import tqdm
import soundfile as sf
import pandas as pd
import numpy as np
import os
from .splitter import splitter, splitter2
LIBRISPEECH_SAMPLING_RATE = 16000
sex_to_label = {'M': False, 'F': True}
label_to_sex = {False: 'M', True: 'F'}
def load_or_index_subset(subset=None, path=None, fragment_seconds=3,
pad=False):
""" Subroutine to either load existing subset dataframe or index and save it
Args:
        subset (string): VOiCES subset to either load or index.
path (string): Path to search for or save indexed subset.
fragment_seconds (float): Number of seconds for audio samples.
        pad (bool): If true will accept short fragments and pad with silence.
Returns:
(pandas.Dataframe): Returns indexed subset in dataframe.
"""
index_file = path + '/VOiCES-{}.index.csv'.format(subset)
subset_index_path = index_file
if os.path.exists(subset_index_path):
df = pd.read_csv(subset_index_path)
# otherwise cache them
else:
print('Files not found, indexing {}'.format(subset))
speaker_file = '/VOiCES/Lab41-SRI-VOiCES-speaker-gender-dataset.tbl'
df = pd.read_csv(path+speaker_file, skiprows=0,
delimiter=' ', error_bad_lines=False)
df.columns = [col.strip().replace(';', '').lower()
for col in df.columns]
df = df.assign(
sex=df['gender'].apply(lambda x: x.strip()),
subset=df['dataset'].apply(lambda x: x.strip()),
)
df = df.rename(columns={'speaker': 'id', 'gender': 'sex',
'dataset': 'subset'})
audio_files = index_subset(path, subset)
# Merge individual audio files with indexing dataframe
df = pd.merge(df, pd.DataFrame(audio_files))
# Remove duplicate column names
df = df[['id', 'sex', 'subset', 'filepath', 'length', 'seconds']]
# Add additional useful columns to dataframe:
snippets = []
mikes = []
degrees = []
noises = []
for i in df.index:
snip = df.filepath[i]
sg = snip.index('sg')
snippets.append(snip[sg+2:sg+6])
mc = snip.index('mc')
mikes.append(snip[mc+2:mc+4])
dg = snip.index('dg')
degrees.append(snip[dg+2:dg+5])
rm = snip.index('rm')
dash = snip[rm:].index('/') # Find first / after rm
noises.append(snip[rm:][dash+1:dash+5])
df = df.assign(Section=snippets, Mic=mikes,
Degree=degrees, Noise=noises)
mins = (df.groupby('id').sum()['seconds']/60)
min_dict = mins.to_dict()
df = df.assign(speaker_minutes=df['id'])
df['speaker_minutes'] = df['speaker_minutes'].map(min_dict)
# Save index files to data folder
df.to_csv(index_file, index=False)
# Trim too-small files
if not pad:
df = df[df['seconds'] > fragment_seconds]
# Renaming for clarity
df = df.rename(columns={'id': 'speaker_id'})
# Index of dataframe has direct correspondence to item in dataset
df = df.reset_index(drop=True)
df = df.assign(id=df.index.values)
print('\t Finished indexing {}. {} usable files found.'.format(subset,
len(df)))
return df
def Voices_preload_and_split(subset='room-1', seconds=3,
path=None, pad=False, splits=None):
"""Index and split librispeech dataset.
Args:
        subset (string): VOiCES subset to parse, load and split.
Currently can only handle one at a time
seconds (int): Minimum length of audio samples to include.
path (string): Path to location containing dataset. If left as None
will search default location 'DATASETS_DIR' specified in
definitions.
pad (bool): Flag to specify whether to pad (with 0's) and keep the
samples with lenght below the minimum.
splits (dict): dictionary with {name:[fractions]} for a user specified
split. The split will be saved to 'DATASPLITS_DIR' under 'name'
Returns:
dict(Dataframes): Dictionary containing the dataframes corresponding
to each split inclduing metadata.
Example:
Todo:
- Write Example.
- More work on user specified splits.
- Add option and functionality to split longer recording into samples
of length 'seconds' to augment data.
"""
num_splits = 6
fragment_seconds = seconds
if path is None:
path = DATASETS_DIR
print('Initialising VOiCESDataset with minimum length = {}s'
' and subset = {}'.format(seconds, subset))
df = load_or_index_subset(subset=subset, path=path,
fragment_seconds=fragment_seconds, pad=pad)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
dfs = {} # dictionary of dataframes
sample_dfs = {}
# split df into data-subsets
if splits is None:
# Default behaviour will be to load cyphercat default splits
# check if splits exists.
print('Build/load speaker membership inference splits')
splits_ready = [False]*num_splits
for i_split in range(num_splits):
if os.path.exists(DATASPLITS_DIR+'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
        if all(splits_ready):  # Found all of the relevant splits
print('Found default speaker splits, loading dataframe')
dfs = {}
for i_split in range(num_splits):
dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_split))
else:
# Case when splits not found. This should only occur first time
# VOiCES is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default speaker splits for VOiCES!')
dfs = default_speaker_splitter2(dfs, df)
# write the default dataframes
for i_df, this_df in enumerate(dfs):
dfs[this_df] = dfs[this_df].drop(columns=['id'])
dfs[this_df].rename(columns={'level_0': 'idx_in_original_df'},
inplace=True)
dfs[this_df].to_csv(DATASPLITS_DIR+'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_df),
index=False)
print('Build/load sample membership inference splits')
splits_ready = [False]*(num_splits-1)
for i_split in range(num_splits-1):
if os.path.exists(DATASPLITS_DIR+'/VOiCES-%s/sample_splits/'
'VOiCES_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
        if all(splits_ready):  # Found all of the relevant splits
print('Found default sample splits, loading dataframe')
sample_dfs = {}
for i_split in range(num_splits-1):
sample_dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/VOiCES-%s/sample_splits/'
'VOiCES_%i.csv' % (subset,
i_split))
else:
# Case when splits not found. This should only occur first time
            # VOiCES is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default sample splits for VOiCES!')
sample_dfs = default_sample_splitter(sample_dfs, df)
# write the default dataframes
for i_df, this_df in enumerate(sample_dfs):
sample_dfs[this_df] = sample_dfs[this_df].drop(columns=['id'])
sample_dfs[this_df].rename(columns={'level_0':
'idx_in_original_df'},
inplace=True)
sample_dfs[this_df].to_csv(DATASPLITS_DIR+'/VOiCES-%s/'
'sample_splits/VOiCES_%i.csv' %
(subset, i_df), index=False)
else:
name = list(splits.keys())[0]
print('Creating user defined splits under name %s' %
(list(splits.keys())[0]))
total = 0
for fraction in splits[name]:
total += fraction
if total != 1.:
        raise ValueError('Data split fractions do not add up to 1.')
    # this creates user-selected splits according to the list provided
# num speakers for train & test is the same.
# the below was solved with a system of equations
# amt data depends on train data
    n = int(len(unique_speakers)//(2 + 2*splits[name][0]))
# n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] # target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
    dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers1,
                   category_id='speaker_id', splits=splits[name], N=0)
    dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers2,
                   category_id='speaker_id', splits=splits[name], N=2)
# split out data for attack train + test evenly
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers3,
category_id='speaker_id', splits=[0.5, 0.5], N=4)
print('\n ------- Speaker split statistics ------- ')
for d in dfs:
this_df = dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print(' ------- Sample split statistics -------- ')
for d in sample_dfs:
this_df = sample_dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print('Finished splitting data.')
return dfs, sample_dfs
def index_subset(path=None, subset=None):
"""Index a subset by looping through all of it's files and recording their
speaker ID, filepath and length.
Args:
subset (string): Name of the subset.
path (string): Path to search for files to parse.
Returns:
(list(dicts)): A list of dicts containing information about all the
audio files in a particular subset of the LibriSpeech dataset.
Example:
Todo:
- Write example.
"""
audio_files = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(path +
'/VOiCES/{}/'.format(subset)):
subset_len += len([f for f in files if f.endswith('.wav')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(path +
'/VOiCES/{}/'.format(subset)):
if len(files) == 0:
continue
for f in files:
# Skip non-sound files
if not f.endswith('.wav'):
continue
progress_bar.update(1)
librispeech_id = int(root[-4:])
instance, samplerate = sf.read(os.path.join(root, f))
audio_files.append({
'id': librispeech_id,
'filepath': os.path.relpath(os.path.join(root, f), path),
'length': len(instance),
'seconds': len(instance) * 1. / LIBRISPEECH_SAMPLING_RATE
})
progress_bar.close()
return audio_files
def default_speaker_splitter(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
n_male = len(unique_male)//2
n_female = len(unique_female)//2
# male splits
m_dfs = {}
# splits speakers in 0.8/0.2 split for target
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male],
category_id=cat_id, splits=[0.8, 0.2], N=0)
# splits by speaker for attack
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male:],
category_id=cat_id, splits=[0.5, 0.5],
N=2, split_by_class=True)
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female:],
category_id=cat_id, splits=[0.5, 0.5], N=2,
split_by_class=True)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(5):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
def default_speaker_splitter2(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
# Below math to get the data volume for splits 4 & 5 similar
n_male = len(unique_male)//50
n_female = len(unique_female)//50
n1 = 23
n2 = 46
# male splits
m_dfs = {}
# splits speakers in 0.8/0.2 split for target
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male*n1],
category_id=cat_id, splits=[0.8, 0.2], N=0)
# splits by speaker for attack
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male*n1:n_male*n2],
category_id=cat_id, splits=[0.5, 0.5],
N=2, split_by_class=True)
# split off unheard speakers for outset
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male*n2:],
category_id=cat_id, splits=[0, 1],
N=4, split_by_class=True)
# Replace in set with subset of df0
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female*n1],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female*n1:n_female*n2],
category_id=cat_id, splits=[0.5, 0.5], N=2,
split_by_class=True)
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female*n2:],
category_id=cat_id, splits=[0, 1], N=4,
split_by_class=True)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(6):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
def default_sample_splitter(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
n_male = len(unique_male)//2
n_female = len(unique_female)//2
# male splits
m_dfs = {}
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male],
category_id=cat_id, splits=[0.8, 0.2], N=0)
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male:],
category_id=cat_id, splits=[0.5, 0.5], N=2)
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female:],
category_id=cat_id, splits=[0.5, 0.5], N=2)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(5):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
class Voices_dataset(Dataset):
"""This class subclasses the torch.utils.data.Dataset. Calling __getitem__
    will return the transformed VOiCES audio sample and its label
# Args
df (Dataframe): Dataframe with audiosample path and metadata.
seconds (int): Minimum length of audio to include in the dataset. Any
files smaller than this will be ignored or padded to this length.
downsampling (int): Downsampling factor.
label (string): One of {speaker, sex}. Whether to use sex or speaker
ID as a label.
stochastic (bool): If True then we will take a random fragment from
each file of sufficient length. If False we will always take a
fragment starting at the beginning of a file.
pad (bool): Whether or not to pad samples with 0s to get them to the
desired length. If `stochastic` is True then a random number of 0s
will be appended/prepended to each side to pad the sequence to the
desired length.
cache: bool. Whether or not to use the cached index file
"""
def __init__(self, df=None, seconds=3, downsampling=1, label='speaker',
stochastic=True, pad=False, transform=None, cache=True):
if label not in ('sex', 'speaker'):
            raise ValueError('Label type must be one of (\'sex\', '
                             '\'speaker\')')
if int(seconds * LIBRISPEECH_SAMPLING_RATE) % downsampling != 0:
            raise ValueError('Down sampling must be an integer divisor of the'
                             ' fragment length.')
self.fragment_seconds = seconds
self.downsampling = downsampling
self.fragment_length = int(seconds * LIBRISPEECH_SAMPLING_RATE)
self.stochastic = stochastic
self.pad = pad
self.label = label
self.transform = transform
# load df from splitting function
self.df = df
self.num_speakers = len(self.df['speaker_id'].unique())
# Convert arbitrary integer labels of dataset to ordered
# 0-(num_speakers - 1) labels
self.unique_speakers = sorted(self.df['speaker_id'].unique())
self.speaker_id_mapping = {self.unique_speakers[i]: i
for i in range(self.num_classes())}
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath']
self.datasetid_to_speaker_id = self.df.to_dict()['speaker_id']
self.datasetid_to_sex = self.df.to_dict()['sex']
def __getitem__(self, index):
instance, samplerate = sf.read(
os.path.join(DATASETS_DIR, self.datasetid_to_filepath[index]))
# Choose a random sample of the file
if self.stochastic:
upper_bound = max(len(instance) - self.fragment_length, 1)
fragment_start_index = np.random.randint(0, upper_bound)
else:
fragment_start_index = 0
instance = instance[fragment_start_index:
fragment_start_index+self.fragment_length]
# Check for required length and pad if necessary
if self.pad and len(instance) < self.fragment_length:
less_timesteps = self.fragment_length - len(instance)
if self.stochastic:
# Stochastic padding, ensure instance length
# by appending a random number of 0s before and the
# appropriate number of 0s after the instance
less_timesteps = self.fragment_length - len(instance)
before_len = np.random.randint(0, less_timesteps)
after_len = less_timesteps - before_len
instance = np.pad(instance, (before_len, after_len),
'constant')
else:
# Deterministic padding. Append 0s to reach desired length
instance = np.pad(instance, (0, less_timesteps), 'constant')
if self.label == 'sex':
sex = self.datasetid_to_sex[index]
label = sex_to_label[sex]
elif self.label == 'speaker':
label = self.datasetid_to_speaker_id[index]
label = self.speaker_id_mapping[label]
else:
            raise ValueError('Label type must be one of (\'sex\', '
                             '\'speaker\'), got {}'.format(self.label))
# Reindex to channels first format as supported by pytorch and
# downsample by desired amount
instance = instance[np.newaxis, ::self.downsampling]
# Add transforms
if self.transform is not None:
instance = self.transform(instance)
return instance, label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['speaker_id'].unique())
| 24,898 | 38.966292 | 80 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/libri_dataset.py | from torch.utils.data import Dataset
from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from tqdm import tqdm
import soundfile as sf
import pandas as pd
import numpy as np
import os
from .splitter import splitter
LIBRISPEECH_SAMPLING_RATE = 16000
sex_to_label = {'M': False, 'F': True}
label_to_sex = {False: 'M', True: 'F'}
def load_or_index_subset(subset=None, path=None, fragment_seconds=3,
pad=False):
""" Subroutine to either load existing subset dataframe or index and save it
Args:
subset (string): Librispeech subset to either load or index.
path (string): Path to search for or save indexed subset.
fragment_seconds (float): Number of seconds for audio samples.
        pad (bool): If true will accept short fragments and pad with silence.
Returns:
(pandas.Dataframe): Returns indexed subset in dataframe.
"""
index_file = path + '/libri-{}.index.csv'.format(subset)
subset_index_path = index_file
if os.path.exists(subset_index_path):
df = pd.read_csv(subset_index_path)
# otherwise cache them
else:
print('Files not found, indexing {}'.format(subset))
df = pd.read_csv(path+'/LibriSpeech/SPEAKERS.TXT', skiprows=11,
delimiter='|', error_bad_lines=False)
df.columns = [col.strip().replace(';', '').lower()
for col in df.columns]
df = df.assign(
sex=df['sex'].apply(lambda x: x.strip()),
subset=df['subset'].apply(lambda x: x.strip()),
name=df['name'].apply(lambda x: x.strip()),
)
audio_files = index_subset(path, subset)
# Merge individual audio files with indexing dataframe
df = pd.merge(df, pd.DataFrame(audio_files))
# Save index files to data folder
df.to_csv(index_file, index=False)
# Trim too-small files
if not pad:
df = df[df['seconds'] > fragment_seconds]
# Renaming for clarity
df = df.rename(columns={'id': 'speaker_id',
'minutes': 'speaker_minutes'})
# Index of dataframe has direct correspondence to item in dataset
df = df.reset_index(drop=True)
df = df.assign(id=df.index.values)
print('\t Finished indexing {}. {} usable files found.'.format(subset,
len(df)))
return df
def Libri_preload_and_split(subset='train-clean-100',
outset='test-clean', seconds=3,
path=None, pad=False, splits=None):
"""Index and split librispeech dataset.
Args:
subset (string): LibriSpeech subset to parse, load and split.
Currently can only handle one at a time
outset (string): Librispeech subset to use for last split. Holds audio
for speakers that are out of set, used for membership inference on
speakers instead of utterances.
seconds (int): Minimum length of audio samples to include.
path (string): Path to location containing dataset. If left as None
will search default location 'DATASETS_DIR' specified in
definitions.
pad (bool): Flag to specify whether to pad (with 0's) and keep the
            samples with length below the minimum.
splits (dict): dictionary with {name:[fractions]} for a user specified
split. The split will be saved to 'DATASPLITS_DIR' under 'name'
Returns:
dict(Dataframes): Dictionary containing the dataframes corresponding
            to each split including metadata.
Example:
Todo:
- Write Example.
- More work on user specified splits.
- Add option and functionality to split longer recording into samples
of length 'seconds' to augment data.
"""
num_splits = 6
fragment_seconds = seconds
if path is None:
path = DATASETS_DIR
print('Initialising LibriSpeechDataset with minimum length = {}s'
' and subset = {}'.format(seconds, subset))
df = load_or_index_subset(subset=subset, path=path,
fragment_seconds=fragment_seconds, pad=pad)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
outset_df = load_or_index_subset(subset=outset, path=path,
fragment_seconds=fragment_seconds,
pad=pad)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
dfs = {} # dictionary of dataframes
sample_dfs = {}
# split df into data-subsets
if splits is None:
# Default behaviour will be to load cyphercat default splits
# check if splits exists.
print('Build/load speaker membership inference splits')
splits_ready = [False]*num_splits
for i_split in range(num_splits):
if os.path.exists(DATASPLITS_DIR+'/libri-%s/speaker_splits/'
'libri_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
        if all(splits_ready):  # Found all of the relevant splits
print('Found default speaker splits, loading dataframe')
dfs = {}
for i_split in range(num_splits):
dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/libri-%s/speaker_splits/'
'libri_%i.csv' % (subset, i_split))
else:
# Case when splits not found. This should only occur first time
# LibriSpeech is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default speaker splits for LibriSpeech!')
dfs = default_speaker_splitter(dfs, df)
dfs[num_splits-1] = outset_df
# write the default dataframes
for i_df, this_df in enumerate(dfs):
dfs[this_df] = dfs[this_df].drop(columns=['id'])
dfs[this_df].rename(columns={'level_0': 'idx_in_original_df'},
inplace=True)
dfs[this_df].to_csv(DATASPLITS_DIR+'/libri-%s/speaker_splits/'
'libri_%i.csv' % (subset, i_df),
index=False)
print('Build/load sample membership inference splits')
splits_ready = [False]*(num_splits-1)
for i_split in range(num_splits-1):
if os.path.exists(DATASPLITS_DIR+'/libri-%s/sample_splits/'
'libri_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
        if all(splits_ready):  # Found all of the relevant splits
print('Found default sample splits, loading dataframe')
sample_dfs = {}
for i_split in range(num_splits-1):
sample_dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/libri-%s/sample_splits/'
'libri_%i.csv' % (subset,
i_split))
else:
# Case when splits not found. This should only occur first time
# LibriSpeech is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default sample splits for LibriSpeech!')
sample_dfs = default_sample_splitter(sample_dfs, df)
# write the default dataframes
for i_df, this_df in enumerate(sample_dfs):
sample_dfs[this_df] = sample_dfs[this_df].drop(columns=['id'])
sample_dfs[this_df].rename(columns={'level_0':
'idx_in_original_df'},
inplace=True)
sample_dfs[this_df].to_csv(DATASPLITS_DIR+'/libri-%s/'
'sample_splits/libri_%i.csv' %
(subset, i_df), index=False)
else:
name = list(splits.keys())[0]
print('Creating user defined splits under name %s' %
(list(splits.keys())[0]))
total = 0
for fraction in splits[name]:
total += fraction
if total != 1.:
            raise ValueError('Data split fractions do not add up to 1.')
        # this creates user-selected splits according to the list provided
# num speakers for train & test is the same.
# the below was solved with a system of equations
# amt data depends on train data
        n = int(len(unique_speakers)//(2 + 2*splits[name][0]))
# n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] # target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
        dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers1,
                       category_id='speaker_id', splits=splits[name], N=0)
        dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers2,
                       category_id='speaker_id', splits=splits[name], N=2)
# split out data for attack train + test evenly
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers3,
category_id='speaker_id', splits=[0.5, 0.5], N=4)
print('\n ------- Speaker split statistics ------- ')
for d in dfs:
this_df = dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print(' ------- Sample split statistics -------- ')
for d in sample_dfs:
this_df = sample_dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print('Finished splitting data.')
return dfs, sample_dfs
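# A minimal usage sketch for the splitting routine above (not executed on
# import; the presence of the LibriSpeech data under DATASETS_DIR and of the
# default split files is an assumption):
#
#     dfs, sample_dfs = Libri_preload_and_split(subset='train-clean-100',
#                                               outset='test-clean',
#                                               seconds=3, pad=False)
#     target_train_df = dfs[0]  # e.g. feed split 0 to LibriSpeechDataset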
def index_subset(path=None, subset=None):
"""Index a subset by looping through all of it's files and recording their
speaker ID, filepath and length.
Args:
subset (string): Name of the subset.
path (string): Path to search for files to parse.
Returns:
(list(dicts)): A list of dicts containing information about all the
audio files in a particular subset of the LibriSpeech dataset.
Example:
Todo:
- Write example.
"""
audio_files = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(path +
'/LibriSpeech/{}/'.format(subset)):
subset_len += len([f for f in files if f.endswith('.flac')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(path +
'/LibriSpeech/{}/'.format(subset)):
if len(files) == 0:
continue
librispeech_id = int(root.split('/')[-2])
for f in files:
# Skip non-sound files
if not f.endswith('.flac'):
continue
progress_bar.update(1)
instance, samplerate = sf.read(os.path.join(root, f))
audio_files.append({
'id': librispeech_id,
'filepath': os.path.relpath(os.path.join(root, f), path),
'length': len(instance),
'seconds': len(instance) * 1. / LIBRISPEECH_SAMPLING_RATE
})
progress_bar.close()
return audio_files
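# Illustrative call for the indexing helper above (a sketch; it assumes the
# LibriSpeech archive has been extracted under `path`/LibriSpeech/):
#
#     files = index_subset(path=DATASETS_DIR, subset='dev-clean')
#     print(files[0])  # {'id': ..., 'filepath': ..., 'length': ..., 'seconds': ...}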
def default_speaker_splitter(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
n_male = len(unique_male)//2
n_female = len(unique_female)//2
# male splits
m_dfs = {}
# splits speakers in 0.8/0.2 split for target
m_dfs = splitter(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male],
category_id=cat_id, splits=[0.8, 0.2], N=0)
# splits by speaker for attack
m_dfs = splitter(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male:],
category_id=cat_id, splits=[0.5, 0.5],
N=2, split_by_class=True)
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female:],
category_id=cat_id, splits=[0.5, 0.5], N=2,
split_by_class=True)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(5):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
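# Sketch of how the speaker splitter above is typically driven (assumes `df`
# was produced by load_or_index_subset; split indices 0-4 follow the scheme
# used in Libri_preload_and_split):
#
#     dfs = default_speaker_splitter({}, df)
#     for i, split_df in dfs.items():
#         print(i, len(split_df), len(split_df['speaker_id'].unique()))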
def default_sample_splitter(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
n_male = len(unique_male)//2
n_female = len(unique_female)//2
# male splits
m_dfs = {}
m_dfs = splitter(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male],
category_id=cat_id, splits=[0.8, 0.2], N=0)
m_dfs = splitter(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male:],
category_id=cat_id, splits=[0.5, 0.5], N=2)
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female:],
category_id=cat_id, splits=[0.5, 0.5], N=2)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(5):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
class LibriSpeechDataset(Dataset):
"""This class subclasses the torch.utils.data.Dataset. Calling __getitem__
    will return the transformed librispeech audio sample and its label
# Args
df (Dataframe): Dataframe with audiosample path and metadata.
seconds (int): Minimum length of audio to include in the dataset. Any
files smaller than this will be ignored or padded to this length.
downsampling (int): Downsampling factor.
label (string): One of {speaker, sex}. Whether to use sex or speaker
ID as a label.
stochastic (bool): If True then we will take a random fragment from
each file of sufficient length. If False we will always take a
fragment starting at the beginning of a file.
pad (bool): Whether or not to pad samples with 0s to get them to the
desired length. If `stochastic` is True then a random number of 0s
will be appended/prepended to each side to pad the sequence to the
desired length.
cache: bool. Whether or not to use the cached index file
"""
def __init__(self, df=None, seconds=3, downsampling=1, label='speaker',
stochastic=True, pad=False, transform=None, cache=True):
if label not in ('sex', 'speaker'):
            raise ValueError('Label type must be one of'
                             ' (\'sex\', \'speaker\')')
if int(seconds * LIBRISPEECH_SAMPLING_RATE) % downsampling != 0:
            raise ValueError('Downsampling must be an integer divisor of'
                             ' the fragment length.')
self.fragment_seconds = seconds
self.downsampling = downsampling
self.fragment_length = int(seconds * LIBRISPEECH_SAMPLING_RATE)
self.stochastic = stochastic
self.pad = pad
self.label = label
self.transform = transform
# load df from splitting function
self.df = df
self.num_speakers = len(self.df['speaker_id'].unique())
# Convert arbitrary integer labels of dataset to ordered
# 0-(num_speakers - 1) labels
self.unique_speakers = sorted(self.df['speaker_id'].unique())
self.speaker_id_mapping = {self.unique_speakers[i]: i
for i in range(self.num_classes())}
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath']
self.datasetid_to_speaker_id = self.df.to_dict()['speaker_id']
self.datasetid_to_sex = self.df.to_dict()['sex']
def __getitem__(self, index):
instance, samplerate = sf.read(
os.path.join(DATASETS_DIR, self.datasetid_to_filepath[index]))
# Choose a random sample of the file
if self.stochastic:
upper_bound = max(len(instance) - self.fragment_length, 1)
fragment_start_index = np.random.randint(0, upper_bound)
else:
fragment_start_index = 0
instance = instance[fragment_start_index:
fragment_start_index+self.fragment_length]
# Check for required length and pad if necessary
if self.pad and len(instance) < self.fragment_length:
less_timesteps = self.fragment_length - len(instance)
if self.stochastic:
# Stochastic padding, ensure instance length
# by appending a random number of 0s before and the
# appropriate number of 0s after the instance
less_timesteps = self.fragment_length - len(instance)
before_len = np.random.randint(0, less_timesteps)
after_len = less_timesteps - before_len
instance = np.pad(instance, (before_len, after_len),
'constant')
else:
# Deterministic padding. Append 0s to reach desired length
instance = np.pad(instance, (0, less_timesteps), 'constant')
if self.label == 'sex':
sex = self.datasetid_to_sex[index]
label = sex_to_label[sex]
elif self.label == 'speaker':
label = self.datasetid_to_speaker_id[index]
label = self.speaker_id_mapping[label]
else:
            raise ValueError('Label type must be one of (\'sex\','
                             ' \'speaker\'), got {}'.format(self.label))
# Reindex to channels first format as supported by pytorch and
# downsample by desired amount
instance = instance[np.newaxis, ::self.downsampling]
# Add transforms
if self.transform is not None:
instance = self.transform(instance)
return instance, label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['speaker_id'].unique())
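# Example wiring of the dataset class above into a PyTorch DataLoader (a
# hedged sketch; `dfs` as returned by Libri_preload_and_split is assumed):
#
#     from torch.utils.data import DataLoader
#     train_set = LibriSpeechDataset(df=dfs[0], seconds=3, downsampling=1,
#                                    label='speaker', stochastic=True, pad=True)
#     train_loader = DataLoader(train_set, batch_size=16, shuffle=True)
#     audio_batch, labels = next(iter(train_loader))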
| 21,505 | 39.96381 | 80 | py |
cyphercat | cyphercat-master/cyphercat/tests/test_dim_reduction.py | import sys
sys.path.insert(0, '../../')
import cyphercat as cc
import torch
import torch.nn as nn
import numpy as np
class test_cnn(nn.Module):
def __init__(self, n_in=3, n_classes=10, n_filters=64, size=64):
super(test_cnn, self).__init__()
self.size = size
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, 2*n_filters, kernel_size=5, stride=1,
padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(2*n_filters * (self.size//4) * (self.size//4),
2*n_filters)
self.output = nn.Linear(2*n_filters, n_classes)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(x.size(0), -1)
# x = x.view(-1, 2*self.n_filters * (self.size//4) * (self.size//4))
x = self.fc(x)
out = self.output(x)
return out
def comparison(model, wrap, wrap2, image):
print('Batch size = ', image.shape[0])
print(' - Original model: returns full vector ')
out = model(image)
print('Batch labels = ', out.argmax(dim=1))
print('Full label vectors\n', out)
print(' - Wrapped 1 : returns top 3 ')
out = wrap(image)
print('Batch labels = ', out.argmax(dim=1))
print('Full label vectors\n', out)
print(' - Wrapped breaking probabilities : returns top 3 ')
out = wrap2(image)
print('Batch labels = ', out.argmax(dim=1))
print('Full label vectors\n', out)
conv_net = test_cnn(size=32)
wrapped = cc.dimensionality_reduction(model=conv_net, n_top=3,
break_posterior=False)
wrapped2 = cc.dimensionality_reduction(model=conv_net, n_top=3,
break_posterior=True)
img = torch.randn((2, 3, 32, 32))
print(' ------- Training -------\n')
comparison(conv_net, wrapped, wrapped2, img)
print(' ------- Eval -------\n')
conv_net.eval()
wrapped.eval()
wrapped2.eval()
comparison(conv_net, wrapped, wrapped2, img)
| 2,350 | 31.205479 | 77 | py |
cyphercat | cyphercat-master/cyphercat/utils/visualize_utils.py | #!/usr/bin/python3
"""
Set of functions used to call a series of algorithms used to visualize the object localization of a pre-trained
network in PyTorch. The different algorithms are discussed in several papers, while the implementation is based,
roughly, on work in the following repository (https://github.com/sar-gupta/weakly-supervised-localization-survey)
"""
import numpy as np
import PIL
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
def saliency_map_general(model, input, label, plot = False):
"""
saliency_map_general: implementation to return the most general form of the saliency map, informing
on the regions of interest that activate a specific label.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
return: numpy array with heatmap data
"""
input = Variable(input.unsqueeze_(0), requires_grad = True)
output = model.forward(input)
model.zero_grad()
output[0][label].backward()
grads = input.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0, 1)
grads.transpose_(1, 2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def guided_saliency_map(model, input, label, plot = False):
"""
guided_saliency_map: implementation to return a guided saliency map, informing
on the regions of interest that activate a specific label.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
return: numpy array with heatmap data
"""
input = Variable(input.unsqueeze_(0), requires_grad=True)
try:
h = [0]*len(list(model.modules()))
def hookfunc(module, gradInput, gradOutput):
return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
for j, i in enumerate(list(model.modules())):
h[j] = i.register_backward_hook(hookfunc)
output = model.forward(input)
model.zero_grad()
output[0][label].backward()
for i in range(len(list(model.modules()))):
h[i].remove()
except Exception as e:
print(e)
for i in range(len(list(model.modules()))):
h[i].remove()
grads = input.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0, 1)
grads.transpose_(1, 2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def gradcam(model, input, label, layer_name, plot=False):
"""
gradcam: implementation to return a class activation map using the gradient of class score with each
of last conv layer filters. Calculate weighted sum of gradients and filters to finally obtain a map
of size equal to size of filters.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
- layer_name: Name of the layer to target, should be the last CNN.
return:
        numpy array with the class activation map
"""
imgs_shape = (input.shape[1], input.shape[2])
rs = torchvision.transforms.Resize(imgs_shape)
#find the right layer
last_conv = None
for name, item in model._modules.items():
if name == layer_name:
last_conv = item
    if last_conv is None:
        print('Cannot find target layer')
return None
pre_image = input
global gcdata
global gcgrads
def bhook(module, gradInputs, gradOutputs):
global gcgrads
gcgrads = gradOutputs
def fhook(module, input, output):
global gcdata
gcdata = output
hb = last_conv.register_backward_hook(bhook)
hf = last_conv.register_forward_hook(fhook)
out = model(input.unsqueeze_(0))
model.zero_grad()
out[0, label].backward()
hb.remove()
hf.remove()
gcdata = gcdata[0]
gcgrads = gcgrads[0].squeeze()
gcgrads = gcgrads.mean(dim=2, keepdim=True)
gcgrads = gcgrads.mean(dim=1, keepdim=True)
#
gcdata = gcdata.mul(gcgrads)
gcdata = gcdata.sum(dim=0, keepdim=True)
gcdata = gcdata.clamp(min=0)
gcdata -= gcdata.min()
gcdata /= gcdata.max()
toi = torchvision.transforms.ToPILImage()
gcdata = np.array(rs(toi(gcdata.data.cpu())))
input.squeeze()
return gcdata
def guided_gradcam(model, input, label, layer_name, plot = False):
"""
    guided_gradcam: returns a combination of a guided saliency map and a class activation map. This combines
    the class sensitivity of gradcam with the higher resolution of the
    saliency map.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
- layer_name: Name of the layer to target, should be the last CNN.
return:
        numpy array with the combined activation map
"""
gc = gradcam(model, input, label, layer_name, plot=False)
guided = guided_saliency_map(model=model, input=input[0], label=label, plot=False)
gc = gc * guided
rs = torchvision.transforms.Resize((32, 32))
gc -= gc.min()
gc = np.divide(gc, gc.max())
gc *= 255
gc = gc.astype(int)
return gc
def smooth_guided_saliency_map(model, input, label, transform, x=10, percent_noise=10, plot = True):
"""
    smooth_guided_saliency_map: Implementation of a guided saliency map that accounts for the fact
    that small, local variations in the partial derivatives produce apparent noise; the map is
    smoothed by averaging over several noisy copies of the input.
Args:
- model: (PyTorch) Trained model trying to understand
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
        - x: Number of times to sample for the smoothing
        - percent_noise: Percentage of noise to be introduced during sampling for smoothing
return:
        numpy array with the smoothed saliency map
"""
tensor_input = input
final_grad = torch.zeros(input.shape).cuda()
final_grad = final_grad.unsqueeze(0)
h = [0]*len(list(model.modules()))
def hookfunc(module, gradInput, gradOutput):
return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
for j, i in enumerate(list(model.modules())):
h[j] = i.register_backward_hook(hookfunc)
for i in range(x):
temp_input = tensor_input
noise = torch.from_numpy(np.random.normal(loc=0, scale=(percent_noise/100) *
(tensor_input.max() - tensor_input.min()),
size=temp_input.shape)).type(torch.cuda.FloatTensor)
temp_input = (temp_input.cuda() + noise).cpu().numpy()
temp_input = np.transpose(temp_input, (1, 2, 0) )
temp_input = PIL.Image.fromarray(temp_input.astype(np.uint8))
temp_input = Variable(transform(temp_input).unsqueeze(0).cuda(), requires_grad=True)
output = model.forward(temp_input)
model.zero_grad()
output[0][label].backward()
final_grad += temp_input.grad.data
for i in range(len(list(model.modules()))):
h[i].remove()
grads = final_grad/x
grads = grads.clamp(min=0)
grads.squeeze_()
grads.transpose_(0, 1)
grads.transpose_(1, 2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def smooth_guided_gradcam(model, input, label, transform, layer_name, plot = False ):
guided = smooth_guided_saliency_map(model, input, label, transform = transform, plot = False)
gc = gradcam(model, input, label, layer_name = layer_name, plot=False)
gc = gc * guided
rs = torchvision.transforms.Resize((32, 32))
gc -= gc.min()
gc = np.divide(gc, gc.max())
gc *= 255
gc = gc.astype(int)
return gc
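# Rough usage sketch for the visualization helpers above (assumptions: a
# trained CNN `model` whose last conv layer is registered under the name
# 'conv_block_2', a CHW image tensor `img`, and a target class index):
#
#     heat = saliency_map_general(model, img, label=3)
#     cam = gradcam(model, img, label=3, layer_name='conv_block_2')
#     combined = guided_gradcam(model, img, label=3, layer_name='conv_block_2')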
| 8,630 | 31.085502 | 117 | py |
cyphercat | cyphercat-master/cyphercat/utils/svc_utils.py | from __future__ import print_function
import os
import numpy as np
import torch
import torchvision
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
def load(dataloader):
"""Loads/flattens inputs and targets for use in SVM. Returns inputs and targets."""
for data in dataloader:
x, y = data
x = x.view(x.shape[0], -1)
return x, y
def hp_grid(n_components, C_range, gamma_range):
"""Creates and returns list of classifiers with grid of hyperparameters given by C_range and gamma_range."""
clfs = []
pca = PCA(n_components=n_components)
scaling = MinMaxScaler(feature_range=(-1, 1))
for i in C_range:
for j in gamma_range:
svc = svm.SVC(C=i, gamma=j)
clf = make_pipeline(pca, scaling, svc)
clfs.append(clf)
return clfs
def train_grid(clfs, inputs, targets):
"""Trains classifiers in a list; returns list of trained classifiers."""
fitted_clfs = []
for i in range(len(clfs)):
x = clfs[i].fit(inputs, targets)
fitted_clfs.append(x)
print("Fitted: {} / {}".format(i+1, len(clfs)))
return fitted_clfs
def predict_eval(clf, inputs, targets, training=False):
"""Given a classifier and inputs, returns predictions and evaluated classifier accuracy."""
preds = clf.predict(inputs)
num_correct = torch.eq(torch.from_numpy(preds), targets).sum().item()
acc = (num_correct / len(targets)) * 100
if training:
# print('C: ', clf.get_params(deep=True)['svc__C'], 'gamma: ', clf.get_params(deep=True)['svc__gamma'])
print("C: {} gamma: {}".format(clf.get_params(deep=True)['svc__C'], clf.get_params(deep=True)['svc__gamma']))
print('Training Accuracy: {}'.format(acc))
else:
print('Testing Accuracy: {}'.format(acc))
return preds, acc
def maxacc_gen(test_accs, train_accs, clfs):
"""Finds and returns model with highest test accuracy and model with train/test accuracy ratio closest to 1."""
test = np.array(test_accs)
train = np.array(train_accs)
maxacc = clfs[np.argmax(test)]
gen = clfs[np.argmin(train-test)]
return maxacc, gen
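# Sketch of the grid-search workflow built from the helpers above (the
# dataloaders and the chosen hyperparameter ranges are assumptions):
#
#     x_train, y_train = load(train_loader)
#     x_test, y_test = load(test_loader)
#     clfs = hp_grid(n_components=180, C_range=[1, 10], gamma_range=[0.001, 0.01])
#     fitted = train_grid(clfs, x_train, y_train)
#     train_accs = [predict_eval(c, x_train, y_train, training=True)[1] for c in fitted]
#     test_accs = [predict_eval(c, x_test, y_test)[1] for c in fitted]
#     best, gen = maxacc_gen(test_accs, train_accs, fitted)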
def save_proba(fn, pipe, inputs, targets):
"""Fits svm with probabilities and saves to disk."""
params = pipe.get_params(deep=True)
pca = PCA(n_components=180)
scaling = MinMaxScaler(feature_range=(-1, 1))
pipe_prob = make_pipeline(pca, scaling, svm.SVC(C=params['svc__C'], gamma=params['svc__gamma'], probability=True))
pipe_prob.fit(inputs, targets)
joblib.dump(pipe_prob, fn)
def load_svm(directory, gen=True):
"""Returns loaded SVM saved with classification baselines.
'gen' : Model with train/test accuracy ratio closest to 1.
'maxacc' : Model with highest test accuracy."""
if gen:
clf = 'gen'
if not gen:
clf = 'maxacc'
dataset = directory.split('/')[-1]
path = 'SVM' + dataset + '_' + clf + '_proba.pkl'
svm = joblib.load(os.path.join(directory, path))
return svm
def class_acc(preds, targets, classes):
"Returns classifier accuracy for each class."
correct = 0
class_correct = np.zeros(len(classes))
class_total = np.zeros(len(classes))
for j in range(len(targets)):
class_total[targets[j]] += 1
if np.argmax(preds[j]) == targets[j]:
class_correct[targets[j]] += 1
correct += 1
class_accuracies = (class_correct/class_total) * 100
accuracy = (correct / len(targets)) * 100
for i in range(len(class_accuracies)):
        print('Accuracy of {} : {} %'.format(classes[i], class_accuracies[i]))
    print('Total Accuracy: {} %'.format(accuracy))
| 3,817 | 29.790323 | 118 | py |
3DDFA | 3DDFA-master/main.py | #!/usr/bin/env python3
# coding: utf-8
__author__ = 'cleardusk'
"""
The pipeline of 3DDFA prediction: given one image, predict the 3d face vertices, 68 landmarks and visualization.
[todo]
1. CPU optimization: https://pmchojnacki.wordpress.com/2018/10/07/slow-pytorch-cpu-performance
"""
import torch
import torchvision.transforms as transforms
import mobilenet_v1
import numpy as np
import cv2
import dlib
from utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool
import scipy.io as sio
from utils.inference import get_suffix, parse_roi_box_from_landmark, crop_img, predict_68pts, dump_to_ply, dump_vertex, \
draw_landmarks, predict_dense, parse_roi_box_from_bbox, get_colors, write_obj_with_colors
from utils.cv_plot import plot_pose_box
from utils.estimate_pose import parse_pose
from utils.render import get_depths_image, cget_depths_image, cpncc
from utils.paf import gen_img_paf
import argparse
import torch.backends.cudnn as cudnn
STD_SIZE = 120
def main(args):
    # 1. load pre-trained model
checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
arch = 'mobilenet_1'
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
# because the model is trained by multiple gpus, prefix module should be removed
for k in checkpoint.keys():
model_dict[k.replace('module.', '')] = checkpoint[k]
model.load_state_dict(model_dict)
if args.mode == 'gpu':
cudnn.benchmark = True
model = model.cuda()
model.eval()
# 2. load dlib model for face detection and landmark used for face cropping
if args.dlib_landmark:
dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
face_regressor = dlib.shape_predictor(dlib_landmark_model)
if args.dlib_bbox:
face_detector = dlib.get_frontal_face_detector()
# 3. forward
tri = sio.loadmat('visualize/tri.mat')['tri']
transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
for img_fp in args.files:
img_ori = cv2.imread(img_fp)
if args.dlib_bbox:
rects = face_detector(img_ori, 1)
else:
rects = []
if len(rects) == 0:
rects = dlib.rectangles()
rect_fp = img_fp + '.bbox'
lines = open(rect_fp).read().strip().split('\n')[1:]
for l in lines:
l, r, t, b = [int(_) for _ in l.split(' ')[1:]]
rect = dlib.rectangle(l, r, t, b)
rects.append(rect)
pts_res = []
Ps = [] # Camera matrix collection
poses = [] # pose collection, [todo: validate it]
vertices_lst = [] # store multiple face vertices
ind = 0
suffix = get_suffix(img_fp)
for rect in rects:
# whether use dlib landmark to crop image, if not, use only face bbox to calc roi bbox for cropping
if args.dlib_landmark:
# - use landmark for cropping
pts = face_regressor(img_ori, rect).parts()
pts = np.array([[pt.x, pt.y] for pt in pts]).T
roi_box = parse_roi_box_from_landmark(pts)
else:
# - use detected face bbox
bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
roi_box = parse_roi_box_from_bbox(bbox)
img = crop_img(img_ori, roi_box)
# forward: one step
img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
input = transform(img).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
# 68 pts
pts68 = predict_68pts(param, roi_box)
# two-step for more accurate bbox to crop face
if args.bbox_init == 'two':
roi_box = parse_roi_box_from_landmark(pts68)
img_step2 = crop_img(img_ori, roi_box)
img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
input = transform(img_step2).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
pts68 = predict_68pts(param, roi_box)
pts_res.append(pts68)
P, pose = parse_pose(param)
Ps.append(P)
poses.append(pose)
# dense face 3d vertices
if args.dump_ply or args.dump_vertex or args.dump_depth or args.dump_pncc or args.dump_obj:
vertices = predict_dense(param, roi_box)
vertices_lst.append(vertices)
if args.dump_ply:
dump_to_ply(vertices, tri, '{}_{}.ply'.format(img_fp.replace(suffix, ''), ind))
if args.dump_vertex:
dump_vertex(vertices, '{}_{}.mat'.format(img_fp.replace(suffix, ''), ind))
if args.dump_pts:
wfp = '{}_{}.txt'.format(img_fp.replace(suffix, ''), ind)
np.savetxt(wfp, pts68, fmt='%.3f')
print('Save 68 3d landmarks to {}'.format(wfp))
if args.dump_roi_box:
wfp = '{}_{}.roibox'.format(img_fp.replace(suffix, ''), ind)
np.savetxt(wfp, roi_box, fmt='%.3f')
print('Save roi box to {}'.format(wfp))
if args.dump_paf:
wfp_paf = '{}_{}_paf.jpg'.format(img_fp.replace(suffix, ''), ind)
wfp_crop = '{}_{}_crop.jpg'.format(img_fp.replace(suffix, ''), ind)
paf_feature = gen_img_paf(img_crop=img, param=param, kernel_size=args.paf_size)
cv2.imwrite(wfp_paf, paf_feature)
cv2.imwrite(wfp_crop, img)
print('Dump to {} and {}'.format(wfp_crop, wfp_paf))
if args.dump_obj:
wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind)
colors = get_colors(img_ori, vertices)
write_obj_with_colors(wfp, vertices, tri, colors)
print('Dump obj with sampled texture to {}'.format(wfp))
ind += 1
if args.dump_pose:
# P, pose = parse_pose(param) # Camera matrix (without scale), and pose (yaw, pitch, roll, to verify)
img_pose = plot_pose_box(img_ori, Ps, pts_res)
wfp = img_fp.replace(suffix, '_pose.jpg')
cv2.imwrite(wfp, img_pose)
print('Dump to {}'.format(wfp))
if args.dump_depth:
wfp = img_fp.replace(suffix, '_depth.png')
# depths_img = get_depths_image(img_ori, vertices_lst, tri-1) # python version
depths_img = cget_depths_image(img_ori, vertices_lst, tri - 1) # cython version
cv2.imwrite(wfp, depths_img)
print('Dump to {}'.format(wfp))
if args.dump_pncc:
wfp = img_fp.replace(suffix, '_pncc.png')
pncc_feature = cpncc(img_ori, vertices_lst, tri - 1) # cython version
cv2.imwrite(wfp, pncc_feature[:, :, ::-1]) # cv2.imwrite will swap RGB -> BGR
print('Dump to {}'.format(wfp))
if args.dump_res:
draw_landmarks(img_ori, pts_res, wfp=img_fp.replace(suffix, '_3DDFA.jpg'), show_flg=args.show_flg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='3DDFA inference pipeline')
parser.add_argument('-f', '--files', nargs='+',
help='image files paths fed into network, single or multiple images')
parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')
parser.add_argument('--show_flg', default='true', type=str2bool, help='whether show the visualization result')
parser.add_argument('--bbox_init', default='one', type=str,
help='one|two: one-step bbox initialization or two-step')
parser.add_argument('--dump_res', default='true', type=str2bool, help='whether write out the visualization image')
parser.add_argument('--dump_vertex', default='false', type=str2bool,
help='whether write out the dense face vertices to mat')
parser.add_argument('--dump_ply', default='true', type=str2bool)
parser.add_argument('--dump_pts', default='true', type=str2bool)
parser.add_argument('--dump_roi_box', default='false', type=str2bool)
parser.add_argument('--dump_pose', default='true', type=str2bool)
parser.add_argument('--dump_depth', default='true', type=str2bool)
parser.add_argument('--dump_pncc', default='true', type=str2bool)
parser.add_argument('--dump_paf', default='false', type=str2bool)
parser.add_argument('--paf_size', default=3, type=int, help='PAF feature kernel size')
parser.add_argument('--dump_obj', default='true', type=str2bool)
parser.add_argument('--dlib_bbox', default='true', type=str2bool, help='whether use dlib to predict bbox')
parser.add_argument('--dlib_landmark', default='true', type=str2bool,
help='whether use dlib landmark to crop image')
args = parser.parse_args()
main(args)
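# Example invocations (a sketch; the sample image paths and the presence of
# the model files under models/ are assumptions):
#
#     python3 main.py -f samples/test1.jpg --dump_obj true --dump_pose true
#     python3 main.py -m gpu -f img1.jpg img2.jpg --bbox_init two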
| 9,511 | 45.174757 | 121 | py |
3DDFA | 3DDFA-master/video_demo.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import torchvision.transforms as transforms
import mobilenet_v1
import numpy as np
import cv2
import dlib
from utils.ddfa import ToTensorGjz, NormalizeGjz
import scipy.io as sio
from utils.inference import (
parse_roi_box_from_landmark,
crop_img,
predict_68pts,
predict_dense,
)
from utils.cv_plot import plot_kpt
from utils.render import get_depths_image, cget_depths_image, cpncc
from utils.paf import gen_img_paf
import argparse
import torch.backends.cudnn as cudnn
STD_SIZE = 120
def main(args):
# 0. open video
# vc = cv2.VideoCapture(str(args.video) if len(args.video) == 1 else args.video)
    vc = cv2.VideoCapture(int(args.video) if args.video.isdigit() else args.video)
    # 1. load pre-trained model
checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
arch = 'mobilenet_1'
tri = sio.loadmat('visualize/tri.mat')['tri']
transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)[
'state_dict'
]
model = getattr(mobilenet_v1, arch)(
num_classes=62
) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
# because the model is trained by multiple gpus, prefix module should be removed
for k in checkpoint.keys():
model_dict[k.replace('module.', '')] = checkpoint[k]
model.load_state_dict(model_dict)
if args.mode == 'gpu':
cudnn.benchmark = True
model = model.cuda()
model.eval()
# 2. load dlib model for face detection and landmark used for face cropping
dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
face_regressor = dlib.shape_predictor(dlib_landmark_model)
face_detector = dlib.get_frontal_face_detector()
# 3. forward
success, frame = vc.read()
last_frame_pts = []
while success:
if len(last_frame_pts) == 0:
rects = face_detector(frame, 1)
for rect in rects:
pts = face_regressor(frame, rect).parts()
pts = np.array([[pt.x, pt.y] for pt in pts]).T
last_frame_pts.append(pts)
vertices_lst = []
for lmk in last_frame_pts:
roi_box = parse_roi_box_from_landmark(lmk)
img = crop_img(frame, roi_box)
img = cv2.resize(
img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR
)
input = transform(img).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
pts68 = predict_68pts(param, roi_box)
vertex = predict_dense(param, roi_box)
lmk[:] = pts68[:2]
vertices_lst.append(vertex)
pncc = cpncc(frame, vertices_lst, tri - 1) / 255.0
frame = frame / 255.0 * (1.0 - pncc)
cv2.imshow('3ddfa', frame)
cv2.waitKey(1)
success, frame = vc.read()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='3DDFA inference pipeline')
parser.add_argument(
'-v',
'--video',
default='0',
type=str,
help='video file path or opencv cam index',
)
parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')
args = parser.parse_args()
main(args)
| 3,552 | 31.3 | 88 | py |
3DDFA | 3DDFA-master/benchmark.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import torch.nn as nn
import torch.utils.data as data
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import mobilenet_v1
import time
import numpy as np
from benchmark_aflw2000 import calc_nme as calc_nme_alfw2000
from benchmark_aflw2000 import ana as ana_alfw2000
from benchmark_aflw import calc_nme as calc_nme_alfw
from benchmark_aflw import ana as ana_aflw
from utils.ddfa import ToTensorGjz, NormalizeGjz, DDFATestDataset, reconstruct_vertex
import argparse
def extract_param(checkpoint_fp, root='', filelists=None, arch='mobilenet_1', num_classes=62, device_ids=[0],
batch_size=128, num_workers=4):
map_location = {f'cuda:{i}': 'cuda:0' for i in range(8)}
checkpoint = torch.load(checkpoint_fp, map_location=map_location)['state_dict']
torch.cuda.set_device(device_ids[0])
model = getattr(mobilenet_v1, arch)(num_classes=num_classes)
model = nn.DataParallel(model, device_ids=device_ids).cuda()
model.load_state_dict(checkpoint)
dataset = DDFATestDataset(filelists=filelists, root=root,
transform=transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)]))
data_loader = data.DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
cudnn.benchmark = True
model.eval()
end = time.time()
outputs = []
with torch.no_grad():
for _, inputs in enumerate(data_loader):
inputs = inputs.cuda()
output = model(inputs)
for i in range(output.shape[0]):
param_prediction = output[i].cpu().numpy().flatten()
outputs.append(param_prediction)
outputs = np.array(outputs, dtype=np.float32)
print(f'Extracting params take {time.time() - end: .3f}s')
return outputs
def _benchmark_aflw(outputs):
return ana_aflw(calc_nme_alfw(outputs))
def _benchmark_aflw2000(outputs):
return ana_alfw2000(calc_nme_alfw2000(outputs))
def benchmark_alfw_params(params):
outputs = []
for i in range(params.shape[0]):
lm = reconstruct_vertex(params[i])
outputs.append(lm[:2, :])
return _benchmark_aflw(outputs)
def benchmark_aflw2000_params(params):
outputs = []
for i in range(params.shape[0]):
lm = reconstruct_vertex(params[i])
outputs.append(lm[:2, :])
return _benchmark_aflw2000(outputs)
def benchmark_pipeline(arch, checkpoint_fp):
device_ids = [0]
def aflw():
params = extract_param(
checkpoint_fp=checkpoint_fp,
root='test.data/AFLW_GT_crop',
filelists='test.data/AFLW_GT_crop.list',
arch=arch,
device_ids=device_ids,
batch_size=128)
benchmark_alfw_params(params)
def aflw2000():
params = extract_param(
checkpoint_fp=checkpoint_fp,
root='test.data/AFLW2000-3D_crop',
filelists='test.data/AFLW2000-3D_crop.list',
arch=arch,
device_ids=device_ids,
batch_size=128)
benchmark_aflw2000_params(params)
aflw2000()
aflw()
def main():
parser = argparse.ArgumentParser(description='3DDFA Benchmark')
parser.add_argument('--arch', default='mobilenet_1', type=str)
parser.add_argument('-c', '--checkpoint-fp', default='models/phase1_wpdc_vdc.pth.tar', type=str)
args = parser.parse_args()
benchmark_pipeline(args.arch, args.checkpoint_fp)
if __name__ == '__main__':
main()
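# Example invocation (a sketch; it assumes the AFLW/AFLW2000-3D crops listed
# under test.data/ and the checkpoint are available locally):
#
#     python3 benchmark.py --arch mobilenet_1 -c models/phase1_wpdc_vdc.pth.tar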
| 3,554 | 28.87395 | 111 | py |
3DDFA | 3DDFA-master/speed_cpu.py | #!/usr/bin/env python3
# coding: utf-8
import timeit
import numpy as np
SETUP_CODE = '''
import mobilenet_v1
import torch
model = mobilenet_v1.mobilenet_1()
model.eval()
data = torch.rand(1, 3, 120, 120)
'''
TEST_CODE = '''
with torch.no_grad():
model(data)
'''
def main():
repeat, number = 5, 100
res = timeit.repeat(setup=SETUP_CODE,
stmt=TEST_CODE,
repeat=repeat,
number=number)
res = np.array(res, dtype=np.float32)
res /= number
    mean, std = np.mean(res), np.std(res)
    print('Inference speed: {:.2f}±{:.2f} ms'.format(mean * 1000, std * 1000))
if __name__ == '__main__':
main()
| 693 | 18.277778 | 78 | py |
3DDFA | 3DDFA-master/wpdc_loss.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import torch.nn as nn
from math import sqrt
from utils.io import _numpy_to_cuda
from utils.params import *
_to_tensor = _numpy_to_cuda # gpu
def _parse_param_batch(param):
"""Work for both numpy and tensor"""
N = param.shape[0]
p_ = param[:, :12].view(N, 3, -1)
p = p_[:, :, :3]
offset = p_[:, :, -1].view(N, 3, 1)
alpha_shp = param[:, 12:52].view(N, -1, 1)
alpha_exp = param[:, 52:].view(N, -1, 1)
return p, offset, alpha_shp, alpha_exp
class WPDCLoss(nn.Module):
"""Input and target are all 62-d param"""
def __init__(self, opt_style='resample', resample_num=132):
super(WPDCLoss, self).__init__()
self.opt_style = opt_style
self.param_mean = _to_tensor(param_mean)
self.param_std = _to_tensor(param_std)
self.u = _to_tensor(u)
self.w_shp = _to_tensor(w_shp)
self.w_exp = _to_tensor(w_exp)
self.w_norm = _to_tensor(w_norm)
self.w_shp_length = self.w_shp.shape[0] // 3
self.keypoints = _to_tensor(keypoints)
self.resample_num = resample_num
def reconstruct_and_parse(self, input, target):
# reconstruct
param = input * self.param_std + self.param_mean
param_gt = target * self.param_std + self.param_mean
# parse param
p, offset, alpha_shp, alpha_exp = _parse_param_batch(param)
pg, offsetg, alpha_shpg, alpha_expg = _parse_param_batch(param_gt)
return (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg)
def _calc_weights_resample(self, input_, target_):
# resample index
if self.resample_num <= 0:
keypoints_mix = self.keypoints
else:
index = torch.randperm(self.w_shp_length)[:self.resample_num].reshape(-1, 1)
keypoints_resample = torch.cat((3 * index, 3 * index + 1, 3 * index + 2), dim=1).view(-1).cuda()
keypoints_mix = torch.cat((self.keypoints, keypoints_resample))
w_shp_base = self.w_shp[keypoints_mix]
u_base = self.u[keypoints_mix]
w_exp_base = self.w_exp[keypoints_mix]
        input = input_.clone().detach()
        target = target_.clone().detach()
(p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
= self.reconstruct_and_parse(input, target)
input = self.param_std * input + self.param_mean
target = self.param_std * target + self.param_mean
N = input.shape[0]
offset[:, -1] = offsetg[:, -1]
weights = torch.zeros_like(input, dtype=torch.float)
tmpv = (u_base + w_shp_base @ alpha_shpg + w_exp_base @ alpha_expg).view(N, -1, 3).permute(0, 2, 1)
tmpv_norm = torch.norm(tmpv, dim=2)
offset_norm = sqrt(w_shp_base.shape[0] // 3)
# for pose
param_diff_pose = torch.abs(input[:, :11] - target[:, :11])
for ind in range(11):
if ind in [0, 4, 8]:
weights[:, ind] = param_diff_pose[:, ind] * tmpv_norm[:, 0]
elif ind in [1, 5, 9]:
weights[:, ind] = param_diff_pose[:, ind] * tmpv_norm[:, 1]
elif ind in [2, 6, 10]:
weights[:, ind] = param_diff_pose[:, ind] * tmpv_norm[:, 2]
else:
weights[:, ind] = param_diff_pose[:, ind] * offset_norm
        # This is the optimized version
# for shape_exp
magic_number = 0.00057339936 # scale
param_diff_shape_exp = torch.abs(input[:, 12:] - target[:, 12:])
# weights[:, 12:] = magic_number * param_diff_shape_exp * self.w_norm
w = torch.cat((w_shp_base, w_exp_base), dim=1)
w_norm = torch.norm(w, dim=0)
# print('here')
weights[:, 12:] = magic_number * param_diff_shape_exp * w_norm
eps = 1e-6
weights[:, :11] += eps
weights[:, 12:] += eps
# normalize the weights
maxes, _ = weights.max(dim=1)
maxes = maxes.view(-1, 1)
weights /= maxes
# zero the z
weights[:, 11] = 0
return weights
def forward(self, input, target, weights_scale=10):
if self.opt_style == 'resample':
weights = self._calc_weights_resample(input, target)
loss = weights * (input - target) ** 2
return loss.mean()
else:
raise Exception(f'Unknown opt style: {self.opt_style}')
if __name__ == '__main__':
pass
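# Minimal usage sketch for the loss above (a hedged example; the batch shape
# and CUDA availability are assumptions, both inputs are 62-d parameter
# vectors as in training):
#
#     criterion = WPDCLoss(opt_style='resample').cuda()
#     loss = criterion(torch.rand(8, 62).cuda(), torch.rand(8, 62).cuda())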
| 4,540 | 33.664122 | 108 | py |
3DDFA | 3DDFA-master/vdc_loss.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import torch.nn as nn
from utils.io import _load, _numpy_to_cuda, _numpy_to_tensor
from utils.params import *
_to_tensor = _numpy_to_cuda # gpu
def _parse_param_batch(param):
"""Work for both numpy and tensor"""
N = param.shape[0]
p_ = param[:, :12].view(N, 3, -1)
p = p_[:, :, :3]
offset = p_[:, :, -1].view(N, 3, 1)
alpha_shp = param[:, 12:52].view(N, -1, 1)
alpha_exp = param[:, 52:].view(N, -1, 1)
return p, offset, alpha_shp, alpha_exp
class VDCLoss(nn.Module):
def __init__(self, opt_style='all'):
super(VDCLoss, self).__init__()
self.u = _to_tensor(u)
self.param_mean = _to_tensor(param_mean)
self.param_std = _to_tensor(param_std)
self.w_shp = _to_tensor(w_shp)
self.w_exp = _to_tensor(w_exp)
self.keypoints = _to_tensor(keypoints)
self.u_base = self.u[self.keypoints]
self.w_shp_base = self.w_shp[self.keypoints]
self.w_exp_base = self.w_exp[self.keypoints]
self.w_shp_length = self.w_shp.shape[0] // 3
self.opt_style = opt_style
def reconstruct_and_parse(self, input, target):
# reconstruct
param = input * self.param_std + self.param_mean
param_gt = target * self.param_std + self.param_mean
# parse param
p, offset, alpha_shp, alpha_exp = _parse_param_batch(param)
pg, offsetg, alpha_shpg, alpha_expg = _parse_param_batch(param_gt)
return (p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg)
def forward_all(self, input, target):
(p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
= self.reconstruct_and_parse(input, target)
N = input.shape[0]
offset[:, -1] = offsetg[:, -1]
gt_vertex = pg @ (self.u + self.w_shp @ alpha_shpg + self.w_exp @ alpha_expg) \
.view(N, -1, 3).permute(0, 2, 1) + offsetg
vertex = p @ (self.u + self.w_shp @ alpha_shp + self.w_exp @ alpha_exp) \
.view(N, -1, 3).permute(0, 2, 1) + offset
diff = (gt_vertex - vertex) ** 2
loss = torch.mean(diff)
return loss
def forward_resample(self, input, target, resample_num=132):
(p, offset, alpha_shp, alpha_exp), (pg, offsetg, alpha_shpg, alpha_expg) \
= self.reconstruct_and_parse(input, target)
# resample index
index = torch.randperm(self.w_shp_length)[:resample_num].reshape(-1, 1)
keypoints_resample = torch.cat((3 * index, 3 * index + 1, 3 * index + 2), dim=1).view(-1).cuda()
keypoints_mix = torch.cat((self.keypoints, keypoints_resample))
w_shp_base = self.w_shp[keypoints_mix]
u_base = self.u[keypoints_mix]
w_exp_base = self.w_exp[keypoints_mix]
offset[:, -1] = offsetg[:, -1]
N = input.shape[0]
gt_vertex = pg @ (u_base + w_shp_base @ alpha_shpg + w_exp_base @ alpha_expg) \
.view(N, -1, 3).permute(0, 2, 1) + offsetg
vertex = p @ (u_base + w_shp_base @ alpha_shp + w_exp_base @ alpha_exp) \
.view(N, -1, 3).permute(0, 2, 1) + offset
diff = (gt_vertex - vertex) ** 2
loss = torch.mean(diff)
return loss
def forward(self, input, target):
if self.opt_style == 'all':
return self.forward_all(input, target)
elif self.opt_style == 'resample':
return self.forward_resample(input, target)
else:
            raise Exception(f'Unknown opt style: {self.opt_style}')
if __name__ == '__main__':
pass
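# Minimal usage sketch for the loss above (a hedged example; the batch shape
# and CUDA availability are assumptions, both inputs are normalized 62-d
# parameter vectors):
#
#     criterion = VDCLoss(opt_style='all').cuda()
#     loss = criterion(torch.rand(8, 62).cuda(), torch.rand(8, 62).cuda())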
| 3,606 | 34.362745 | 104 | py |
3DDFA | 3DDFA-master/train.py | #!/usr/bin/env python3
# coding: utf-8
import os.path as osp
from pathlib import Path
import numpy as np
import argparse
import time
import logging
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import mobilenet_v1
import torch.backends.cudnn as cudnn
from utils.ddfa import DDFADataset, ToTensorGjz, NormalizeGjz
from utils.ddfa import str2bool, AverageMeter
from utils.io import mkdir
from vdc_loss import VDCLoss
from wpdc_loss import WPDCLoss
# global args (configuration)
args = None
lr = None
arch_choices = ['mobilenet_2', 'mobilenet_1', 'mobilenet_075', 'mobilenet_05', 'mobilenet_025']
def parse_args():
parser = argparse.ArgumentParser(description='3DMM Fitting')
parser.add_argument('-j', '--workers', default=6, type=int)
parser.add_argument('--epochs', default=40, type=int)
parser.add_argument('--start-epoch', default=1, type=int)
parser.add_argument('-b', '--batch-size', default=128, type=int)
parser.add_argument('-vb', '--val-batch-size', default=32, type=int)
parser.add_argument('--base-lr', '--learning-rate', default=0.001, type=float)
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float)
parser.add_argument('--print-freq', '-p', default=20, type=int)
parser.add_argument('--resume', default='', type=str, metavar='PATH')
parser.add_argument('--devices-id', default='0,1', type=str)
parser.add_argument('--filelists-train',
default='', type=str)
parser.add_argument('--filelists-val',
default='', type=str)
parser.add_argument('--root', default='')
parser.add_argument('--snapshot', default='', type=str)
parser.add_argument('--log-file', default='output.log', type=str)
parser.add_argument('--log-mode', default='w', type=str)
parser.add_argument('--size-average', default='true', type=str2bool)
parser.add_argument('--num-classes', default=62, type=int)
parser.add_argument('--arch', default='mobilenet_1', type=str,
choices=arch_choices)
parser.add_argument('--frozen', default='false', type=str2bool)
parser.add_argument('--milestones', default='15,25,30', type=str)
parser.add_argument('--task', default='all', type=str)
parser.add_argument('--test_initial', default='false', type=str2bool)
parser.add_argument('--warmup', default=-1, type=int)
parser.add_argument('--param-fp-train',
default='',
type=str)
parser.add_argument('--param-fp-val',
default='')
parser.add_argument('--opt-style', default='resample', type=str) # resample
parser.add_argument('--resample-num', default=132, type=int)
parser.add_argument('--loss', default='vdc', type=str)
global args
args = parser.parse_args()
# some other operations
args.devices_id = [int(d) for d in args.devices_id.split(',')]
args.milestones = [int(m) for m in args.milestones.split(',')]
snapshot_dir = osp.split(args.snapshot)[0]
mkdir(snapshot_dir)
def print_args(args):
for arg in vars(args):
s = arg + ': ' + str(getattr(args, arg))
logging.info(s)
def adjust_learning_rate(optimizer, epoch, milestones=None):
"""Sets the learning rate: milestone is a list/tuple"""
def to(epoch):
if epoch <= args.warmup:
return 1
elif args.warmup < epoch <= milestones[0]:
return 0
for i in range(1, len(milestones)):
if milestones[i - 1] < epoch <= milestones[i]:
return i
return len(milestones)
n = to(epoch)
global lr
lr = args.base_lr * (0.2 ** n)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(state, filename='checkpoint.pth.tar'):
torch.save(state, filename)
logging.info(f'Save checkpoint to {filename}')
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
model.train()
end = time.time()
# loader is batch style
# for i, (input, target) in enumerate(train_loader):
for i, (input, target) in enumerate(train_loader):
target.requires_grad = False
target = target.cuda(non_blocking=True)
output = model(input)
data_time.update(time.time() - end)
if args.loss.lower() == 'vdc':
loss = criterion(output, target)
elif args.loss.lower() == 'wpdc':
loss = criterion(output, target)
elif args.loss.lower() == 'pdc':
loss = criterion(output, target)
else:
raise Exception(f'Unknown loss {args.loss}')
losses.update(loss.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# log
if i % args.print_freq == 0:
logging.info(f'Epoch: [{epoch}][{i}/{len(train_loader)}]\t'
f'LR: {lr:8f}\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# f'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
f'Loss {losses.val:.4f} ({losses.avg:.4f})')
def validate(val_loader, model, criterion, epoch):
model.eval()
end = time.time()
with torch.no_grad():
losses = []
for i, (input, target) in enumerate(val_loader):
# compute output
target.requires_grad = False
target = target.cuda(non_blocking=True)
output = model(input)
loss = criterion(output, target)
losses.append(loss.item())
elapse = time.time() - end
loss = np.mean(losses)
logging.info(f'Val: [{epoch}][{len(val_loader)}]\t'
f'Loss {loss:.4f}\t'
f'Time {elapse:.3f}')
def main():
    parse_args()  # parse global args
# logging setup
logging.basicConfig(
format='[%(asctime)s] [p%(process)s] [%(pathname)s:%(lineno)d] [%(levelname)s] %(message)s',
level=logging.INFO,
handlers=[
logging.FileHandler(args.log_file, mode=args.log_mode),
logging.StreamHandler()
]
)
print_args(args) # print args
# step1: define the model structure
model = getattr(mobilenet_v1, args.arch)(num_classes=args.num_classes)
torch.cuda.set_device(args.devices_id[0]) # fix bug for `ERROR: all tensors must be on devices[0]`
model = nn.DataParallel(model, device_ids=args.devices_id).cuda() # -> GPU
# step2: optimization: loss and optimization method
# criterion = nn.MSELoss(size_average=args.size_average).cuda()
if args.loss.lower() == 'wpdc':
print(args.opt_style)
criterion = WPDCLoss(opt_style=args.opt_style).cuda()
logging.info('Use WPDC Loss')
elif args.loss.lower() == 'vdc':
criterion = VDCLoss(opt_style=args.opt_style).cuda()
logging.info('Use VDC Loss')
elif args.loss.lower() == 'pdc':
criterion = nn.MSELoss(size_average=args.size_average).cuda()
logging.info('Use PDC loss')
else:
raise Exception(f'Unknown Loss {args.loss}')
optimizer = torch.optim.SGD(model.parameters(),
lr=args.base_lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True)
# step 2.1 resume
if args.resume:
if Path(args.resume).is_file():
logging.info(f'=> loading checkpoint {args.resume}')
checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)['state_dict']
# checkpoint = torch.load(args.resume)['state_dict']
model.load_state_dict(checkpoint)
else:
logging.info(f'=> no checkpoint found at {args.resume}')
# step3: data
normalize = NormalizeGjz(mean=127.5, std=128) # may need optimization
train_dataset = DDFADataset(
root=args.root,
filelists=args.filelists_train,
param_fp=args.param_fp_train,
transform=transforms.Compose([ToTensorGjz(), normalize])
)
val_dataset = DDFADataset(
root=args.root,
filelists=args.filelists_val,
param_fp=args.param_fp_val,
transform=transforms.Compose([ToTensorGjz(), normalize])
)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.workers,
shuffle=True, pin_memory=True, drop_last=True)
val_loader = DataLoader(val_dataset, batch_size=args.val_batch_size, num_workers=args.workers,
shuffle=False, pin_memory=True)
# step4: run
cudnn.benchmark = True
if args.test_initial:
logging.info('Testing from initial')
validate(val_loader, model, criterion, args.start_epoch)
for epoch in range(args.start_epoch, args.epochs + 1):
# adjust learning rate
adjust_learning_rate(optimizer, epoch, args.milestones)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
filename = f'{args.snapshot}_checkpoint_epoch_{epoch}.pth.tar'
save_checkpoint(
{
'epoch': epoch,
'state_dict': model.state_dict(),
# 'optimizer': optimizer.state_dict()
},
filename
)
validate(val_loader, model, criterion, epoch)
if __name__ == '__main__':
main()
| 9,938 | 34.244681 | 105 | py |
3DDFA | 3DDFA-master/mobilenet_v1.py | #!/usr/bin/env python3
# coding: utf-8
from __future__ import division
"""
Creates a MobileNet Model as defined in:
Andrew G. Howard, Menglong Zhu, Bo Chen, et al. (2017).
MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications.
Copyright (c) Yang Lu, 2017
Modified By cleardusk
"""
import math
import torch.nn as nn
__all__ = ['mobilenet_2', 'mobilenet_1', 'mobilenet_075', 'mobilenet_05', 'mobilenet_025']
class DepthWiseBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, prelu=False):
super(DepthWiseBlock, self).__init__()
inplanes, planes = int(inplanes), int(planes)
self.conv_dw = nn.Conv2d(inplanes, inplanes, kernel_size=3, padding=1, stride=stride, groups=inplanes,
bias=False)
self.bn_dw = nn.BatchNorm2d(inplanes)
self.conv_sep = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_sep = nn.BatchNorm2d(planes)
if prelu:
self.relu = nn.PReLU()
else:
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv_dw(x)
out = self.bn_dw(out)
out = self.relu(out)
out = self.conv_sep(out)
out = self.bn_sep(out)
out = self.relu(out)
return out
class MobileNet(nn.Module):
def __init__(self, widen_factor=1.0, num_classes=1000, prelu=False, input_channel=3):
""" Constructor
Args:
            widen_factor: channel width multiplier applied to the layer widths
num_classes: number of classes
"""
super(MobileNet, self).__init__()
block = DepthWiseBlock
self.conv1 = nn.Conv2d(input_channel, int(32 * widen_factor), kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(int(32 * widen_factor))
if prelu:
self.relu = nn.PReLU()
else:
self.relu = nn.ReLU(inplace=True)
self.dw2_1 = block(32 * widen_factor, 64 * widen_factor, prelu=prelu)
self.dw2_2 = block(64 * widen_factor, 128 * widen_factor, stride=2, prelu=prelu)
self.dw3_1 = block(128 * widen_factor, 128 * widen_factor, prelu=prelu)
self.dw3_2 = block(128 * widen_factor, 256 * widen_factor, stride=2, prelu=prelu)
self.dw4_1 = block(256 * widen_factor, 256 * widen_factor, prelu=prelu)
self.dw4_2 = block(256 * widen_factor, 512 * widen_factor, stride=2, prelu=prelu)
self.dw5_1 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_2 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_3 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_4 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_5 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_6 = block(512 * widen_factor, 1024 * widen_factor, stride=2, prelu=prelu)
self.dw6 = block(1024 * widen_factor, 1024 * widen_factor, prelu=prelu)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(int(1024 * widen_factor), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.dw2_1(x)
x = self.dw2_2(x)
x = self.dw3_1(x)
x = self.dw3_2(x)
x = self.dw4_1(x)
x = self.dw4_2(x)
x = self.dw5_1(x)
x = self.dw5_2(x)
x = self.dw5_3(x)
x = self.dw5_4(x)
x = self.dw5_5(x)
x = self.dw5_6(x)
x = self.dw6(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def mobilenet(widen_factor=1.0, num_classes=1000):
"""
Construct MobileNet.
widen_factor=1.0 for mobilenet_1
widen_factor=0.75 for mobilenet_075
widen_factor=0.5 for mobilenet_05
widen_factor=0.25 for mobilenet_025
"""
model = MobileNet(widen_factor=widen_factor, num_classes=num_classes)
return model
def mobilenet_2(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=2.0, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_1(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=1.0, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_075(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.75, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_05(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.5, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_025(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.25, num_classes=num_classes, input_channel=input_channel)
return model
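if __name__ == '__main__':
    # Minimal smoke test (illustrative addition, not part of the original training pipeline):
    # build the width-1.0 model and push one dummy 120x120 crop through it.
    import torch
    net = mobilenet_1(num_classes=62)
    net.eval()
    with torch.no_grad():
        out = net(torch.randn(1, 3, 120, 120))
    print(out.shape)  # expected: torch.Size([1, 62])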
| 5,224 | 32.709677 | 110 | py |
3DDFA | 3DDFA-master/c++/convert_to_onnx.py | #!/usr/bin/env python3
# coding: utf-8
import torch
import mobilenet_v1
def main():
# checkpoint_fp = 'weights/phase1_wpdc_vdc.pth.tar'
checkpoint_fp = 'weights/mb_1.p'
arch = 'mobilenet_1'
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
    # Because the model was trained with multiple GPUs, the 'module.' prefix must be removed from the checkpoint keys
for k in checkpoint.keys():
kc = k.replace('module.', '')
if kc in model_dict.keys():
model_dict[kc] = checkpoint[k]
if kc in ['fc_param.bias', 'fc_param.weight']:
model_dict[kc.replace('_param', '')] = checkpoint[k]
model.load_state_dict(model_dict)
# conversion
batch_size = 1
dummy_input = torch.randn(batch_size, 3, 120, 120)
torch.onnx.export(model, dummy_input, checkpoint_fp.replace('.p', '.onnx'))
# torch.onnx.export(model, dummy_input, checkpoint_fp.replace('.pth.tar', '.onnx'))
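# Hypothetical sanity check (not part of the original script): run the exported graph with the
# optional `onnxruntime` package and confirm the 62-dim output; call it manually after main().
def check_onnx_export(onnx_fp='weights/mb_1.onnx'):
    import numpy as np
    import onnxruntime as ort
    sess = ort.InferenceSession(onnx_fp)
    dummy = np.random.randn(1, 3, 120, 120).astype(np.float32)
    out = sess.run(None, {sess.get_inputs()[0].name: dummy})[0]
    assert out.shape == (1, 62)  # 62 = 12(pose) + 40(shape) + 10(expression)
    return out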
if __name__ == '__main__':
main()
| 1,135 | 32.411765 | 100 | py |
3DDFA | 3DDFA-master/c++/mobilenet_v1.py | #!/usr/bin/env python3
# coding: utf-8
from __future__ import division
"""
Creates a MobileNet Model as defined in:
Andrew G. Howard, Menglong Zhu, Bo Chen, et al. (2017).
MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications.
Copyright (c) Yang Lu, 2017
Modified By cleardusk
"""
import math
import torch.nn as nn
__all__ = ['mobilenet_2', 'mobilenet_1', 'mobilenet_075', 'mobilenet_05', 'mobilenet_025']
class DepthWiseBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, prelu=False):
super(DepthWiseBlock, self).__init__()
inplanes, planes = int(inplanes), int(planes)
self.conv_dw = nn.Conv2d(inplanes, inplanes, kernel_size=3, padding=1, stride=stride, groups=inplanes,
bias=False)
self.bn_dw = nn.BatchNorm2d(inplanes)
self.conv_sep = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_sep = nn.BatchNorm2d(planes)
if prelu:
self.relu = nn.PReLU()
else:
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv_dw(x)
out = self.bn_dw(out)
out = self.relu(out)
out = self.conv_sep(out)
out = self.bn_sep(out)
out = self.relu(out)
return out
class MobileNet(nn.Module):
def __init__(self, widen_factor=1.0, num_classes=1000, prelu=False, input_channel=3):
""" Constructor
Args:
            widen_factor: channel width multiplier applied to the layer widths
num_classes: number of classes
"""
super(MobileNet, self).__init__()
block = DepthWiseBlock
self.conv1 = nn.Conv2d(input_channel, int(32 * widen_factor), kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(int(32 * widen_factor))
if prelu:
self.relu = nn.PReLU()
else:
self.relu = nn.ReLU(inplace=True)
self.dw2_1 = block(32 * widen_factor, 64 * widen_factor, prelu=prelu)
self.dw2_2 = block(64 * widen_factor, 128 * widen_factor, stride=2, prelu=prelu)
self.dw3_1 = block(128 * widen_factor, 128 * widen_factor, prelu=prelu)
self.dw3_2 = block(128 * widen_factor, 256 * widen_factor, stride=2, prelu=prelu)
self.dw4_1 = block(256 * widen_factor, 256 * widen_factor, prelu=prelu)
self.dw4_2 = block(256 * widen_factor, 512 * widen_factor, stride=2, prelu=prelu)
self.dw5_1 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_2 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_3 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_4 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_5 = block(512 * widen_factor, 512 * widen_factor, prelu=prelu)
self.dw5_6 = block(512 * widen_factor, 1024 * widen_factor, stride=2, prelu=prelu)
self.dw6 = block(1024 * widen_factor, 1024 * widen_factor, prelu=prelu)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(int(1024 * widen_factor), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.dw2_1(x)
x = self.dw2_2(x)
x = self.dw3_1(x)
x = self.dw3_2(x)
x = self.dw4_1(x)
x = self.dw4_2(x)
x = self.dw5_1(x)
x = self.dw5_2(x)
x = self.dw5_3(x)
x = self.dw5_4(x)
x = self.dw5_5(x)
x = self.dw5_6(x)
x = self.dw6(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def mobilenet(widen_factor=1.0, num_classes=1000):
"""
Construct MobileNet.
widen_factor=1.0 for mobilenet_1
widen_factor=0.75 for mobilenet_075
widen_factor=0.5 for mobilenet_05
widen_factor=0.25 for mobilenet_025
"""
model = MobileNet(widen_factor=widen_factor, num_classes=num_classes)
return model
def mobilenet_2(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=2.0, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_1(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=1.0, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_075(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.75, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_05(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.5, num_classes=num_classes, input_channel=input_channel)
return model
def mobilenet_025(num_classes=62, input_channel=3):
model = MobileNet(widen_factor=0.25, num_classes=num_classes, input_channel=input_channel)
return model
| 5,224 | 32.709677 | 110 | py |
3DDFA | 3DDFA-master/utils/io.py | #!/usr/bin/env python3
# coding: utf-8
import os
import numpy as np
import torch
import pickle
import scipy.io as sio
def mkdir(d):
"""only works on *nix system"""
if not os.path.isdir(d) and not os.path.exists(d):
os.system('mkdir -p {}'.format(d))
def _get_suffix(filename):
"""a.jpg -> jpg"""
pos = filename.rfind('.')
if pos == -1:
return ''
return filename[pos + 1:]
def _load(fp):
suffix = _get_suffix(fp)
if suffix == 'npy':
return np.load(fp)
elif suffix == 'pkl':
return pickle.load(open(fp, 'rb'))
def _dump(wfp, obj):
suffix = _get_suffix(wfp)
if suffix == 'npy':
np.save(wfp, obj)
elif suffix == 'pkl':
pickle.dump(obj, open(wfp, 'wb'))
else:
raise Exception('Unknown Type: {}'.format(suffix))
def _load_tensor(fp, mode='cpu'):
if mode.lower() == 'cpu':
return torch.from_numpy(_load(fp))
elif mode.lower() == 'gpu':
return torch.from_numpy(_load(fp)).cuda()
def _tensor_to_cuda(x):
if x.is_cuda:
return x
else:
return x.cuda()
def _load_gpu(fp):
return torch.from_numpy(_load(fp)).cuda()
def load_bfm(model_path):
suffix = _get_suffix(model_path)
if suffix == 'mat':
C = sio.loadmat(model_path)
model = C['model_refine']
model = model[0, 0]
model_new = {}
w_shp = model['w'].astype(np.float32)
model_new['w_shp_sim'] = w_shp[:, :40]
w_exp = model['w_exp'].astype(np.float32)
model_new['w_exp_sim'] = w_exp[:, :10]
u_shp = model['mu_shape']
u_exp = model['mu_exp']
u = (u_shp + u_exp).astype(np.float32)
model_new['mu'] = u
model_new['tri'] = model['tri'].astype(np.int32) - 1
# flatten it, pay attention to index value
keypoints = model['keypoints'].astype(np.int32) - 1
keypoints = np.concatenate((3 * keypoints, 3 * keypoints + 1, 3 * keypoints + 2), axis=0)
model_new['keypoints'] = keypoints.T.flatten()
#
w = np.concatenate((w_shp, w_exp), axis=1)
w_base = w[keypoints]
w_norm = np.linalg.norm(w, axis=0)
w_base_norm = np.linalg.norm(w_base, axis=0)
dim = w_shp.shape[0] // 3
u_base = u[keypoints].reshape(-1, 1)
w_shp_base = w_shp[keypoints]
w_exp_base = w_exp[keypoints]
model_new['w_norm'] = w_norm
model_new['w_base_norm'] = w_base_norm
model_new['dim'] = dim
model_new['u_base'] = u_base
model_new['w_shp_base'] = w_shp_base
model_new['w_exp_base'] = w_exp_base
_dump(model_path.replace('.mat', '.pkl'), model_new)
return model_new
else:
return _load(model_path)
_load_cpu = _load
_numpy_to_tensor = lambda x: torch.from_numpy(x)
_tensor_to_numpy = lambda x: x.cpu()
_numpy_to_cuda = lambda x: _tensor_to_cuda(torch.from_numpy(x))
_cuda_to_tensor = lambda x: x.cpu()
_cuda_to_numpy = lambda x: x.cpu().numpy()
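# Usage sketch (file paths below are placeholders, not shipped with this module):
# model = load_bfm('path/to/model_refine.mat')  # parses the refined BFM and caches a .pkl next to the .mat
# _dump('tmp/u_base.npy', model['u_base'])      # .npy goes through np.save, .pkl goes through pickle
# u_base = _load('tmp/u_base.npy')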
| 3,012 | 24.974138 | 97 | py |
3DDFA | 3DDFA-master/utils/ddfa.py | #!/usr/bin/env python3
# coding: utf-8
import os.path as osp
from pathlib import Path
import numpy as np
import torch
import torch.utils.data as data
import cv2
import pickle
import argparse
from .io import _numpy_to_tensor, _load_cpu, _load_gpu
from .params import *
def _parse_param(param):
"""Work for both numpy and tensor"""
p_ = param[:12].reshape(3, -1)
p = p_[:, :3]
offset = p_[:, -1].reshape(3, 1)
alpha_shp = param[12:52].reshape(-1, 1)
alpha_exp = param[52:].reshape(-1, 1)
return p, offset, alpha_shp, alpha_exp
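# Layout of the 62-dim parameter vector handled by _parse_param:
#   param[:12]   -> 3x4 pose matrix, split into the 3x3 part `p` and the 3x1 translation `offset`
#   param[12:52] -> 40 shape coefficients (alpha_shp)
#   param[52:]   -> 10 expression coefficients (alpha_exp)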
def reconstruct_vertex(param, whitening=True, dense=False, transform=True):
"""Whitening param -> 3d vertex, based on the 3dmm param: u_base, w_shp, w_exp
    dense: if True, return the dense vertices; otherwise return the 68 sparse landmarks. Both dense and sparse
    vertices are transformed to the image coordinate space, but without the alignment caused by face cropping.
    transform: whether to transform to image space
"""
if len(param) == 12:
param = np.concatenate((param, [0] * 50))
if whitening:
if len(param) == 62:
param = param * param_std + param_mean
else:
param = np.concatenate((param[:11], [0], param[11:]))
param = param * param_std + param_mean
p, offset, alpha_shp, alpha_exp = _parse_param(param)
if dense:
vertex = p @ (u + w_shp @ alpha_shp + w_exp @ alpha_exp).reshape(3, -1, order='F') + offset
if transform:
# transform to image coordinate space
vertex[1, :] = std_size + 1 - vertex[1, :]
else:
"""For 68 pts"""
vertex = p @ (u_base + w_shp_base @ alpha_shp + w_exp_base @ alpha_exp).reshape(3, -1, order='F') + offset
if transform:
# transform to image coordinate space
vertex[1, :] = std_size + 1 - vertex[1, :]
return vertex
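# For example, with the standard 68-point landmark basis (u_base, w_shp_base, w_exp_base),
# reconstruct_vertex(param, dense=False) returns an array of shape (3, 68): one row per coordinate axis.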
def img_loader(path):
return cv2.imread(path, cv2.IMREAD_COLOR)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ToTensorGjz(object):
def __call__(self, pic):
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float()
def __repr__(self):
return self.__class__.__name__ + '()'
class NormalizeGjz(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
tensor.sub_(self.mean).div_(self.std)
return tensor
class DDFADataset(data.Dataset):
def __init__(self, root, filelists, param_fp, transform=None, **kargs):
self.root = root
self.transform = transform
self.lines = Path(filelists).read_text().strip().split('\n')
self.params = _numpy_to_tensor(_load_cpu(param_fp))
self.img_loader = img_loader
def _target_loader(self, index):
target = self.params[index]
return target
def __getitem__(self, index):
path = osp.join(self.root, self.lines[index])
img = self.img_loader(path)
target = self._target_loader(index)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.lines)
class DDFATestDataset(data.Dataset):
def __init__(self, filelists, root='', transform=None):
self.root = root
self.transform = transform
self.lines = Path(filelists).read_text().strip().split('\n')
def __getitem__(self, index):
path = osp.join(self.root, self.lines[index])
img = img_loader(path)
if self.transform is not None:
img = self.transform(img)
return img
def __len__(self):
return len(self.lines)
| 4,316 | 26.673077 | 118 | py |
Squeezeformer | Squeezeformer-main/examples/squeezeformer/test.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from tqdm import tqdm
import argparse
from scipy.special import softmax
import datasets
import tensorflow as tf
from src.configs.config import Config
from src.datasets.asr_dataset import ASRSliceDataset
from src.featurizers.speech_featurizers import TFSpeechFeaturizer
from src.featurizers.text_featurizers import SentencePieceFeaturizer
from src.models.conformer import ConformerCtc
from src.utils import env_util, file_util
logger = env_util.setup_environment()
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
DEFAULT_YAML = os.path.join(os.path.abspath(os.path.dirname(__file__)), "config.yml")
tf.keras.backend.clear_session()
def parse_arguments():
parser = argparse.ArgumentParser(prog="Conformer Testing")
parser.add_argument("--config", type=str, default=DEFAULT_YAML, help="The file path of model configuration file")
parser.add_argument("--mxp", default=False, action="store_true", help="Enable mixed precision")
parser.add_argument("--device", type=int, default=0, help="Device's id to run test on")
parser.add_argument("--cpu", default=False, action="store_true", help="Whether to only use cpu")
parser.add_argument("--saved", type=str, default=None, help="Path to saved model")
parser.add_argument("--output", type=str, default=None, help="Result filepath")
# Dataset arguments
parser.add_argument("--bs", type=int, default=None, help="Test batch size")
parser.add_argument("--dataset_path", type=str, required=True, help="path to the tsv manifest files")
parser.add_argument("--dataset", type=str, default="test_other",
choices=["dev_clean", "dev_other", "test_clean", "test_other"], help="Testing dataset")
parser.add_argument("--input_padding", type=int, default=3700)
parser.add_argument("--label_padding", type=int, default=530)
# Architecture arguments
parser.add_argument("--fixed_arch", default=None, help="force fixed architecture")
# Decoding arguments
parser.add_argument("--beam_size", type=int, default=None, help="ctc beam size")
args = parser.parse_args()
return args
def parse_fixed_arch(args):
parsed_arch = args.fixed_arch.split('|')
i, rep = 0, 1
fixed_arch = []
while i < len(parsed_arch):
if parsed_arch[i].isnumeric():
rep = int(parsed_arch[i])
else:
block = parsed_arch[i].split(',')
assert len(block) == NUM_LAYERS_IN_BLOCK
for _ in range(rep):
fixed_arch.append(block)
rep = 1
i += 1
return fixed_arch
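# Illustrative --fixed_arch value (hypothetical): "4|M,s,C,s|2|m,f,c,f" expands to four
# ['M', 's', 'C', 's'] blocks followed by two ['m', 'f', 'c', 'f'] blocks, i.e. a numeric
# token sets the repeat count for the next comma-separated block of 4 layer types.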
args = parse_arguments()
config = Config(args.config)
NUM_BLOCKS = config.model_config['encoder_num_blocks']
NUM_LAYERS_IN_BLOCK = 4
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": args.mxp})
env_util.setup_devices([args.device], cpu=args.cpu)
speech_featurizer = TFSpeechFeaturizer(config.speech_config)
logger.info("Use SentencePiece ...")
text_featurizer = SentencePieceFeaturizer(config.decoder_config)
tf.random.set_seed(0)
# Parse fixed architecture
if args.fixed_arch is not None:
fixed_arch = parse_fixed_arch(args)
if len(fixed_arch) != NUM_BLOCKS:
logger.warn(
f"encoder_num_blocks={config.model_config['encoder_num_blocks']} is " \
f"different from len(fixed_arch) = {len(fixed_arch)}." \
)
logger.warn(f"Changing `encoder_num_blocks` to {len(fixed_arch)}")
config.model_config['encoder_num_blocks'] = len(fixed_arch)
logger.info(f"Changing fixed arch: {fixed_arch}")
config.model_config['encoder_fixed_arch'] = fixed_arch
if args.dataset_path is not None:
dataset_path = os.path.join(args.dataset_path, f"{args.dataset}.tsv")
logger.info(f"dataset: {args.dataset} at {dataset_path}")
config.learning_config.test_dataset_config.data_paths = [dataset_path]
else:
raise ValueError("specify the manifest file path using --dataset_path")
test_dataset = ASRSliceDataset(
speech_featurizer=speech_featurizer,
text_featurizer=text_featurizer,
input_padding_length=args.input_padding,
label_padding_length=args.label_padding,
**vars(config.learning_config.test_dataset_config)
)
conformer = ConformerCtc(
**config.model_config,
vocabulary_size=text_featurizer.num_classes,
)
conformer.make(speech_featurizer.shape)
if args.saved:
conformer.load_weights(args.saved, by_name=True)
else:
logger.warning("Model is initialized randomly, please use --saved to assign checkpoint")
conformer.summary(line_length=100)
conformer.add_featurizers(speech_featurizer, text_featurizer)
batch_size = args.bs or config.learning_config.running_config.batch_size
test_data_loader = test_dataset.create(batch_size)
blank_id = text_featurizer.blank
true_decoded = []
pred_decoded = []
beam_decoded = []
#for batch in enumerate(test_data_loader):
for k, batch in tqdm(enumerate(test_data_loader)):
labels, labels_len = batch[1]['labels'], batch[1]['labels_length']
outputs = conformer(batch[0], training=False)
logits, logits_len = outputs['logits'], outputs['logits_length']
probs = softmax(logits)
if args.beam_size is not None:
beam = tf.nn.ctc_beam_search_decoder(
tf.transpose(logits, perm=[1, 0, 2]), logits_len, beam_width=args.beam_size, top_paths=1,
)
beam = tf.sparse.to_dense(beam[0][0]).numpy()
for i, (p, l, label, ll) in enumerate(zip(probs, logits_len, labels, labels_len)):
# p: length x characters
pred = p[:l].argmax(-1)
decoded_prediction = []
previous = blank_id
        # remove repeated characters and blank tokens
for p in pred:
if (p != previous or previous == blank_id) and p != blank_id:
decoded_prediction.append(p)
previous = p
if len(decoded_prediction) == 0:
decoded = ""
else:
decoded = text_featurizer.iextract([decoded_prediction]).numpy()[0].decode('utf-8')
pred_decoded.append(decoded)
label_len = tf.math.reduce_sum(tf.cast(label != 0, tf.int32))
true_decoded.append(text_featurizer.iextract([label[:label_len]]).numpy()[0].decode('utf-8'))
if args.beam_size is not None:
b = beam[i]
previous = blank_id
            # remove repeated characters and blank tokens
beam_prediction = []
for p in b:
if (p != previous or previous == blank_id) and p != blank_id:
beam_prediction.append(p)
previous = p
if len(beam_prediction) == 0:
decoded = ""
else:
decoded = text_featurizer.iextract([beam_prediction]).numpy()[0].decode('utf-8')
beam_decoded.append(decoded)
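# Worked example of the greedy collapse above, assuming blank_id == 0: a frame-wise
# argmax sequence [7, 7, 0, 7, 3, 3] decodes to [7, 7, 3] -- consecutive repeats merge
# unless separated by a blank, and the blanks themselves are dropped.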
wer_metric = datasets.load_metric("wer")
logger.info(f"Length decoded: {len(true_decoded)}")
logger.info(f"WER: {wer_metric.compute(predictions=pred_decoded, references=true_decoded)}")
if args.beam_size is not None:
logger.info(f"WER-beam: {wer_metric.compute(predictions=beam_decoded, references=true_decoded)}")
if args.output is not None:
with file_util.save_file(file_util.preprocess_paths(args.output)) as filepath:
overwrite = True
if tf.io.gfile.exists(filepath):
overwrite = input(f"Overwrite existing result file {filepath} ? (y/n): ").lower() == "y"
if overwrite:
logger.info(f"Saving result to {args.output} ...")
with open(filepath, "w") as openfile:
openfile.write("PATH\tDURATION\tGROUNDTRUTH\tGREEDY\tBEAMSEARCH\n")
progbar = tqdm(total=test_dataset.total_steps, unit="batch")
for i, (groundtruth, greedy) in enumerate(zip(true_decoded, pred_decoded)):
openfile.write(f"N/A\tN/A\t{groundtruth}\t{greedy}\tN/A\n")
progbar.update(1)
progbar.close()
| 8,644 | 37.59375 | 117 | py |
Squeezeformer | Squeezeformer-main/src/models/base_model.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import mixed_precision as mxp
from ..utils import file_util, env_util
class BaseModel(tf.keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metrics = {}
self.use_loss_scale = False
def save(
self,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None,
signatures=None,
options=None,
save_traces=True,
):
with file_util.save_file(filepath) as path:
super().save(
filepath=path,
overwrite=overwrite,
include_optimizer=include_optimizer,
save_format=save_format,
signatures=signatures,
options=options,
save_traces=save_traces,
)
def save_weights(
self,
filepath,
overwrite=True,
save_format=None,
options=None,
):
with file_util.save_file(filepath) as path:
super().save_weights(
filepath=path,
overwrite=overwrite,
save_format=save_format,
options=options,
)
def load_weights(
self,
filepath,
by_name=False,
skip_mismatch=False,
options=None,
):
with file_util.read_file(filepath) as path:
super().load_weights(
filepath=path,
by_name=by_name,
skip_mismatch=skip_mismatch,
options=options,
)
@property
def metrics(self):
return self._metrics.values()
def add_metric(self, metric: tf.keras.metrics.Metric):
self._metrics[metric.name] = metric
def make(self, *args, **kwargs):
""" Custom function for building model (uses self.build so cannot overwrite that function) """
raise NotImplementedError()
def compile(self, loss, optimizer, run_eagerly=None, **kwargs):
if not env_util.has_devices("TPU"):
optimizer = mxp.experimental.LossScaleOptimizer(tf.keras.optimizers.get(optimizer), "dynamic")
self.use_loss_scale = True
loss_metric = tf.keras.metrics.Mean(name="loss", dtype=tf.float32)
self._metrics = {loss_metric.name: loss_metric}
super().compile(optimizer=optimizer, loss=loss, run_eagerly=run_eagerly, **kwargs)
# -------------------------------- STEP FUNCTIONS -------------------------------------
def gradient_step(self, inputs, y_true):
with tf.GradientTape() as tape:
y_pred = self(inputs, training=True)
loss = self.loss(y_true, y_pred)
if self.use_loss_scale:
scaled_loss = self.optimizer.get_scaled_loss(loss)
if self.use_loss_scale:
gradients = tape.gradient(scaled_loss, self.trainable_weights)
gradients = self.optimizer.get_unscaled_gradients(gradients)
else:
gradients = tape.gradient(loss, self.trainable_weights)
return loss, y_pred, gradients
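    # Mixed-precision sketch of the loss-scaling branch above: with a loss scale S (e.g. 2**15),
    # the tape differentiates S * loss, so the gradients come out as S * dL/dw and
    # get_unscaled_gradients divides them by S again before the optimizer update. The scaling
    # keeps small float16 gradients from underflowing to zero without changing the update itself.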
def train_step(self, batch):
"""
Args:
batch ([tf.Tensor]): a batch of training data
Returns:
            Dict[tf.Tensor]: a dict of training metrics whose keys are the metric names
"""
inputs, y_true = batch
loss, y_pred, gradients = self.gradient_step(inputs, y_true)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
self._metrics["loss"].update_state(loss)
if 'step_loss' in self._metrics:
self._metrics['step_loss'].update_state(loss)
if 'WER' in self._metrics:
self._metrics['WER'].update_state(y_true, y_pred)
if 'labels' in self._metrics:
self._metrics['labels'].update_state(y_true)
if 'logits' in self._metrics:
self._metrics['logits'].update_state(y_pred)
if 'logits_len' in self._metrics:
self._metrics['logits_len'].update_state(y_pred)
return {m.name: m.result() for m in self.metrics}
def test_step(self, batch):
"""
Args:
            batch ([tf.Tensor]): a batch of validation data
        Returns:
            Dict[tf.Tensor]: a dict of validation metrics whose keys are the metric names prefixed with "val_"
"""
inputs, y_true = batch
y_pred = self(inputs, training=False)
loss = self.loss(y_true, y_pred)
self._metrics["loss"].update_state(loss)
if 'step_loss' in self._metrics:
self._metrics['step_loss'].update_state(loss)
if 'WER' in self._metrics:
self._metrics['WER'].update_state(y_true, y_pred)
if 'labels' in self._metrics:
self._metrics['labels'].update_state(y_true)
if 'logits' in self._metrics:
self._metrics['logits'].update_state(y_pred)
if 'logits_len' in self._metrics:
self._metrics['logits_len'].update_state(y_pred)
return {m.name: m.result() for m in self.metrics}
def predict_step(self, batch):
"""
Args:
batch ([tf.Tensor]): a batch of testing data
Returns:
            [tf.Tensor]: stacked tensor of shape [B, 3], where each row holds the text [truth, greedy, beam_search]
"""
inputs, y_true = batch
labels = self.text_featurizer.iextract(y_true["labels"])
greedy_decoding = self.recognize(inputs)
if self.text_featurizer.decoder_config.beam_width == 0:
beam_search_decoding = tf.map_fn(lambda _: tf.convert_to_tensor("", dtype=tf.string), labels)
else:
beam_search_decoding = self.recognize_beam(inputs)
return tf.stack([labels, greedy_decoding, beam_search_decoding], axis=-1)
# -------------------------------- INFERENCE FUNCTIONS -------------------------------------
def recognize(self, *args, **kwargs):
""" Greedy decoding function that used in self.predict_step """
raise NotImplementedError()
def recognize_beam(self, *args, **kwargs):
""" Beam search decoding function that used in self.predict_step """
raise NotImplementedError()
| 6,880 | 35.026178 | 111 | py |
Squeezeformer | Squeezeformer-main/src/models/conformer_encoder.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from .submodules.glu import GLU
from .submodules.subsampling import Conv2dSubsampling
from .submodules.positional_encoding import PositionalEncoding
from .submodules.multihead_attention import MultiHeadAttention, RelPositionMultiHeadAttention
from .submodules.time_reduction import TimeReductionLayer
from ..utils import shape_util
logger = tf.get_logger()
class FFModule(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
dropout=0.0,
fc_factor=0.5,
adaptive_scale=False,
ff_expansion_rate=4,
name="ff_module",
**kwargs,
):
super(FFModule, self).__init__(name=name, **kwargs)
self.fc_factor = fc_factor
logger.info(f"fc factor set as {self.fc_factor}")
self.adaptive_scale = adaptive_scale
if not adaptive_scale:
logger.info("No scaling, use preLN")
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
else:
logger.info("Use scaling, no preLN")
self.scale = tf.Variable([1.] * input_dim, trainable=True, name=f'{name}_scale')
self.bias = tf.Variable([0.] * input_dim, trainable=True, name=f'{name}_bias')
ffn1_max = input_dim ** -0.5
ffn2_max = (ff_expansion_rate * input_dim) ** -0.5
self.ffn1 = tf.keras.layers.Dense(
ff_expansion_rate * input_dim, name=f"{name}_dense_1",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-ffn1_max, maxval=ffn1_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-ffn1_max, maxval=ffn1_max),
)
self.act = tf.keras.layers.Activation(tf.nn.swish, name=f"{name}_act")
self.do1 = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout_1")
self.ffn2 = tf.keras.layers.Dense(
input_dim, name=f"{name}_dense_2",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-ffn2_max, maxval=ffn2_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-ffn2_max, maxval=ffn2_max),
)
self.do2 = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout_2")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
def call(self, inputs, training=False, **kwargs):
if not self.adaptive_scale:
outputs = self.ln(inputs, training=training)
else:
scale = tf.reshape(self.scale, (1, 1, -1))
bias = tf.reshape(self.bias, (1, 1, -1))
outputs = inputs * scale + bias
outputs = self.ffn1(outputs, training=training)
outputs = self.act(outputs)
outputs = self.do1(outputs, training=training)
outputs = self.ffn2(outputs, training=training)
outputs = self.do2(outputs, training=training)
outputs = self.res_add([inputs, self.fc_factor * outputs])
return outputs
class MHSAModule(tf.keras.layers.Layer):
def __init__(
self,
head_size,
num_heads,
dropout=0.0,
mha_type="relmha",
adaptive_scale=False,
name="mhsa_module",
**kwargs,
):
super(MHSAModule, self).__init__(name=name, **kwargs)
self.adaptive_scale = adaptive_scale
input_dim = num_heads * head_size
if not adaptive_scale:
logger.info("No scaling, use preLN")
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
else:
logger.info("Use scaling, no preLN")
self.scale = tf.Variable([1.] * input_dim, trainable=True, name=f'{name}_scale')
self.bias = tf.Variable([0.] * input_dim, trainable=True, name=f'{name}_bias')
if mha_type == "relmha":
self.mha = RelPositionMultiHeadAttention(
name=f"{name}_mhsa",
head_size=head_size, num_heads=num_heads,
)
elif mha_type == "mha":
self.mha = MultiHeadAttention(
name=f"{name}_mhsa",
head_size=head_size, num_heads=num_heads,
)
else:
raise ValueError("mha_type must be either 'mha' or 'relmha'")
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
self.mha_type = mha_type
def call(self, inputs, training=False, mask=None, pos=False, **kwargs):
if pos is False:
inputs, pos = inputs # pos is positional encoding
if not self.adaptive_scale:
outputs = self.ln(inputs, training=training)
else:
scale = tf.reshape(self.scale, (1, 1, -1))
bias = tf.reshape(self.bias, (1, 1, -1))
outputs = inputs * scale + bias
if self.mha_type == "relmha":
outputs = self.mha([outputs, outputs, outputs, pos], training=training, mask=mask)
else:
outputs = outputs + pos
outputs = self.mha([outputs, outputs, outputs], training=training, mask=mask)
outputs = self.do(outputs, training=training)
outputs = self.res_add([inputs, outputs])
return outputs
class ConvModule(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
kernel_size=31,
dropout=0.0,
depth_multiplier=1,
conv_expansion_rate=2,
conv_use_glu=False,
adaptive_scale=False,
name="conv_module",
**kwargs,
):
super(ConvModule, self).__init__(name=name, **kwargs)
self.adaptive_scale = adaptive_scale
if not adaptive_scale:
logger.info("No scaling, use preLN")
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
else:
logger.info("Use scaling, no preLN")
self.scale = tf.Variable([1.] * input_dim, trainable=True, name=f'{name}_scale')
self.bias = tf.Variable([0.] * input_dim, trainable=True, name=f'{name}_bias')
pw1_max = input_dim ** -0.5
dw_max = kernel_size ** -0.5
pw2_max = input_dim ** -0.5
self.pw_conv_1 = tf.keras.layers.Conv2D(
filters=conv_expansion_rate * input_dim, kernel_size=1, strides=1,
padding="valid", name=f"{name}_pw_conv_1",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-pw1_max, maxval=pw1_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-pw1_max, maxval=pw1_max),
)
if conv_use_glu:
logger.info("Using GLU for Conv")
self.act1 = GLU(name=f"{name}_act_1")
else:
logger.info("Replace GLU with swish for Conv")
self.act1 = tf.keras.layers.Activation(tf.nn.swish, name=f"{name}_act_1")
self.dw_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_size, 1), strides=1,
padding="same", name=f"{name}_dw_conv",
depth_multiplier=depth_multiplier,
depthwise_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
)
self.bn = tf.keras.layers.experimental.SyncBatchNormalization(
name=f"{name}_bn",
momentum=0.985,
)
self.act2 = tf.keras.layers.Activation(tf.nn.swish, name=f"{name}_act_2")
self.pw_conv_2 = tf.keras.layers.Conv2D(
filters=input_dim, kernel_size=1, strides=1,
padding="valid", name=f"{name}_pw_conv_2",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-pw2_max, maxval=pw2_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-pw2_max, maxval=pw2_max),
)
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
def call(self, inputs, training=False, pad_mask=None, **kwargs):
if not self.adaptive_scale:
outputs = self.ln(inputs, training=training)
else:
scale = tf.reshape(self.scale, (1, 1, -1))
bias = tf.reshape(self.bias, (1, 1, -1))
outputs = inputs * scale + bias
B, T, E = shape_util.shape_list(outputs)
outputs = tf.reshape(outputs, [B, T, 1, E])
outputs = self.pw_conv_1(outputs, training=training)
outputs = self.act1(outputs)
pad_mask = tf.expand_dims(tf.expand_dims(pad_mask, -1), -1)
outputs = outputs * tf.cast(pad_mask, "float32")
outputs = self.dw_conv(outputs, training=training)
outputs = self.bn(outputs, training=training)
outputs = self.act2(outputs)
outputs = self.pw_conv_2(outputs, training=training)
outputs = tf.reshape(outputs, [B, T, E])
outputs = self.do(outputs, training=training)
outputs = self.res_add([inputs, outputs])
return outputs
class IdentityLayer(tf.keras.layers.Layer):
def call(self, inputs, *args, **kwargs):
return inputs
class MHSAFFModule(tf.keras.layers.Layer):
'''
Wrapper class for a MHSA layer followed by a FF layer
'''
def __init__(
self,
input_dim,
head_size,
num_heads,
dropout=0.0,
mha_type="relmha",
fc_factor=0.5,
ff_expansion_rate=4,
adaptive_scale=False,
name="mhsaff_module",
**kwargs,
):
super(MHSAFFModule, self).__init__(name=name, **kwargs)
assert input_dim == head_size * num_heads
self.mhsa = MHSAModule(
mha_type=mha_type,
head_size=head_size,
num_heads=num_heads,
adaptive_scale=adaptive_scale,
dropout=dropout,
name=f"{name}_mhsa",
)
self.ln_mid = tf.keras.layers.LayerNormalization(name=f"{name}_ln_mid")
self.ff = FFModule(
input_dim=input_dim,
dropout=dropout,
fc_factor=fc_factor,
ff_expansion_rate=ff_expansion_rate,
adaptive_scale=adaptive_scale,
name=f"{name}_ff",
)
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
def call(self, inputs, training=False, *args, **kwargs):
outputs = self.mhsa(inputs, training=training, *args, **kwargs)
outputs = self.ln_mid(outputs, training=training)
outputs = self.ff(outputs, training=training, *args, **kwargs)
outputs = self.ln(outputs, training=training)
return outputs
class ConvFFModule(tf.keras.layers.Layer):
'''
Wrapper class for a Conv layer followed by a FF layer
'''
def __init__(
self,
input_dim,
kernel_size=31,
dropout=0.0,
conv_expansion_rate=2,
conv_use_glu=False,
fc_factor=0.5,
ff_expansion_rate=4,
adaptive_scale=False,
name="convff_module",
**kwargs,
):
super(ConvFFModule, self).__init__(name=name, **kwargs)
self.conv = ConvModule(
input_dim=input_dim,
kernel_size=kernel_size,
conv_expansion_rate=conv_expansion_rate,
dropout=dropout,
conv_use_glu=conv_use_glu,
adaptive_scale=adaptive_scale,
name=f"{name}_conv",
)
self.ln_mid = tf.keras.layers.LayerNormalization(name=f"{name}_ln_mid")
self.ff = FFModule(
input_dim=input_dim, dropout=dropout,
fc_factor=fc_factor,
ff_expansion_rate=ff_expansion_rate,
adaptive_scale=adaptive_scale,
name=f"{name}_ff",
)
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
def call(self, inputs, training=False, *args, **kwargs):
outputs = self.conv(inputs, training=training, *args, **kwargs)
outputs = self.ln_mid(outputs, training=training)
outputs = self.ff(outputs, training=training, *args, **kwargs)
outputs = self.ln(outputs, training=training)
return outputs
class ConformerBlock(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
dropout=0.0,
fc_factor=0.5,
head_size=36,
num_heads=4,
mha_type="relmha",
kernel_size=31,
name="conformer_block",
fixed_arch=None,
conv_use_glu=False,
no_post_ln=False,
adaptive_scale=False,
**kwargs,
):
assert input_dim == num_heads * head_size
super(ConformerBlock, self).__init__(name=name, **kwargs)
def get_fixed_arch(arch_type, name):
logger.info(f'layer type: {arch_type}')
if arch_type == 'f':
return FFModule(
input_dim=input_dim,
dropout=dropout,
fc_factor=fc_factor,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 'm':
return MHSAModule(
mha_type=mha_type,
head_size=head_size,
num_heads=num_heads,
dropout=dropout,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 'c':
return ConvModule(
input_dim=input_dim,
kernel_size=kernel_size,
dropout=dropout,
conv_use_glu=conv_use_glu,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 'M':
return MHSAFFModule(
mha_type=mha_type,
head_size=head_size,
num_heads=num_heads,
dropout=dropout,
input_dim=input_dim,
fc_factor=fc_factor,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 'C':
return ConvFFModule(
input_dim=input_dim,
kernel_size=kernel_size,
conv_use_glu=conv_use_glu,
dropout=dropout,
fc_factor=fc_factor,
adaptive_scale=adaptive_scale,
name=name,
)
elif arch_type == 's':
return IdentityLayer()
            raise ValueError(f"fixed architecture type '{arch_type}' is not defined")
####### Layer 1: MHSA ######
if fixed_arch is None:
arch_type = 'm'
else:
arch_type = fixed_arch[0]
self.layer1 = get_fixed_arch(arch_type, name+"_layer1")
####### Layer 2: FF ######
arch_type = 'f' if fixed_arch is None else fixed_arch[1]
self.layer2 = get_fixed_arch(arch_type, name+"_layer2")
####### Layer 3: CONV ######
arch_type = 'c' if fixed_arch is None else fixed_arch[2]
self.layer3 = get_fixed_arch(arch_type, name+"_layer3")
####### Layer 4: FF ######
arch_type = 'f' if fixed_arch is None else fixed_arch[3]
self.layer4 = get_fixed_arch(arch_type, name+"_layer4")
if not no_post_ln:
self.ln = tf.keras.layers.LayerNormalization(name=f"{name}_ln")
        else:  # we skip post-LN for Squeezeformer since it has already been applied inside the MF or CF blocks
logger.info("Skipping post ln")
self.ln = None
def call(self, inputs, training=False, mask=None, pad_mask=None, **kwargs):
inputs, pos = inputs # pos is positional encoding
outputs = self.layer1(inputs, training=training, mask=mask, pos=pos, pad_mask=pad_mask, **kwargs)
outputs = self.layer2(outputs, training=training, mask=mask, pos=pos, pad_mask=pad_mask, **kwargs)
outputs = self.layer3(outputs, training=training, mask=mask, pos=pos, pad_mask=pad_mask, **kwargs)
outputs = self.layer4(outputs, training=training, mask=mask, pos=pos, pad_mask=pad_mask, **kwargs)
if self.ln is not None:
outputs = self.ln(outputs, training=training)
return outputs
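# Legend for the per-layer `fixed_arch` codes consumed by get_fixed_arch above:
#   'f' = FFModule, 'm' = MHSAModule, 'c' = ConvModule, 's' = identity (skip),
#   'M' = MHSAFFModule, 'C' = ConvFFModule (the last two carry their own post-LN).
# For instance, ['m', 'f', 'c', 'f'] reproduces the default ordering used when fixed_arch
# is None, while something like ['M', 's', 'C', 's'] builds the block from the fused modules.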
class ConformerEncoder(tf.keras.Model):
def __init__(
self,
subsampling,
dmodel=144,
num_blocks=16,
mha_type="relmha",
head_size=36,
num_heads=4,
kernel_size=31,
fc_factor=0.5,
dropout=0.0,
name="conformer_encoder",
fixed_arch=None,
conv_use_glu=None,
time_reduce_idx=None,
time_recover_idx=None,
no_post_ln=False,
ds_subsample=False,
adaptive_scale=False,
**kwargs,
):
super(ConformerEncoder, self).__init__(name=name, **kwargs)
if time_reduce_idx is None:
self.time_reduce = None
else:
if time_recover_idx is None:
self.time_reduce = 'normal' # no recovery at the end
else:
self.time_reduce = 'recover' # recovery at the end
assert len(time_reduce_idx) == len(time_recover_idx)
self.reduce_idx = time_reduce_idx
self.recover_idx = time_recover_idx
self.reduce_stride = 2
self.dmodel = dmodel
self.xscale = dmodel ** 0.5
subsampling_name = subsampling.pop("type", "conv2d")
if subsampling_name == "vgg":
raise NotImplementedError("VGG subsampling is not supported")
elif subsampling_name == "conv2d":
subsampling_class = Conv2dSubsampling
else:
raise ValueError("subsampling must be either 'conv2d' or 'vgg'")
self.conv_subsampling = subsampling_class(
**subsampling, ds=ds_subsample, name=f"{name}_subsampling",
)
self.pre_ln = tf.keras.layers.LayerNormalization(name=f"{name}_preln")
self.pe = PositionalEncoding(dmodel, name=f"{name}_pe")
linear_max = 5120 ** -0.5 # TODO: parameterize this later
self.linear = tf.keras.layers.Dense(
dmodel, name=f"{name}_linear",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-linear_max, maxval=linear_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-linear_max, maxval=linear_max),
)
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.conformer_blocks = []
recover_dmodels = []
recover_head_sizes = []
self.pe_time_reduction = []
self.time_reduction_layers = []
self.time_recover_layers = []
for i in range(num_blocks):
logger.info(f"Initialize block {i}")
if self.time_reduce is not None and i in self.reduce_idx:
recover_dmodel = dmodel
recover_dmodels.append(recover_dmodel) # push dmodel to recover later
recover_head_sizes.append(head_size) # push head size to recover later
logger.info(f"Reducing to dmodel {dmodel}, head_size {head_size}")
self.time_reduction_layers.append(
TimeReductionLayer(
recover_dmodel,
dmodel,
stride=self.reduce_stride,
name=f"{name}_timereduce",
)
)
self.pe_time_reduction.append(PositionalEncoding(dmodel, name=f"{name}_pe2"))
if self.time_reduce == 'recover' and i in self.recover_idx:
dmodel = recover_dmodels[-1] # pop dmodel for recovery
head_size = recover_head_sizes[-1] # pop head size for recovery
logger.info(f"recovering to dmodel {dmodel}, head_size {head_size}")
self.time_recover_layers.append(tf.keras.layers.Dense(dmodel))
recover_dmodels = recover_dmodels[:-1]
recover_head_sizes = recover_head_sizes[:-1]
conformer_block = ConformerBlock(
input_dim=dmodel,
dropout=dropout,
fc_factor=fc_factor,
head_size=head_size,
num_heads=num_heads,
mha_type=mha_type,
kernel_size=kernel_size,
name=f"{name}_block_{i}",
fixed_arch=None if fixed_arch is None else fixed_arch[i],
no_post_ln=no_post_ln,
conv_use_glu=conv_use_glu,
adaptive_scale=adaptive_scale,
)
self.conformer_blocks.append(conformer_block)
def call(self, inputs, length, training=False, mask=None, **kwargs):
# input with shape [B, T, V1, V2]
outputs = self.conv_subsampling(inputs, training=training)
outputs = self.linear(outputs, training=training)
padding, kernel_size, stride, num_subsample = 1, 3, 2, 2 #TODO: set these in __init__
for _ in range(num_subsample):
length = tf.math.ceil((tf.cast(length, tf.float32) + (2 * padding) - (kernel_size - 1) - 1) / float(stride) + 1)
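        # Worked example of the length reduction above (padding=1, kernel_size=3, stride=2, 2 passes):
        # 100 input frames -> ceil((100 + 2 - 2 - 1) / 2 + 1) = 51 -> 26 frames, i.e. roughly the 4x
        # time reduction performed by the convolutional subsampling front-end.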
pad_mask = tf.sequence_mask(length, maxlen=tf.shape(outputs)[1])
mask = tf.expand_dims(pad_mask, 1)
mask = tf.repeat(mask, repeats=[tf.shape(mask)[-1]], axis=1)
mask = tf.math.logical_and(tf.transpose(mask, perm=[0, 2, 1]), mask)
pe = self.pe(outputs)
outputs = outputs * self.xscale
outputs = self.do(outputs, training=training)
pe_org, mask_org = pe, mask
recover_activations = []
index = 0 # index to point the queues for pe, recover activations, etc.
outputs = self.pre_ln(outputs, training=training)
for i, cblock in enumerate(self.conformer_blocks):
if self.time_reduce is not None and i in self.reduce_idx:
recover_activations.append((outputs, mask, pad_mask, pe))
outputs, mask, pad_mask = self.time_reduction_layers[index](
outputs, training=training, mask=mask, pad_mask=pad_mask, **kwargs,
)
pe = self.pe_time_reduction[index](outputs)
index += 1
if self.time_reduce == 'recover' and i in self.recover_idx:
index -= 1
recover_activation, mask, pad_mask, pe = recover_activations[index]
B, T, E = shape_util.shape_list(outputs)
outputs = tf.repeat(outputs, [self.reduce_stride] * T, axis=1)
B, T, E = shape_util.shape_list(recover_activation)
outputs = self.time_recover_layers[index](outputs[:, :T, :], training=training)
outputs = outputs + recover_activation
outputs = cblock([outputs, pe], training=training, mask=mask, pad_mask=pad_mask, **kwargs)
return outputs
| 23,551 | 39.191126 | 124 | py |
Squeezeformer | Squeezeformer-main/src/models/ctc.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
import numpy as np
import tensorflow as tf
from .base_model import BaseModel
from ..featurizers.speech_featurizers import TFSpeechFeaturizer
from ..featurizers.text_featurizers import TextFeaturizer
from ..utils import math_util, shape_util, data_util
from ..losses.ctc_loss import CtcLoss
logger = tf.get_logger()
class CtcModel(BaseModel):
def __init__(
self,
encoder: tf.keras.Model,
decoder: Union[tf.keras.Model, tf.keras.layers.Layer] = None,
augmentation: tf.keras.Model = None,
vocabulary_size: int = None,
**kwargs,
):
super().__init__(**kwargs)
self.encoder = encoder
if decoder is None:
assert vocabulary_size is not None, "vocabulary_size must be set"
self.decoder = tf.keras.layers.Dense(units=vocabulary_size, name=f"{self.name}_logits")
else:
self.decoder = decoder
self.augmentation = augmentation
self.time_reduction_factor = 1
def make(self, input_shape, batch_size=None):
inputs = tf.keras.Input(input_shape, batch_size=batch_size, dtype=tf.float32)
inputs_length = tf.keras.Input(shape=[], batch_size=batch_size, dtype=tf.int32)
self(
data_util.create_inputs(
inputs=inputs,
inputs_length=inputs_length
),
training=False
)
def compile(self, optimizer, blank=0, run_eagerly=None, **kwargs):
loss = CtcLoss(blank=blank)
super().compile(loss=loss, optimizer=optimizer, run_eagerly=run_eagerly, **kwargs)
def add_featurizers(
self,
speech_featurizer: TFSpeechFeaturizer,
text_featurizer: TextFeaturizer,
):
self.speech_featurizer = speech_featurizer
self.text_featurizer = text_featurizer
def call(self, inputs, training=False, **kwargs):
x, x_length = inputs["inputs"], inputs["inputs_length"]
if training and self.augmentation is not None:
x = self.augmentation(x, x_length)
logits = self.encoder(x, x_length, training=training, **kwargs)
logits = self.decoder(logits, training=training, **kwargs)
return data_util.create_logits(
logits=logits,
logits_length=math_util.get_reduced_length(x_length, self.time_reduction_factor)
)
# -------------------------------- GREEDY -------------------------------------
@tf.function
def recognize_from_logits(self, logits: tf.Tensor, lengths: tf.Tensor):
probs = tf.nn.softmax(logits)
        # blank is at the first index of `probs`, whereas `ctc_greedy_decoder` expects it at the last index;
        # therefore, we move the first column to the last column to stay compatible with `ctc_greedy_decoder`
probs = tf.concat([probs[:, :, 1:], tf.expand_dims(probs[:, :, 0], -1)], axis=-1)
def _map(elems): return tf.numpy_function(self._perform_greedy, inp=[elems[0], elems[1]], Tout=tf.string)
return tf.map_fn(_map, (probs, lengths), fn_output_signature=tf.TensorSpec([], dtype=tf.string))
@tf.function
def recognize(self, inputs: Dict[str, tf.Tensor]):
logits = self(inputs, training=False)
probs = tf.nn.softmax(logits["logits"])
# send the first index (skip token) to the last index
# for compatibility with the ctc_decoders library
probs = tf.concat([probs[:, :, 1:], tf.expand_dims(probs[:, :, 0], -1)], axis=-1)
lengths = logits["logits_length"]
def map_fn(elem): return tf.numpy_function(self._perform_greedy, inp=[elem[0], elem[1]], Tout=tf.string)
return tf.map_fn(map_fn, [probs, lengths], fn_output_signature=tf.TensorSpec([], dtype=tf.string))
def _perform_greedy(self, probs: np.ndarray, length):
from ctc_decoders import ctc_greedy_decoder
decoded = ctc_greedy_decoder(probs[:length], vocabulary=self.text_featurizer.non_blank_tokens)
return tf.convert_to_tensor(decoded, dtype=tf.string)
# -------------------------------- BEAM SEARCH -------------------------------------
@tf.function
def recognize_beam(self, inputs: Dict[str, tf.Tensor], lm: bool = False):
logits = self(inputs, training=False)
probs = tf.nn.softmax(logits["logits"])
def map_fn(prob): return tf.numpy_function(self._perform_beam_search, inp=[prob, lm], Tout=tf.string)
return tf.map_fn(map_fn, probs, dtype=tf.string)
def _perform_beam_search(self, probs: np.ndarray, lm: bool = False):
from ctc_decoders import ctc_beam_search_decoder
decoded = ctc_beam_search_decoder(
probs_seq=probs,
vocabulary=self.text_featurizer.non_blank_tokens,
beam_size=self.text_featurizer.decoder_config.beam_width,
ext_scoring_func=self.text_featurizer.scorer if lm else None
)
decoded = decoded[0][-1]
return tf.convert_to_tensor(decoded, dtype=tf.string)
| 5,608 | 41.172932 | 113 | py |
Squeezeformer | Squeezeformer-main/src/models/conformer.py | import tensorflow as tf
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.framework import ops
from tensorflow.python.eager import def_function
from .ctc import CtcModel
from .conformer_encoder import ConformerEncoder
from ..augmentations.augmentation import SpecAugmentation
from ..utils import math_util
from ..utils.training_utils import (
_minimum_control_deps,
reduce_per_replica,
write_scalar_summaries,
)
class ConformerCtc(CtcModel):
def __init__(
self,
vocabulary_size: int,
encoder_subsampling: dict,
encoder_dmodel: int = 144,
encoder_num_blocks: int = 16,
encoder_head_size: int = 36,
encoder_num_heads: int = 4,
encoder_mha_type: str = "relmha",
encoder_kernel_size: int = 32,
encoder_fc_factor: float = 0.5,
encoder_dropout: float = 0,
encoder_time_reduce_idx : list = None,
encoder_time_recover_idx : list = None,
encoder_conv_use_glu: bool = False,
encoder_ds_subsample: bool = False,
encoder_no_post_ln: bool = False,
encoder_adaptive_scale: bool = False,
encoder_fixed_arch: list = None,
augmentation_config=None,
name: str = "conformer",
**kwargs,
    ) -> None:
assert encoder_dmodel == encoder_num_heads * encoder_head_size
        if encoder_fixed_arch is not None and not isinstance(encoder_fixed_arch[0], list):
            encoder_fixed_arch = [encoder_fixed_arch] * encoder_num_blocks
super().__init__(
encoder=ConformerEncoder(
subsampling=encoder_subsampling,
dmodel=encoder_dmodel,
num_blocks=encoder_num_blocks,
head_size=encoder_head_size,
num_heads=encoder_num_heads,
mha_type=encoder_mha_type,
kernel_size=encoder_kernel_size,
fc_factor=encoder_fc_factor,
dropout=encoder_dropout,
time_reduce_idx=encoder_time_reduce_idx,
time_recover_idx=encoder_time_recover_idx,
conv_use_glu=encoder_conv_use_glu,
ds_subsample=encoder_ds_subsample,
no_post_ln=encoder_no_post_ln,
adaptive_scale=encoder_adaptive_scale,
fixed_arch=encoder_fixed_arch,
name=f"{name}_encoder",
),
decoder=tf.keras.layers.Conv1D(
filters=vocabulary_size, kernel_size=1,
strides=1, padding="same",
name=f"{name}_logits"
),
augmentation = SpecAugmentation(
num_freq_masks=augmentation_config['freq_masking']['num_masks'],
freq_mask_len=augmentation_config['freq_masking']['mask_factor'],
num_time_masks=augmentation_config['time_masking']['num_masks'],
time_mask_prop=augmentation_config['time_masking']['p_upperbound'],
name=f"{name}_specaug"
) if augmentation_config is not None else None,
vocabulary_size=vocabulary_size,
name=name,
**kwargs
)
self.time_reduction_factor = self.encoder.conv_subsampling.time_reduction_factor
self.dmodel = encoder_dmodel
    # The following functions override Keras' original implementations
    # in order to gather the outputs from multiple TPU cores
def make_train_function(self):
if self.train_function is not None:
return self.train_function
def step_function(model, iterator):
"""Runs a single training step."""
def run_step(data):
outputs = model.train_step(data)
# Ensure counter is updated only if `train_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
model._train_counter.assign_add(1) # pylint: disable=protected-access
return outputs
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(outputs, self.distribute_strategy)
write_scalar_summaries(outputs, step=model._train_counter) # pylint: disable=protected-access
return outputs
if self._steps_per_execution.numpy().item() == 1:
def train_function(iterator):
"""Runs a training execution with one step."""
return step_function(self, iterator)
else:
def train_function(iterator):
"""Runs a training execution with multiple steps."""
for _ in math_ops.range(self._steps_per_execution):
outputs = step_function(self, iterator)
return outputs
if not self.run_eagerly:
train_function = def_function.function(
train_function, experimental_relax_shapes=True)
self.train_function = train_function
if self._cluster_coordinator:
self.train_function = lambda iterator: self._cluster_coordinator.schedule( # pylint: disable=g-long-lambda
train_function, args=(iterator,))
return self.train_function
def make_test_function(self):
if self.test_function is not None:
return self.test_function
def step_function(model, iterator):
"""Runs a single evaluation step."""
def run_step(data):
outputs = model.test_step(data)
# Ensure counter is updated only if `test_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
model._test_counter.assign_add(1) # pylint: disable=protected-access
return outputs
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(outputs, self.distribute_strategy)
return outputs
if self._steps_per_execution.numpy().item() == 1:
def test_function(iterator):
"""Runs an evaluation execution with one step."""
return step_function(self, iterator)
else:
def test_function(iterator):
"""Runs an evaluation execution with multiple steps."""
for _ in math_ops.range(self._steps_per_execution):
outputs = step_function(self, iterator)
return outputs
if not self.run_eagerly:
test_function = def_function.function(test_function, experimental_relax_shapes=True)
self.test_function = test_function
if self._cluster_coordinator:
self.test_function = lambda iterator: self._cluster_coordinator.schedule( # pylint: disable=g-long-lambda
test_function, args=(iterator,))
return self.test_function
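# A minimal sketch (illustrative values only) of the nested dict expected for
# `augmentation_config` in ConformerCtc.__init__ above; the keys mirror the
# lookups performed there and the values shown match the SpecAugmentation
# defaults rather than any particular training recipe.
def _example_augmentation_config() -> dict:
    return {
        "freq_masking": {"num_masks": 2, "mask_factor": 27},
        "time_masking": {"num_masks": 5, "p_upperbound": 0.05},
    }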
class ConformerCtcAccumulate(ConformerCtc):
    def __init__(self, n_gradients: int = 1, **kwargs) -> None:
super().__init__(**kwargs)
self.time_reduction_factor = self.encoder.conv_subsampling.time_reduction_factor
self.n_gradients = tf.constant(n_gradients, dtype=tf.int32, name="conformer/num_accumulated_gradients")
self.n_acum_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="conformer/accumulate_step")
def make(self, input_shape, batch_size=None):
super().make(input_shape, batch_size)
self.gradient_accumulation = [
tf.Variable(tf.zeros_like(v, dtype=tf.float32), trainable=False, name=f"{v.name}/cached_accumulated_gradient") for v in self.trainable_variables
]
def train_step(self, batch):
"""
Args:
batch ([tf.Tensor]): a batch of training data
Returns:
Dict[tf.Tensor]: a dict of validation metrics with keys are the name of metric
"""
self.n_acum_step.assign_add(1)
inputs, y_true = batch
loss, y_pred, gradients = self.gradient_step(inputs, y_true)
for i in range(len(self.gradient_accumulation)):
self.gradient_accumulation[i].assign_add(gradients[i] / tf.cast(self.n_gradients, tf.float32))
tf.cond(tf.equal(self.n_acum_step, self.n_gradients), self.apply_accu_gradients, lambda: None)
self._metrics["loss"].update_state(loss)
if 'WER' in self._metrics:
self._metrics['WER'].update_state(y_true, y_pred)
return {m.name: m.result() for m in self.metrics}
def apply_accu_gradients(self):
# Apply accumulated gradients
self.optimizer.apply_gradients(zip(self.gradient_accumulation,
self.trainable_variables))
# Reset
self.n_acum_step.assign(0)
for i in range(len(self.gradient_accumulation)):
self.gradient_accumulation[i].assign(
tf.zeros_like(self.trainable_variables[i], dtype=tf.float32)
)
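# A minimal standalone sketch (toy loss and optimizer, independent of the
# classes above) of the gradient-accumulation idea used by
# ConformerCtcAccumulate: gradients from `n_gradients` micro-batches are
# averaged into a cached variable and applied in a single optimizer step,
# approximating an `n_gradients`-times larger batch.
def _example_gradient_accumulation(n_gradients: int = 4):
    w = tf.Variable(1.0)
    opt = tf.keras.optimizers.SGD(learning_rate=0.1)
    acc = tf.Variable(0.0, trainable=False)  # cached accumulated gradient
    for _ in range(n_gradients):
        with tf.GradientTape() as tape:
            loss = tf.square(w)  # stand-in for the CTC loss of one micro-batch
        (grad,) = tape.gradient(loss, [w])
        acc.assign_add(grad / n_gradients)  # average instead of sum
    opt.apply_gradients([(acc.read_value(), w)])  # single weight update
    acc.assign(0.0)  # reset for the next accumulation cycle
    return w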
| 9,029 | 39.493274 | 160 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/multihead_attention.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import tensorflow as tf
from src.utils import shape_util
logger = tf.get_logger()
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(
self,
num_heads,
head_size,
output_size: int = None,
dropout: float = 0.0,
use_projection_bias: bool = True,
return_attn_coef: bool = False,
**kwargs,
):
super(MultiHeadAttention, self).__init__(**kwargs)
if output_size is not None and output_size < 1:
raise ValueError("output_size must be a positive number")
self.head_size = head_size
self.num_heads = num_heads
self.output_size = output_size
self.use_projection_bias = use_projection_bias
self.return_attn_coef = return_attn_coef
self.dropout = tf.keras.layers.Dropout(dropout, name="dropout")
        self._dropout_rate = dropout
def build(self, input_shape):
num_query_features = input_shape[0][-1]
num_key_features = input_shape[1][-1]
num_value_features = (
input_shape[2][-1] if len(input_shape) > 2 else num_key_features
)
output_size = (
self.output_size if self.output_size is not None else num_value_features
)
input_max = (self.num_heads * self.head_size) ** -0.5
self.query = tf.keras.layers.Dense(
self.num_heads * self.head_size, activation=None,
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
self.key = tf.keras.layers.Dense(
self.num_heads * self.head_size, activation=None,
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
self.value = tf.keras.layers.Dense(
self.num_heads * self.head_size, activation=None,
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
self.projection_kernel = self.add_weight(
name="projection_kernel",
shape=[self.num_heads, self.head_size, output_size],
initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
if self.use_projection_bias:
self.projection_bias = self.add_weight(
name="projection_bias",
shape=[output_size],
initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
else:
self.projection_bias = None
def call_qkv(self, query, key, value, training=False):
# verify shapes
if key.shape[-2] != value.shape[-2]:
raise ValueError(
"the number of elements in 'key' must be equal to "
"the same as the number of elements in 'value'"
)
# Linear transformations
query = self.query(query)
B, T, E = shape_util.shape_list(query)
query = tf.reshape(query, [B, T, self.num_heads, self.head_size])
key = self.key(key)
B, T, E = shape_util.shape_list(key)
key = tf.reshape(key, [B, T, self.num_heads, self.head_size])
value = self.value(value)
B, T, E = shape_util.shape_list(value)
value = tf.reshape(value, [B, T, self.num_heads, self.head_size])
return query, key, value
def call_attention(self, query, key, value, logits, training=False, mask=None):
        # mask: attention mask with shape [B, Tquery, Tkey], with 1 for positions to attend to and 0 for masked positions
if mask is not None:
if len(mask.shape) < 2:
raise ValueError("'mask' must have at least 2 dimensions")
if query.shape[-3] != mask.shape[-2]:
raise ValueError(
"mask's second to last dimension must be equal to "
"the number of elements in 'query'"
)
if key.shape[-3] != mask.shape[-1]:
raise ValueError(
"mask's last dimension must be equal to the number of elements in 'key'"
)
# apply mask
if mask is not None:
mask = tf.cast(mask, tf.float32)
# possibly expand on the head dimension so broadcasting works
if len(mask.shape) != len(logits.shape):
mask = tf.expand_dims(mask, -3)
logits += -10e9 * (1.0 - mask)
attn_coef = tf.nn.softmax(logits)
# attention dropout
attn_coef_dropout = self.dropout(attn_coef, training=training)
# attention * value
multihead_output = tf.einsum("...HNM,...MHI->...NHI", attn_coef_dropout, value)
# Run the outputs through another linear projection layer. Recombining heads
# is automatically done.
output = tf.einsum("...NHI,HIO->...NO", multihead_output, self.projection_kernel)
if self.projection_bias is not None:
output += self.projection_bias
return output, attn_coef
def call(self, inputs, training=False, mask=None, **kwargs):
query, key, value = inputs
query, key, value = self.call_qkv(query, key, value, training=training)
# Scale dot-product, doing the division to either query or key
# instead of their product saves some computation
depth = tf.constant(self.head_size, dtype=tf.float32)
query /= tf.sqrt(depth)
# Calculate dot product attention
logits = tf.einsum("...NHO,...MHO->...HNM", query, key)
output, attn_coef = self.call_attention(query, key, value, logits,
training=training, mask=mask)
if self.return_attn_coef:
return output, attn_coef
else:
return output
def compute_output_shape(self, input_shape):
num_value_features = (
input_shape[2][-1] if len(input_shape) > 2 else input_shape[1][-1]
)
output_size = (
self.output_size if self.output_size is not None else num_value_features
)
output_shape = input_shape[0][:-1] + (output_size,)
if self.return_attn_coef:
num_query_elements = input_shape[0][-2]
num_key_elements = input_shape[1][-2]
attn_coef_shape = input_shape[0][:-2] + (
self.num_heads,
num_query_elements,
num_key_elements,
)
return output_shape, attn_coef_shape
else:
return output_shape
def get_config(self):
config = super().get_config()
config.update(
head_size=self.head_size,
num_heads=self.num_heads,
output_size=self.output_size,
            dropout=self._dropout_rate,
use_projection_bias=self.use_projection_bias,
return_attn_coef=self.return_attn_coef,
)
return config
class RelPositionMultiHeadAttention(MultiHeadAttention):
def __init__(self, kernel_sizes=None, strides=None, **kwargs):
super(RelPositionMultiHeadAttention, self).__init__(**kwargs)
def build(self, input_shape):
num_pos_features = input_shape[-1][-1]
input_max = (self.num_heads * self.head_size) ** -0.5
self.pos_kernel = self.add_weight(
name="pos_kernel",
shape=[self.num_heads, num_pos_features, self.head_size],
initializer=tf.keras.initializers.RandomUniform(minval=-input_max, maxval=input_max),
)
self.pos_bias_u = self.add_weight(
name="pos_bias_u",
shape=[self.num_heads, self.head_size],
initializer=tf.keras.initializers.Zeros(),
)
self.pos_bias_v = self.add_weight(
name="pos_bias_v",
shape=[self.num_heads, self.head_size],
initializer=tf.keras.initializers.Zeros(),
)
super(RelPositionMultiHeadAttention, self).build(input_shape[:-1])
    @staticmethod
    def relative_shift(x):
        # Transformer-XL style relative shift: pad one zero column on the left
        # of the last axis, fold it into the second-to-last axis, then drop the
        # first row so that every query position reads its positional logits at
        # the correct relative offsets.
        x_shape = tf.shape(x)
        x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
        x = tf.reshape(x, [x_shape[0], x_shape[1], x_shape[3] + 1, x_shape[2]])
        x = tf.reshape(x[:, :, 1:, :], x_shape)
        return x
def call(self, inputs, training=False, mask=None, **kwargs):
query, key, value, pos = inputs
query, key, value = self.call_qkv(query, key, value, training=training)
pos = tf.einsum("...MI,HIO->...MHO", pos, self.pos_kernel)
query_with_u = query + self.pos_bias_u
query_with_v = query + self.pos_bias_v
logits_with_u = tf.einsum("...NHO,...MHO->...HNM", query_with_u, key)
logits_with_v = tf.einsum("...NHO,...MHO->...HNM", query_with_v, pos)
logits_with_v = self.relative_shift(logits_with_v)
logits = logits_with_u + logits_with_v[:, :, :, :tf.shape(logits_with_u)[3]]
depth = tf.constant(self.head_size, dtype=tf.float32)
logits /= tf.sqrt(depth)
output, attn_coef = self.call_attention(query, key, value, logits,
training=training, mask=mask)
if self.return_attn_coef:
return output, attn_coef
else:
return output
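# A minimal usage sketch (shapes are illustrative assumptions, not taken from
# the encoder config): plain self-attention where query, key and value are the
# same [B, T, E] tensor; `output_size` defaults to the value feature size.
def _example_multi_head_attention():
    mha = MultiHeadAttention(num_heads=4, head_size=36)
    x = tf.random.normal([2, 50, 144])
    return mha([x, x, x])  # -> [2, 50, 144]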
| 10,251 | 37.397004 | 116 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/time_reduction.py | import tensorflow as tf
from ...utils import shape_util
class TimeReductionLayer(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
output_dim,
kernel_size=5,
stride=2,
dropout=0.0,
name="time_reduction",
**kwargs,
):
super(TimeReductionLayer, self).__init__(name=name, **kwargs)
self.stride = stride
self.kernel_size = kernel_size
dw_max = kernel_size ** -0.5
pw_max = input_dim ** -0.5
self.dw_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_size, 1), strides=self.stride,
padding="valid", name=f"{name}_dw_conv",
depth_multiplier=1,
depthwise_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
)
#self.swish = tf.keras.layers.Activation(tf.nn.swish, name=f"{name}_swish_activation")
self.pw_conv = tf.keras.layers.Conv2D(
filters=output_dim, kernel_size=1, strides=1,
padding="valid", name=f"{name}_pw_conv_2",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-pw_max, maxval=pw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-pw_max, maxval=pw_max),
)
def call(self, inputs, training=False, mask=None, pad_mask=None, **kwargs):
B, T, E = shape_util.shape_list(inputs)
outputs = tf.reshape(inputs, [B, T, 1, E])
_pad_mask = tf.expand_dims(tf.expand_dims(pad_mask, -1), -1)
outputs = outputs * tf.cast(_pad_mask, "float32")
padding = max(0, self.kernel_size - self.stride)
outputs = tf.pad(outputs, [[0, 0], [0, padding], [0, 0], [0, 0]])
outputs = self.dw_conv(outputs, training=training)
outputs = self.pw_conv(outputs, training=training)
B, T, _, E = shape_util.shape_list(outputs)
outputs = tf.reshape(outputs, [B, T, E])
mask = mask[:, ::self.stride, ::self.stride]
pad_mask = pad_mask[:, ::self.stride]
_, L = shape_util.shape_list(pad_mask)
outputs = tf.pad(outputs, [[0, 0], [0, L - T], [0, 0]])
return outputs, mask, pad_mask
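# A minimal usage sketch (dummy shapes chosen for illustration): `mask` is a
# [B, T, T] attention mask and `pad_mask` a [B, T] padding mask, matching how
# they are sliced above; stride 2 roughly halves the time axis of all three
# outputs.
def _example_time_reduction():
    layer = TimeReductionLayer(input_dim=144, output_dim=144, kernel_size=5, stride=2)
    x = tf.random.normal([2, 100, 144])
    att_mask = tf.ones([2, 100, 100])
    pad_mask = tf.ones([2, 100])
    outputs, att_mask_ds, pad_mask_ds = layer(x, mask=att_mask, pad_mask=pad_mask)
    return outputs, att_mask_ds, pad_mask_ds  # time axis reduced from 100 to 50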
| 2,287 | 42.169811 | 101 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/subsampling.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from src.utils import shape_util, math_util
logger = tf.get_logger()
class Conv2dSubsampling(tf.keras.layers.Layer):
def __init__(
self,
filters: int,
strides: int = 2,
kernel_size: int = 3,
ds: bool = False,
name="Conv2dSubsampling",
**kwargs,
):
super(Conv2dSubsampling, self).__init__(name=name, **kwargs)
self.strides = strides
self.kernel_size = kernel_size
        assert self.strides == 2 and self.kernel_size == 3  # fixed to these values for simplicity
conv1_max = kernel_size ** -1
conv2_max = (kernel_size ** 2 * filters) ** -0.5
self.conv1 = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size,
strides=strides, padding="valid", name=f"{name}_1",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-conv1_max, maxval=conv1_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-conv1_max, maxval=conv1_max),
)
self.ds = ds
if not ds:
logger.info("Subsampling with full conv")
self.conv2 = tf.keras.layers.Conv2D(
filters=filters, kernel_size=kernel_size,
strides=strides, padding="valid", name=f"{name}_2",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-conv2_max, maxval=conv2_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-conv2_max, maxval=conv2_max),
)
self.time_reduction_factor = self.conv1.strides[0] + self.conv2.strides[0]
else:
logger.info("Subsampling with DS conv")
dw_max = (kernel_size ** 2) ** -0.5
pw_max = filters ** -0.5
self.dw_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_size, kernel_size), strides=strides,
padding="valid", name=f"{name}_2_dw",
depth_multiplier=1,
depthwise_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-dw_max, maxval=dw_max),
)
self.pw_conv = tf.keras.layers.Conv2D(
filters=filters, kernel_size=1, strides=1,
padding="valid", name=f"{name}_2_pw",
kernel_initializer=tf.keras.initializers.RandomUniform(minval=-pw_max, maxval=pw_max),
bias_initializer=tf.keras.initializers.RandomUniform(minval=-pw_max, maxval=pw_max),
)
self.time_reduction_factor = self.conv1.strides[0] + self.dw_conv.strides[0]
def call(self, inputs, training=False, **kwargs):
_, L, H, _ = shape_util.shape_list(inputs)
assert H == 80
outputs = tf.pad(inputs, [[0, 0], [0, 1], [0, 1], [0, 0]])
outputs = self.conv1(outputs, training=training)
outputs = tf.nn.relu(outputs)
outputs = tf.pad(outputs, [[0, 0], [0, 1], [0, 1], [0, 0]])
if not self.ds:
outputs = self.conv2(outputs, training=training)
else:
outputs = self.dw_conv(outputs, training=training)
outputs = self.pw_conv(outputs, training=training)
outputs = tf.nn.relu(outputs)
_, L, H, _ = shape_util.shape_list(outputs)
assert H == 20
return math_util.merge_two_last_dims(outputs)
    def get_config(self):
        conf = super(Conv2dSubsampling, self).get_config()
        conf.update(self.conv1.get_config())
        # The second stage is either a full conv or a depthwise-separable pair,
        # so only the layers that actually exist are serialized.
        if not self.ds:
            conf.update(self.conv2.get_config())
        else:
            conf.update(self.dw_conv.get_config())
            conf.update(self.pw_conv.get_config())
        return conf
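# A minimal usage sketch (dummy shapes, assuming 80-dim log-mel features as
# asserted in `call` above): two stride-2 stages reduce the time axis roughly
# 4x and the 80 frequency bins to 20 before merging them with the channels.
def _example_conv2d_subsampling():
    subsampling = Conv2dSubsampling(filters=144)
    x = tf.random.normal([2, 100, 80, 1])  # [B, T, F, 1]
    return subsampling(x)  # -> [2, 25, 20 * 144]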
| 4,228 | 43.989362 | 108 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/glu.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class GLU(tf.keras.layers.Layer):
def __init__(self,
axis=-1,
name="glu_activation",
**kwargs):
super(GLU, self).__init__(name=name, **kwargs)
self.axis = axis
def call(self, inputs, **kwargs):
a, b = tf.split(inputs, 2, axis=self.axis)
b = tf.nn.sigmoid(b)
return tf.multiply(a, b)
def get_config(self):
conf = super(GLU, self).get_config()
conf.update({"axis": self.axis})
return conf
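# A minimal usage sketch: GLU splits the last axis in half and gates one half
# with the sigmoid of the other, so the feature dimension is halved.
def _example_glu():
    glu = GLU()
    x = tf.random.normal([2, 10, 8])
    return glu(x)  # -> [2, 10, 4]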
| 1,132 | 31.371429 | 74 | py |
Squeezeformer | Squeezeformer-main/src/models/submodules/positional_encoding.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import tensorflow as tf
from src.utils.shape_util import shape_list
class PositionalEncoding(tf.keras.layers.Layer):
'''
Same positional encoding method as NeMo library
'''
def __init__(self, d_model, max_len=5000, name="positional_encoding_nemo", **kwargs):
super().__init__(trainable=False, name=name, **kwargs)
self.max_len = max_len
positions = tf.expand_dims(tf.range(self.max_len - 1, -max_len, -1.0, dtype=tf.float32), axis=1)
pos_length = tf.shape(positions)[0]
pe = np.zeros([pos_length, d_model], 'float32')
div_term = np.exp(
tf.range(0, d_model, 2, dtype=tf.float32) * -(math.log(10000.0) / d_model)
)
pe[:, 0::2] = np.sin(positions * div_term)
pe[:, 1::2] = np.cos(positions * div_term)
pe = tf.convert_to_tensor(pe)
self.pe = tf.expand_dims(pe, 0)
def call(self, inputs, **kwargs):
# inputs shape [B, T, V]
_, length, dmodel = shape_list(inputs)
center_pos = tf.shape(self.pe)[1] // 2
start_pos = center_pos - length + 1
end_pos = center_pos + length
pos_emb = self.pe[:, start_pos:end_pos]
return tf.cast(pos_emb, dtype=inputs.dtype)
    def get_config(self):
        conf = super().get_config()
        conf.update({"max_len": self.max_len})
        return conf
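# A minimal usage sketch (dummy shapes): for an input of length T the layer
# returns the 2*T - 1 sinusoidal encodings centered on relative offset 0
# (offsets T-1 down to -(T-1)), e.g. as the `pos` input of a relative-position
# attention layer.
def _example_positional_encoding():
    pe = PositionalEncoding(d_model=144)
    x = tf.random.normal([2, 100, 144])
    return pe(x)  # -> [1, 199, 144]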
| 1,965 | 38.32 | 104 | py |
Squeezeformer | Squeezeformer-main/src/augmentations/augmentation.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from ..utils import shape_util
class SpecAugmentation(tf.keras.Model):
def __init__(
self,
num_freq_masks=2,
freq_mask_len=27,
num_time_masks=5,
time_mask_prop=0.05,
name='specaug',
**kwargs,
):
super(SpecAugmentation, self).__init__(name=name, **kwargs)
self.num_freq_masks = num_freq_masks
self.freq_mask_len = freq_mask_len
self.num_time_masks = num_time_masks
self.time_mask_prop = time_mask_prop
def time_mask(self, inputs, inputs_len):
time_max = inputs_len
B, T, F = tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2]
t = tf.random.uniform(shape=tf.shape(time_max), minval=0, maxval=self.time_mask_prop)
t = tf.cast(tf.cast(time_max, tf.dtypes.float32) * t, 'int32')
t0 = tf.random.uniform(shape=tf.shape(time_max), minval=0, maxval=1)
t0 = tf.cast(tf.cast(time_max - t, tf.dtypes.float32) * t0, 'int32')
t = tf.repeat(tf.reshape(t, (-1, 1)), T, axis=1)
t0 = tf.repeat(tf.reshape(t0, (-1, 1)), T, axis=1)
indices = tf.repeat(tf.reshape(tf.range(T), (1, -1)), B, axis=0)
left_mask = tf.cast(tf.math.greater_equal(indices, t0), 'float32')
right_mask = tf.cast(tf.math.less(indices, t0 + t), 'float32')
mask = 1.0 - left_mask * right_mask
masked_inputs = inputs * tf.reshape(mask, (B, T, 1, 1))
return masked_inputs
def frequency_mask(self, inputs, inputs_len):
B, T, F = tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2]
f = tf.random.uniform(shape=tf.shape(inputs_len), minval=0, maxval=self.freq_mask_len, dtype='int32')
f0 = tf.random.uniform(shape=tf.shape(inputs_len), minval=0, maxval=1)
f0 = tf.cast(tf.cast(F - f, tf.dtypes.float32) * f0, 'int32')
f = tf.repeat(tf.reshape(f, (-1, 1)), F, axis=1)
f0 = tf.repeat(tf.reshape(f0, (-1, 1)), F, axis=1)
indices = tf.repeat(tf.reshape(tf.range(F), (1, -1)), B, axis=0)
left_mask = tf.cast(tf.math.greater_equal(indices, f0), 'float32')
right_mask = tf.cast(tf.math.less(indices, f0 + f), 'float32')
mask = 1.0 - left_mask * right_mask
masked_inputs = inputs * tf.reshape(mask, (B, 1, F, 1))
return masked_inputs
@tf.function
def call(self, inputs, inputs_len):
masked_inputs = inputs
for _ in range(self.num_time_masks):
masked_inputs = self.time_mask(masked_inputs, inputs_len)
for _ in range(self.num_freq_masks):
masked_inputs = self.frequency_mask(masked_inputs, inputs_len)
return masked_inputs
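# A minimal usage sketch (dummy shapes): inputs are [B, T, F, 1] feature maps
# and `inputs_len` holds the unpadded length of each utterance, so the sampled
# time masks stay within the valid region of each example.
def _example_spec_augmentation():
    aug = SpecAugmentation()
    features = tf.random.normal([2, 300, 80, 1])
    feature_lengths = tf.constant([300, 250], dtype=tf.int32)
    return aug(features, feature_lengths)  # same shape, with masked patches zeroed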
| 3,294 | 38.698795 | 109 | py |
Squeezeformer | Squeezeformer-main/src/featurizers/speech_featurizers.py | # Copyright 2020 Huy Le Nguyen (@usimarit) and Huy Phan (@pquochuy)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import abc
import math
from typing import Union
import numpy as np
import librosa
import soundfile as sf
import tensorflow as tf
import tensorflow_io as tfio
from ..utils import math_util, env_util
def load_and_convert_to_wav(path: str) -> tf.Tensor:
wave, rate = librosa.load(os.path.expanduser(path), sr=None, mono=True)
return tf.audio.encode_wav(tf.expand_dims(wave, axis=-1), sample_rate=rate)
def read_raw_audio(audio: Union[str, bytes, np.ndarray], sample_rate=16000) -> np.ndarray:
if isinstance(audio, str):
wave, _ = librosa.load(os.path.expanduser(audio), sr=sample_rate, mono=True)
elif isinstance(audio, bytes):
wave, sr = sf.read(io.BytesIO(audio))
if wave.ndim > 1: wave = np.mean(wave, axis=-1)
wave = np.asfortranarray(wave)
if sr != sample_rate: wave = librosa.resample(wave, sr, sample_rate)
elif isinstance(audio, np.ndarray):
        if audio.ndim > 1: raise ValueError("input audio must be single channel")
return audio
else:
raise ValueError("input audio must be either a path or bytes")
return wave
def tf_read_raw_audio(audio: tf.Tensor, sample_rate=16000) -> tf.Tensor:
wave, rate = tf.audio.decode_wav(audio, desired_channels=1, desired_samples=-1)
if not env_util.has_devices("TPU"):
resampled = tfio.audio.resample(wave, rate_in=tf.cast(rate, dtype=tf.int64), rate_out=sample_rate)
return tf.reshape(resampled, shape=[-1]) # reshape for using tf.signal
return tf.reshape(wave, shape=[-1]) # reshape for using tf.signal
def slice_signal(signal, window_size, stride=0.5) -> np.ndarray:
""" Return windows of the given signal by sweeping in stride fractions of window """
assert signal.ndim == 1, signal.ndim
n_samples = signal.shape[0]
offset = int(window_size * stride)
slices = []
for beg_i, end_i in zip(range(0, n_samples, offset),
range(window_size, n_samples + offset,
offset)):
slice_ = signal[beg_i:end_i]
if slice_.shape[0] < window_size:
slice_ = np.pad(
slice_, (0, window_size - slice_.shape[0]), 'constant', constant_values=0.0)
if slice_.shape[0] == window_size:
slices.append(slice_)
return np.array(slices, dtype=np.float32)
def tf_merge_slices(slices: tf.Tensor) -> tf.Tensor:
# slices shape = [batch, window_size]
return tf.keras.backend.flatten(slices) # return shape = [-1, ]
def merge_slices(slices: np.ndarray) -> np.ndarray:
# slices shape = [batch, window_size]
return np.reshape(slices, [-1])
def tf_normalize_audio_features(audio_feature: tf.Tensor, per_frame=False) -> tf.Tensor:
"""
TF Mean and variance features normalization
Args:
audio_feature: tf.Tensor with shape [T, F]
Returns:
normalized audio features with shape [T, F]
"""
axis = 1 if per_frame else None
mean = tf.reduce_mean(audio_feature, axis=axis, keepdims=True)
std_dev = tf.math.sqrt(tf.math.reduce_variance(audio_feature, axis=axis, keepdims=True) + 1e-9)
return (audio_feature - mean) / std_dev
def tf_normalize_signal(signal: tf.Tensor) -> tf.Tensor:
"""
    TF Normalize signal to [-1, 1] range
Args:
signal: tf.Tensor with shape [None]
Returns:
normalized signal with shape [None]
"""
gain = 1.0 / (tf.reduce_max(tf.abs(signal), axis=-1) + 1e-9)
return signal * gain
def tf_preemphasis(signal: tf.Tensor, coeff=0.97):
"""
TF Pre-emphasis
Args:
signal: tf.Tensor with shape [None]
coeff: Float that indicates the preemphasis coefficient
Returns:
pre-emphasized signal with shape [None]
"""
if not coeff or coeff <= 0.0: return signal
s0 = tf.expand_dims(signal[0], axis=-1)
s1 = signal[1:] - coeff * signal[:-1]
return tf.concat([s0, s1], axis=-1)
def tf_depreemphasis(signal: tf.Tensor, coeff=0.97) -> tf.Tensor:
"""
TF Depreemphasis
Args:
signal: tf.Tensor with shape [B, None]
coeff: Float that indicates the preemphasis coefficient
Returns:
depre-emphasized signal with shape [B, None]
"""
if not coeff or coeff <= 0.0: return signal
def map_fn(elem):
x = tf.expand_dims(elem[0], axis=-1)
for n in range(1, elem.shape[0], 1):
current = coeff * x[n - 1] + elem[n]
x = tf.concat([x, [current]], axis=0)
return x
return tf.map_fn(map_fn, signal)
class TFSpeechFeaturizer(metaclass=abc.ABCMeta):
def __init__(self, speech_config: dict):
"""
speech_config = {
"sample_rate": int,
"frame_ms": int,
"stride_ms": int,
"num_feature_bins": int,
"feature_type": str,
"delta": bool,
"delta_delta": bool,
"pitch": bool,
"normalize_signal": bool,
"normalize_feature": bool,
"normalize_per_frame": bool
}
"""
# Samples
self.sample_rate = speech_config.get("sample_rate", 16000)
self.frame_length = int(self.sample_rate * (speech_config.get("frame_ms", 25) / 1000))
self.frame_step = int(self.sample_rate * (speech_config.get("stride_ms", 10) / 1000))
# Features
self.num_feature_bins = speech_config.get("num_feature_bins", 80)
self.feature_type = speech_config.get("feature_type", "log_mel_spectrogram")
self.preemphasis = speech_config.get("preemphasis", None)
self.top_db = speech_config.get("top_db", 80.0)
# Normalization
self.normalize_signal = speech_config.get("normalize_signal", True)
self.normalize_feature = speech_config.get("normalize_feature", True)
self.normalize_per_frame = speech_config.get("normalize_per_frame", False)
self.center = speech_config.get("center", True)
# Length
self.max_length = 0
@property
def shape(self) -> list:
length = self.max_length if self.max_length > 0 else None
return [length, self.num_feature_bins, 1]
@property
def nfft(self) -> int:
""" Number of FFT """
return 2 ** (self.frame_length - 1).bit_length()
def get_length_from_duration(self, duration):
nsamples = math.ceil(float(duration) * self.sample_rate)
if self.center: nsamples += self.nfft
return 1 + (nsamples - self.nfft) // self.frame_step # https://www.tensorflow.org/api_docs/python/tf/signal/frame
def update_length(self, length: int):
self.max_length = max(self.max_length, length)
def reset_length(self):
self.max_length = 0
def stft(self, signal):
if self.center: signal = tf.pad(signal, [[self.nfft // 2, self.nfft // 2]], mode="REFLECT")
window = tf.signal.hann_window(self.frame_length, periodic=True)
left_pad = (self.nfft - self.frame_length) // 2
right_pad = self.nfft - self.frame_length - left_pad
window = tf.pad(window, [[left_pad, right_pad]])
framed_signals = tf.signal.frame(signal, frame_length=self.nfft, frame_step=self.frame_step)
framed_signals *= window
return tf.square(tf.abs(tf.signal.rfft(framed_signals, [self.nfft])))
def power_to_db(self, S, amin=1e-10):
log_spec = 10.0 * math_util.log10(tf.maximum(amin, S))
log_spec -= 10.0 * math_util.log10(tf.maximum(amin, 1.0))
if self.top_db is not None:
if self.top_db < 0:
raise ValueError('top_db must be non-negative')
log_spec = tf.maximum(log_spec, tf.reduce_max(log_spec) - self.top_db)
return log_spec
def extract(self, signal: np.ndarray) -> np.ndarray:
signal = np.asfortranarray(signal)
features = self.tf_extract(tf.convert_to_tensor(signal, dtype=tf.float32))
return features.numpy()
def tf_extract(self, signal: tf.Tensor) -> tf.Tensor:
"""
Extract speech features from signals (for using in tflite)
Args:
signal: tf.Tensor with shape [None]
Returns:
features: tf.Tensor with shape [T, F, 1]
"""
if self.normalize_signal:
signal = tf_normalize_signal(signal)
signal = tf_preemphasis(signal, self.preemphasis)
if self.feature_type == "log_mel_spectrogram":
features = self.compute_log_mel_spectrogram(signal)
else:
raise ValueError("feature_type must be 'log_mel_spectrogram'")
features = tf.expand_dims(features, axis=-1)
if self.normalize_feature:
features = tf_normalize_audio_features(features, per_frame=self.normalize_per_frame)
return features
def compute_log_mel_spectrogram(self, signal):
spectrogram = self.stft(signal)
linear_to_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins=self.num_feature_bins,
num_spectrogram_bins=spectrogram.shape[-1],
sample_rate=self.sample_rate,
lower_edge_hertz=0.0, upper_edge_hertz=(self.sample_rate / 2)
)
mel_spectrogram = tf.tensordot(spectrogram, linear_to_weight_matrix, 1)
return self.power_to_db(mel_spectrogram)
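# A minimal usage sketch (assumption: the config values shown are just the
# documented defaults above, not taken from a training recipe): extract log-mel
# features from one second of dummy audio.
def _example_featurizer():
    config = {
        "sample_rate": 16000, "frame_ms": 25, "stride_ms": 10,
        "num_feature_bins": 80, "feature_type": "log_mel_spectrogram",
    }
    featurizer = TFSpeechFeaturizer(config)
    signal = tf.random.normal([16000])  # 1 second of fake audio at 16 kHz
    return featurizer.tf_extract(signal)  # -> [T, 80, 1]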
| 9,943 | 36.104478 | 122 | py |