Dataset columns (per-row string lengths):
  code:     1 to 5.19M characters
  package:  1 to 81 characters
  path:     9 to 304 characters
  filename: 4 to 145 characters
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

r"""Tests for detection_inference.py."""

import os
import unittest
import numpy as np
from PIL import Image
import six
import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from object_detection.core import standard_fields
from object_detection.inference import detection_inference
from object_detection.utils import dataset_util
from object_detection.utils import tf_version


def get_mock_tfrecord_path():
  return os.path.join(tf.test.get_temp_dir(), 'mock.tfrec')


def create_mock_tfrecord():
  pil_image = Image.fromarray(np.array([[[123, 0, 0]]], dtype=np.uint8), 'RGB')
  image_output_stream = six.BytesIO()
  pil_image.save(image_output_stream, format='png')
  encoded_image = image_output_stream.getvalue()

  feature_map = {
      'test_field':
          dataset_util.float_list_feature([1, 2, 3, 4]),
      standard_fields.TfExampleFields.image_encoded:
          dataset_util.bytes_feature(encoded_image),
  }

  tf_example = tf.train.Example(features=tf.train.Features(feature=feature_map))
  with tf.python_io.TFRecordWriter(get_mock_tfrecord_path()) as writer:
    writer.write(tf_example.SerializeToString())
  return encoded_image


def get_mock_graph_path():
  return os.path.join(tf.test.get_temp_dir(), 'mock_graph.pb')


def create_mock_graph():
  g = tf.Graph()
  with g.as_default():
    in_image_tensor = tf.placeholder(
        tf.uint8, shape=[1, None, None, 3], name='image_tensor')
    tf.constant([2.0], name='num_detections')
    tf.constant(
        [[[0, 0.8, 0.7, 1], [0.1, 0.2, 0.8, 0.9], [0.2, 0.3, 0.4, 0.5]]],
        name='detection_boxes')
    tf.constant([[0.1, 0.2, 0.3]], name='detection_scores')
    tf.identity(
        tf.constant([[1.0, 2.0, 3.0]]) *
        tf.reduce_sum(tf.cast(in_image_tensor, dtype=tf.float32)),
        name='detection_classes')
    graph_def = g.as_graph_def()

  with tf.gfile.Open(get_mock_graph_path(), 'w') as fl:
    fl.write(graph_def.SerializeToString())


@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class InferDetectionsTests(tf.test.TestCase):

  def test_simple(self):
    create_mock_graph()
    encoded_image = create_mock_tfrecord()

    serialized_example_tensor, image_tensor = detection_inference.build_input(
        [get_mock_tfrecord_path()])
    self.assertAllEqual(image_tensor.get_shape().as_list(), [1, None, None, 3])

    (detected_boxes_tensor, detected_scores_tensor,
     detected_labels_tensor) = detection_inference.build_inference_graph(
         image_tensor, get_mock_graph_path())

    with self.test_session(use_gpu=False) as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      tf.train.start_queue_runners()

      tf_example = detection_inference.infer_detections_and_add_to_example(
          serialized_example_tensor, detected_boxes_tensor,
          detected_scores_tensor, detected_labels_tensor, False)

    expected_example = tf.train.Example()
    text_format.Merge(r"""
        features {
          feature {
            key: "image/detection/bbox/ymin"
            value { float_list { value: [0.0, 0.1] } } }
          feature {
            key: "image/detection/bbox/xmin"
            value { float_list { value: [0.8, 0.2] } } }
          feature {
            key: "image/detection/bbox/ymax"
            value { float_list { value: [0.7, 0.8] } } }
          feature {
            key: "image/detection/bbox/xmax"
            value { float_list { value: [1.0, 0.9] } } }
          feature {
            key: "image/detection/label"
            value { int64_list { value: [123, 246] } } }
          feature {
            key: "image/detection/score"
            value { float_list { value: [0.1, 0.2] } } }
          feature {
            key: "test_field"
            value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } }
        """, expected_example)
    expected_example.features.feature[
        standard_fields.TfExampleFields.image_encoded].CopyFrom(
            dataset_util.bytes_feature(encoded_image))
    self.assertProtoEquals(expected_example, tf_example)

  def test_discard_image(self):
    create_mock_graph()
    create_mock_tfrecord()

    serialized_example_tensor, image_tensor = detection_inference.build_input(
        [get_mock_tfrecord_path()])
    (detected_boxes_tensor, detected_scores_tensor,
     detected_labels_tensor) = detection_inference.build_inference_graph(
         image_tensor, get_mock_graph_path())

    with self.test_session(use_gpu=False) as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      tf.train.start_queue_runners()

      tf_example = detection_inference.infer_detections_and_add_to_example(
          serialized_example_tensor, detected_boxes_tensor,
          detected_scores_tensor, detected_labels_tensor, True)

    self.assertProtoEquals(r"""
        features {
          feature {
            key: "image/detection/bbox/ymin"
            value { float_list { value: [0.0, 0.1] } } }
          feature {
            key: "image/detection/bbox/xmin"
            value { float_list { value: [0.8, 0.2] } } }
          feature {
            key: "image/detection/bbox/ymax"
            value { float_list { value: [0.7, 0.8] } } }
          feature {
            key: "image/detection/bbox/xmax"
            value { float_list { value: [1.0, 0.9] } } }
          feature {
            key: "image/detection/label"
            value { int64_list { value: [123, 246] } } }
          feature {
            key: "image/detection/score"
            value { float_list { value: [0.1, 0.2] } } }
          feature {
            key: "test_field"
            value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } }
        """, tf_example)


if __name__ == '__main__':
  tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/inference/detection_inference_tf1_test.py
detection_inference_tf1_test.py
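A note on the mock graph in detection_inference_tf1_test.py above: its fake detection_classes output is [1.0, 2.0, 3.0] scaled by the sum of the input pixels, and the mock image is a single [123, 0, 0] pixel, so the raw classes come out as [123, 246, 369]; num_detections is 2, so only the first two survive, which is why the expected proto carries labels [123, 246]. A minimal sketch of that arithmetic (plain numpy, no TF session needed):

import numpy as np

# The mock image fed to the graph: one RGB pixel with value [123, 0, 0].
mock_image = np.array([[[123, 0, 0]]], dtype=np.uint8)

# detection_classes in the mock graph = [1, 2, 3] * sum(pixels).
raw_classes = np.array([1.0, 2.0, 3.0]) * mock_image.sum()
print(raw_classes)  # [123. 246. 369.]

# num_detections is 2, so build_inference_graph keeps only the first two.
num_detections = 2
print(raw_classes[:num_detections].astype(np.int64))  # [123 246]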
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

r"""Infers detections on a TFRecord of TFExamples given an inference graph.

Example usage:
  ./infer_detections \
    --input_tfrecord_paths=/path/to/input/tfrecord1,/path/to/input/tfrecord2 \
    --output_tfrecord_path=/path/to/output/detections.tfrecord \
    --inference_graph=/path/to/frozen_weights_inference_graph.pb

The output is a TFRecord of TFExamples. Each TFExample from the input is
first augmented with detections from the inference graph and then copied to
the output.

The input and output nodes of the inference graph are expected to have the
same types, shapes, and semantics, as the input and output nodes of graphs
produced by export_inference_graph.py, when run with
--input_type=image_tensor.

The script can also discard the image pixels in the output. This greatly
reduces the output size and can potentially accelerate reading data in
subsequent processing steps that don't require the images (e.g. computing
metrics).
"""

import itertools
import tensorflow.compat.v1 as tf
from object_detection.inference import detection_inference

tf.flags.DEFINE_string('input_tfrecord_paths', None,
                       'A comma separated list of paths to input TFRecords.')
tf.flags.DEFINE_string('output_tfrecord_path', None,
                       'Path to the output TFRecord.')
tf.flags.DEFINE_string('inference_graph', None,
                       'Path to the inference graph with embedded weights.')
tf.flags.DEFINE_boolean('discard_image_pixels', False,
                        'Discards the images in the output TFExamples. This'
                        ' significantly reduces the output size and is useful'
                        ' if the subsequent tools don\'t need access to the'
                        ' images (e.g. when computing evaluation measures).')

FLAGS = tf.flags.FLAGS


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  required_flags = ['input_tfrecord_paths', 'output_tfrecord_path',
                    'inference_graph']
  for flag_name in required_flags:
    if not getattr(FLAGS, flag_name):
      raise ValueError('Flag --{} is required'.format(flag_name))

  with tf.Session() as sess:
    input_tfrecord_paths = [
        v for v in FLAGS.input_tfrecord_paths.split(',') if v]
    tf.logging.info('Reading input from %d files', len(input_tfrecord_paths))
    serialized_example_tensor, image_tensor = detection_inference.build_input(
        input_tfrecord_paths)
    tf.logging.info('Reading graph and building model...')
    (detected_boxes_tensor, detected_scores_tensor,
     detected_labels_tensor) = detection_inference.build_inference_graph(
         image_tensor, FLAGS.inference_graph)

    tf.logging.info('Running inference and writing output to {}'.format(
        FLAGS.output_tfrecord_path))
    sess.run(tf.local_variables_initializer())
    tf.train.start_queue_runners()
    with tf.python_io.TFRecordWriter(
        FLAGS.output_tfrecord_path) as tf_record_writer:
      try:
        for counter in itertools.count():
          tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...',
                                 10, counter)
          tf_example = detection_inference.infer_detections_and_add_to_example(
              serialized_example_tensor, detected_boxes_tensor,
              detected_scores_tensor, detected_labels_tensor,
              FLAGS.discard_image_pixels)
          tf_record_writer.write(tf_example.SerializeToString())
      except tf.errors.OutOfRangeError:
        tf.logging.info('Finished processing records')


if __name__ == '__main__':
  tf.app.run()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/inference/infer_detections.py
infer_detections.py
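For orientation, the TFRecord written by infer_detections.py above can be inspected with the same TF1 compat APIs the script itself uses. A minimal read-back sketch (the output path below is a placeholder, not a real file); it prints the detection features that infer_detections_and_add_to_example adds to each example:

import tensorflow.compat.v1 as tf

output_path = '/path/to/output/detections.tfrecord'  # placeholder path

for record in tf.python_io.tf_record_iterator(output_path):
  example = tf.train.Example()
  example.ParseFromString(record)
  feature = example.features.feature
  scores = feature['image/detection/score'].float_list.value
  labels = feature['image/detection/label'].int64_list.value
  ymins = feature['image/detection/bbox/ymin'].float_list.value
  print(len(scores), 'detections;',
        'first label:', labels[0] if labels else None,
        'first score:', scores[0] if scores else None,
        'first ymin:', ymins[0] if ymins else None)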
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Utility functions for detection inference."""
from __future__ import division

import tensorflow.compat.v1 as tf

from object_detection.core import standard_fields


def build_input(tfrecord_paths):
  """Builds the graph's input.

  Args:
    tfrecord_paths: List of paths to the input TFRecords

  Returns:
    serialized_example_tensor: The next serialized example. String scalar
        Tensor
    image_tensor: The decoded image of the example. Uint8 tensor,
        shape=[1, None, None, 3]
  """
  filename_queue = tf.train.string_input_producer(
      tfrecord_paths, shuffle=False, num_epochs=1)

  tf_record_reader = tf.TFRecordReader()
  _, serialized_example_tensor = tf_record_reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example_tensor,
      features={
          standard_fields.TfExampleFields.image_encoded:
              tf.FixedLenFeature([], tf.string),
      })
  encoded_image = features[standard_fields.TfExampleFields.image_encoded]
  image_tensor = tf.image.decode_image(encoded_image, channels=3)
  image_tensor.set_shape([None, None, 3])
  image_tensor = tf.expand_dims(image_tensor, 0)

  return serialized_example_tensor, image_tensor


def build_inference_graph(image_tensor, inference_graph_path):
  """Loads the inference graph and connects it to the input image.

  Args:
    image_tensor: The input image. uint8 tensor, shape=[1, None, None, 3]
    inference_graph_path: Path to the inference graph with embedded weights

  Returns:
    detected_boxes_tensor: Detected boxes. Float tensor,
        shape=[num_detections, 4]
    detected_scores_tensor: Detected scores. Float tensor,
        shape=[num_detections]
    detected_labels_tensor: Detected labels. Int64 tensor,
        shape=[num_detections]
  """
  with tf.gfile.Open(inference_graph_path, 'rb') as graph_def_file:
    graph_content = graph_def_file.read()
  graph_def = tf.GraphDef()
  graph_def.MergeFromString(graph_content)

  tf.import_graph_def(
      graph_def, name='', input_map={'image_tensor': image_tensor})

  g = tf.get_default_graph()

  num_detections_tensor = tf.squeeze(
      g.get_tensor_by_name('num_detections:0'), 0)
  num_detections_tensor = tf.cast(num_detections_tensor, tf.int32)

  detected_boxes_tensor = tf.squeeze(
      g.get_tensor_by_name('detection_boxes:0'), 0)
  detected_boxes_tensor = detected_boxes_tensor[:num_detections_tensor]

  detected_scores_tensor = tf.squeeze(
      g.get_tensor_by_name('detection_scores:0'), 0)
  detected_scores_tensor = detected_scores_tensor[:num_detections_tensor]

  detected_labels_tensor = tf.squeeze(
      g.get_tensor_by_name('detection_classes:0'), 0)
  detected_labels_tensor = tf.cast(detected_labels_tensor, tf.int64)
  detected_labels_tensor = detected_labels_tensor[:num_detections_tensor]

  return detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor


def infer_detections_and_add_to_example(
    serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor,
    detected_labels_tensor, discard_image_pixels):
  """Runs the supplied tensors and adds the inferred detections to the example.

  Args:
    serialized_example_tensor: Serialized TF example. Scalar string tensor
    detected_boxes_tensor: Detected boxes. Float tensor,
        shape=[num_detections, 4]
    detected_scores_tensor: Detected scores. Float tensor,
        shape=[num_detections]
    detected_labels_tensor: Detected labels. Int64 tensor,
        shape=[num_detections]
    discard_image_pixels: If true, discards the image from the result

  Returns:
    The de-serialized TF example augmented with the inferred detections.
  """
  tf_example = tf.train.Example()
  (serialized_example, detected_boxes, detected_scores,
   detected_classes) = tf.get_default_session().run([
       serialized_example_tensor, detected_boxes_tensor,
       detected_scores_tensor, detected_labels_tensor
   ])
  detected_boxes = detected_boxes.T

  tf_example.ParseFromString(serialized_example)
  feature = tf_example.features.feature
  feature[standard_fields.TfExampleFields.
          detection_score].float_list.value[:] = detected_scores
  feature[standard_fields.TfExampleFields.
          detection_bbox_ymin].float_list.value[:] = detected_boxes[0]
  feature[standard_fields.TfExampleFields.
          detection_bbox_xmin].float_list.value[:] = detected_boxes[1]
  feature[standard_fields.TfExampleFields.
          detection_bbox_ymax].float_list.value[:] = detected_boxes[2]
  feature[standard_fields.TfExampleFields.
          detection_bbox_xmax].float_list.value[:] = detected_boxes[3]
  feature[standard_fields.TfExampleFields.
          detection_class_label].int64_list.value[:] = detected_classes

  if discard_image_pixels:
    del feature[standard_fields.TfExampleFields.image_encoded]

  return tf_example
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/inference/detection_inference.py
detection_inference.py
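One detail of infer_detections_and_add_to_example in detection_inference.py above that is easy to miss: detected_boxes comes back as a [num_detections, 4] array in [ymin, xmin, ymax, xmax] order, and the .T transpose turns it into four coordinate rows so each row can be written to its own bbox feature list. A small numpy sketch of that step, using the same box values as the mock graph in the test above:

import numpy as np

# Two detections, each box stored as [ymin, xmin, ymax, xmax].
detected_boxes = np.array([[0.0, 0.8, 0.7, 1.0],
                           [0.1, 0.2, 0.8, 0.9]])

# After .T, row 0 holds all ymins, row 1 all xmins, and so on,
# matching how the per-coordinate feature lists are filled.
boxes_t = detected_boxes.T
print(boxes_t[0])  # ymin values: [0.  0.1]
print(boxes_t[1])  # xmin values: [0.8 0.2]
print(boxes_t[2])  # ymax values: [0.7 0.8]
print(boxes_t[3])  # xmax values: [1.  0.9]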
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD feature extractors based on Resnet v1 and PPN architectures.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import resnet_v1 class _SSDResnetPpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD feature extractor based on resnet architecture and PPN.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_base_fn, resnet_scope_name, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, base_feature_map_depth=1024, num_layers=6, override_base_feature_extractor_hyperparams=False, use_bounded_activations=False): """Resnet based PPN Feature Extractor for SSD Models. See go/pooling-pyramid for more details about PPN. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. resnet_base_fn: base resnet network to use. resnet_scope_name: scope name to construct resnet reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. base_feature_map_depth: Depth of the base feature before the max pooling. num_layers: Number of layers used to make predictions. They are pooled from the base feature. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. use_bounded_activations: Whether or not to use bounded activations for resnet v1 bottleneck residual unit. Bounded activations better lend themselves to quantized inference. """ super(_SSDResnetPpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams) self._resnet_base_fn = resnet_base_fn self._resnet_scope_name = resnet_scope_name self._base_feature_map_depth = base_feature_map_depth self._num_layers = num_layers self._use_bounded_activations = use_bounded_activations def _filter_features(self, image_features): # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead # of munging the scope here. 
filtered_image_features = dict({}) for key, feature in image_features.items(): feature_name = key.split('/')[-1] if feature_name in ['block2', 'block3', 'block4']: filtered_image_features[feature_name] = feature return filtered_image_features def preprocess(self, resized_inputs): """SSD preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] Raises: ValueError: depth multiplier is not supported. """ if self._depth_multiplier != 1.0: raise ValueError('Depth multiplier not supported.') preprocessed_inputs = shape_utils.check_min_image_dim( 129, preprocessed_inputs) with tf.variable_scope( self._resnet_scope_name, reuse=self._reuse_weights) as scope: with slim.arg_scope(resnet_v1.resnet_arg_scope()): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): with slim.arg_scope( [resnet_v1.bottleneck], use_bounded_activations=self._use_bounded_activations): _, activations = self._resnet_base_fn( inputs=ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), num_classes=None, is_training=None, global_pool=False, output_stride=None, store_non_strided_activations=True, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.pooling_pyramid_feature_maps( base_feature_map_depth=self._base_feature_map_depth, num_layers=self._num_layers, image_features={ 'image_features': self._filter_features(activations)['block3'] }) return list(feature_maps.values()) class SSDResnet50V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): """PPN Resnet50 v1 Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """Resnet50 v1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. 
override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet50V1PpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50', reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams=( override_base_feature_extractor_hyperparams)) class SSDResnet101V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): """PPN Resnet101 v1 Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """Resnet101 v1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet101V1PpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_101, 'resnet_v1_101', reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams=( override_base_feature_extractor_hyperparams)) class SSDResnet152V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): """PPN Resnet152 v1 Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """Resnet152 v1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet152V1PpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_152, 'resnet_v1_152', reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams=( override_base_feature_extractor_hyperparams))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py
ssd_resnet_v1_ppn_feature_extractor.py
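The preprocess method in ssd_resnet_v1_ppn_feature_extractor.py above performs VGG-style channel mean subtraction with means [123.68, 116.779, 103.939] and skips it when the input does not have 3 channels. A tiny numpy sketch of the same transformation, only to show the broadcasting of the nested [[channel_means]] list:

import numpy as np

channel_means = [123.68, 116.779, 103.939]
resized_inputs = np.random.uniform(0, 255, size=(1, 4, 4, 3)).astype(np.float32)

if resized_inputs.shape[3] == 3:
  # [[channel_means]] has shape (1, 1, 3) and broadcasts over
  # batch, height and width, exactly as in the extractor.
  preprocessed = resized_inputs - [[channel_means]]
else:
  preprocessed = resized_inputs

# Per-channel means drop by roughly the subtracted constants.
print(preprocessed.mean(axis=(0, 1, 2)))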
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for MobilenetV1 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import mobilenet_v1 class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV1 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """MobileNetV1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDMobileNetV1FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=None, regularize_depthwise=True)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_feature_extractor.py
ssd_mobilenet_v1_feature_extractor.py
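Two details of ssd_mobilenet_v1_feature_extractor.py above that a short sketch makes concrete: preprocess maps pixels from [0, 255] to [-1, 1] with (2.0 / 255.0) * x - 1.0, and num_layers simply truncates the six-entry feature map layout (the first two entries reuse backbone endpoints, the rest are extra SSD layers). Plain Python, no TF required:

def preprocess(pixel_value):
  # Same element-wise map the extractor applies: [0, 255] -> [-1, 1].
  return (2.0 / 255.0) * pixel_value - 1.0

print(preprocess(0.0), preprocess(255.0))  # -1.0 1.0

# num_layers truncates the SSD feature map layout used in extract_features.
from_layer = ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', '']
layer_depth = [-1, -1, 512, 256, 256, 128]
num_layers = 4
print(from_layer[:num_layers])   # first two maps come from the backbone
print(layer_depth[:num_layers])  # [-1, -1, 512, 256]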
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD Keras-based MobilenetV2 FPN Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import mobilenet_v2 from object_detection.models.keras_models import model_utils from object_detection.utils import ops from object_detection.utils import shape_utils # Total number of blocks in Mobilenet_V2 base network. NUM_LAYERS = 19 # A modified config of mobilenet v2 that makes it more detection friendly. def _create_modified_mobilenet_config(): last_conv = model_utils.ConvDefs(conv_name='Conv_1', filters=256) return [last_conv] class SSDMobileNetV2FpnKerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using Keras-based MobilenetV2 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False, name=None): """SSD Keras based FPN feature extractor Mobilenet v2 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v2 layers {layer_4, layer_7, layer_14, layer_19}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. 
additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDMobileNetV2FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() self._use_native_resize_op = use_native_resize_op self._feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19'] self.classification_backbone = None self._fpn_features_generator = None self._coarse_feature_layers = [] def build(self, input_shape): full_mobilenet_v2 = mobilenet_v2.mobilenet_v2( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), weights=None, use_explicit_padding=self._use_explicit_padding, alpha=self._depth_multiplier, min_depth=self._min_depth, include_top=False) layer_names = [layer.name for layer in full_mobilenet_v2.layers] outputs = [] for layer_idx in [4, 7, 14]: add_name = 'block_{}_add'.format(layer_idx - 2) project_name = 'block_{}_project_BN'.format(layer_idx - 2) output_layer_name = add_name if add_name in layer_names else project_name outputs.append(full_mobilenet_v2.get_layer(output_layer_name).output) layer_19 = full_mobilenet_v2.get_layer(name='out_relu').output outputs.append(layer_19) self.classification_backbone = tf.keras.Model( inputs=full_mobilenet_v2.inputs, outputs=outputs) # pylint:disable=g-long-lambda self._depth_fn = lambda d: max( int(d * self._depth_multiplier), self._min_depth) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) # Construct coarse feature layers padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 stride = 2 for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1): coarse_feature_layers = [] if self._use_explicit_padding: def fixed_padding(features, kernel_size=kernel_size): return ops.fixed_padding(features, kernel_size) 
coarse_feature_layers.append(tf.keras.layers.Lambda( fixed_padding, name='fixed_padding')) layer_name = 'bottom_up_Conv2d_{}'.format( i - self._base_fpn_max_level + NUM_LAYERS) conv_block = feature_map_generators.create_conv_block( self._use_depthwise, kernel_size, padding, stride, layer_name, self._conv_hyperparams, self._is_training, self._freeze_batchnorm, self._depth_fn(self._additional_layer_depth)) coarse_feature_layers.extend(conv_block) self._coarse_feature_layers.append(coarse_feature_layers) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append(self._feature_blocks[level - 2]) feature_start_index = len(self._feature_blocks) - self._num_levels fpn_input_image_features = [ (key, image_features[feature_start_index + index]) for index, key in enumerate(feature_block_list)] fpn_features = self._fpn_features_generator(fpn_input_image_features) feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( self._feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( self._feature_blocks[self._base_fpn_max_level - 2])] for coarse_feature_layers in self._coarse_feature_layers: for layer in coarse_feature_layers: last_feature_map = layer(last_feature_map) feature_maps.append(last_feature_map) return feature_maps
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py
ssd_mobilenet_v2_fpn_keras_feature_extractor.py
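Two small pieces of arithmetic in ssd_mobilenet_v2_fpn_keras_feature_extractor.py above are worth spelling out: the channel depth of the added FPN layers is max(int(depth * depth_multiplier), min_depth), and FPN levels index into the backbone blocks ['layer_4', 'layer_7', 'layer_14', 'layer_19'] via level - 2. A short sketch of both; the multiplier and min_depth values are illustrative, not taken from a real config:

# Mirrors self._depth_fn and the level -> feature block lookup.
def depth_fn(depth, depth_multiplier, min_depth):
  return max(int(depth * depth_multiplier), min_depth)

print(depth_fn(256, depth_multiplier=1.0, min_depth=16))   # 256
print(depth_fn(256, depth_multiplier=0.25, min_depth=16))  # 64

feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19']
fpn_min_level, base_fpn_max_level = 3, 5  # defaults: fpn_max_level capped at 5
for level in range(fpn_min_level, base_fpn_max_level + 1):
  print(level, feature_blocks[level - 2])  # 3 layer_7, 4 layer_14, 5 layer_19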
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v2_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v2_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdMobilenetV2FeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, num_layers=6): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. num_layers: number of SSD layers. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 return ssd_mobilenet_v2_feature_extractor.SSDMobileNetV2FeatureExtractor( False, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding, num_layers=num_layers) def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_128_explicit_padding( self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_with_dynamic_inputs( self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] 
self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_enforcing_min_depth( self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32), (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), (2, 1, 1, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_raises_error_with_invalid_image_size( self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(4, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'MobilenetV2' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name) def test_variable_count(self): depth_multiplier = 1 pad_to_multiple = 1 variables = self.get_feature_extractor_variables( depth_multiplier, pad_to_multiple) self.assertEqual(len(variables), 292) def test_has_fused_batchnorm(self): image_height = 40 image_width = 40 depth_multiplier = 1 pad_to_multiple = 1 image_placeholder = tf.placeholder(tf.float32, [1, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(image_placeholder) _ = feature_extractor.extract_features(preprocessed_image) self.assertTrue(any('FusedBatchNorm' in op.type for op in tf.get_default_graph().get_operations())) def test_extract_features_with_fewer_layers(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), (2, 2, 2, 512), (2, 1, 1, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, num_layers=4) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf1_test.py
ssd_mobilenet_v2_feature_extractor_tf1_test.py
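The expected spatial sizes in the ssd_mobilenet_v2_feature_extractor tests above follow from SAME-padding stride arithmetic: the first two maps come from backbone endpoints at output stride 16 and 32, and each extra SSD layer halves the previous map, rounding up. A small sketch that reproduces the 128x128 and 299x299 cases; the stride values here are inferred from the expected shapes, not read from a config:

import math

def ssd_spatial_sizes(image_size, num_extra_layers=4):
  # Backbone endpoints at output stride 16 and 32 (SAME padding => ceil).
  sizes = [int(math.ceil(image_size / 16.0)),
           int(math.ceil(image_size / 32.0))]
  # Each additional SSD feature map halves the previous one, rounding up.
  for _ in range(num_extra_layers):
    sizes.append(max(1, int(math.ceil(sizes[-1] / 2.0))))
  return sizes

print(ssd_spatial_sizes(128))  # [8, 4, 2, 1, 1, 1]
print(ssd_spatial_sizes(299))  # [19, 10, 5, 3, 2, 1]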
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SSD Mobilenet V1 feature extractors. By using parameterized test decorator, this test serves for both Slim-based and Keras-based Mobilenet V1 feature extractors in SSD. """ import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v1_keras_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class SsdMobilenetV1FeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, num_layers=6, is_training=False, use_keras=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. num_layers: number of SSD layers. is_training: whether the network is in training mode. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. Returns: an ssd_meta_arch.SSDFeatureExtractor object. 
""" del use_keras min_depth = 32 return (ssd_mobilenet_v1_keras_feature_extractor .SSDMobileNetV1KerasFeatureExtractor( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=self._build_conv_hyperparams( add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, use_explicit_padding=use_explicit_padding, num_layers=num_layers, name='MobilenetV1')) def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_with_dynamic_image_shape(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_returns_correct_shapes_enforcing_min_depth( self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32), (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), (2, 1, 1, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) 
self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_raises_error_with_invalid_image_size( self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=True) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=True) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_extract_features_with_fewer_layers(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, num_layers=4, use_keras=True) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py
ssd_mobilenet_v1_feature_extractor_tf2_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for ssd resnet v1 feature extractors."""
import abc
import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.models import ssd_feature_extractor_test


class SSDResnetPpnFeatureExtractorTestBase(
    ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
  """Helper test class for SSD Resnet PPN feature extractors."""

  @abc.abstractmethod
  def _scope_name(self):
    pass

  def test_extract_features_returns_correct_shapes_289(self):
    image_height = 289
    image_width = 289
    depth_multiplier = 1.0
    pad_to_multiple = 1
    expected_feature_map_shape = [(2, 19, 19, 1024), (2, 10, 10, 1024),
                                  (2, 5, 5, 1024), (2, 3, 3, 1024),
                                  (2, 2, 2, 1024), (2, 1, 1, 1024)]
    self.check_extract_features_returns_correct_shape(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape)

  def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self):
    image_height = 289
    image_width = 289
    depth_multiplier = 1.0
    pad_to_multiple = 1
    expected_feature_map_shape = [(2, 19, 19, 1024), (2, 10, 10, 1024),
                                  (2, 5, 5, 1024), (2, 3, 3, 1024),
                                  (2, 2, 2, 1024), (2, 1, 1, 1024)]
    self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
        2, image_height, image_width, depth_multiplier, pad_to_multiple,
        expected_feature_map_shape)

  def test_extract_features_raises_error_with_invalid_image_size(self):
    image_height = 32
    image_width = 32
    depth_multiplier = 1.0
    pad_to_multiple = 1
    self.check_extract_features_raises_error_with_invalid_image_size(
        image_height, image_width, depth_multiplier, pad_to_multiple)

  def test_preprocess_returns_correct_value_range(self):
    image_height = 128
    image_width = 128
    depth_multiplier = 1
    pad_to_multiple = 1
    test_image = tf.constant(np.random.rand(4, image_height, image_width, 3))
    feature_extractor = self._create_feature_extractor(depth_multiplier,
                                                       pad_to_multiple)
    preprocessed_image = feature_extractor.preprocess(test_image)
    with self.test_session() as sess:
      test_image_out, preprocessed_image_out = sess.run(
          [test_image, preprocessed_image])
      self.assertAllClose(preprocessed_image_out,
                          test_image_out - [[123.68, 116.779, 103.939]])

  def test_variables_only_created_in_scope(self):
    depth_multiplier = 1
    pad_to_multiple = 1
    self.check_feature_extractor_variables_under_scope(
        depth_multiplier, pad_to_multiple, self._scope_name())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_testbase.py
ssd_resnet_v1_ppn_feature_extractor_testbase.py
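The preprocess check in this test base boils down to per-channel mean subtraction with the ImageNet means. A minimal numpy sketch of that relationship, assuming only the mean values listed in the test (this is not the extractor's preprocess method itself):

import numpy as np

CHANNEL_MEANS = [123.68, 116.779, 103.939]  # RGB means used by the Resnet extractors

def subtract_channel_means(images):
    # images: float array of shape [batch, height, width, 3]
    return images - np.reshape(CHANNEL_MEANS, [1, 1, 1, 3])

images = np.random.rand(4, 128, 128, 3) * 255.0
preprocessed = subtract_channel_means(images)
# Mirrors the assertion in test_preprocess_returns_correct_value_range.
np.testing.assert_allclose(preprocessed, images - [[CHANNEL_MEANS]])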
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """NASNet Faster R-CNN implementation. Learning Transferable Architectures for Scalable Image Recognition Barret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. Le https://arxiv.org/abs/1707.07012 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import variables_helper # pylint: disable=g-import-not-at-top try: from nets.nasnet import nasnet from nets.nasnet import nasnet_utils except: # pylint: disable=bare-except pass # pylint: enable=g-import-not-at-top arg_scope = slim.arg_scope def nasnet_large_arg_scope_for_detection(is_batch_norm_training=False): """Defines the default arg scope for the NASNet-A Large for object detection. This provides a small edit to switch batch norm training on and off. Args: is_batch_norm_training: Boolean indicating whether to train with batch norm. Returns: An `arg_scope` to use for the NASNet Large Model. """ imagenet_scope = nasnet.nasnet_large_arg_scope() with arg_scope(imagenet_scope): with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: return sc # Note: This is largely a copy of _build_nasnet_base inside nasnet.py but # with special edits to remove instantiation of the stem and the special # ability to receive as input a pair of hidden states. def _build_nasnet_base(hidden_previous, hidden, normal_cell, reduction_cell, hparams, true_cell_num, start_cell_num): """Constructs a NASNet image model.""" # Find where to place the reduction cells or stride normal cells reduction_indices = nasnet_utils.calc_reduction_layers( hparams.num_cells, hparams.num_reduction_layers) # Note: The None is prepended to match the behavior of _imagenet_stem() cell_outputs = [None, hidden_previous, hidden] net = hidden # NOTE: In the nasnet.py code, filter_scaling starts at 1.0. We instead # start at 2.0 because 1 reduction cell has been created which would # update the filter_scaling to 2.0. 
filter_scaling = 2.0 # Run the cells for cell_num in range(start_cell_num, hparams.num_cells): stride = 1 if hparams.skip_reduction_layer_input: prev_layer = cell_outputs[-2] if cell_num in reduction_indices: filter_scaling *= hparams.filter_scaling_rate net = reduction_cell( net, scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)), filter_scaling=filter_scaling, stride=2, prev_layer=cell_outputs[-2], cell_num=true_cell_num) true_cell_num += 1 cell_outputs.append(net) if not hparams.skip_reduction_layer_input: prev_layer = cell_outputs[-2] net = normal_cell( net, scope='cell_{}'.format(cell_num), filter_scaling=filter_scaling, stride=stride, prev_layer=prev_layer, cell_num=true_cell_num) true_cell_num += 1 cell_outputs.append(net) # Final nonlinearity. # Note that we have dropped the final pooling, dropout and softmax layers # from the default nasnet version. with tf.variable_scope('final_layer'): net = tf.nn.relu(net) return net # TODO(shlens): Only fixed_shape_resizer is currently supported for NASNet # featurization. The reason for this is that nasnet.py only supports # inputs with fully known shapes. We need to update nasnet.py to handle # shapes not known at compile time. class FasterRCNNNASFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with NASNet-A feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 16. """ if first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 16.') super(FasterRCNNNASFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with NAS preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the NASNet network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] end_points: A dictionary mapping feature extractor tensor names to tensors Raises: ValueError: If the created network is missing the required activation. 
""" del scope if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(nasnet_large_arg_scope_for_detection( is_batch_norm_training=self._train_batch_norm)): with arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], reuse=self._reuse_weights): _, end_points = nasnet.build_nasnet_large( preprocessed_inputs, num_classes=None, is_training=self._is_training, final_endpoint='Cell_11') # Note that both 'Cell_10' and 'Cell_11' have equal depth = 2016. rpn_feature_map = tf.concat([end_points['Cell_10'], end_points['Cell_11']], 3) # nasnet.py does not maintain the batch size in the first dimension. # This work around permits us retaining the batch for below. batch = preprocessed_inputs.get_shape().as_list()[0] shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] rpn_feature_map_shape = [batch] + shape_without_batch rpn_feature_map.set_shape(rpn_feature_map_shape) return rpn_feature_map, end_points def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the NASNet-A network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ del scope # Note that we always feed into 2 layers of equal depth # where the first N channels corresponds to previous hidden layer # and the second N channels correspond to the final hidden layer. hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) # Note that what follows is largely a copy of build_nasnet_large() within # nasnet.py. We are copying to minimize code pollution in slim. # TODO(shlens,skornblith): Determine the appropriate drop path schedule. # For now the schedule is the default (1.0->0.7 over 250,000 train steps). hparams = nasnet.large_imagenet_config() if not self._is_training: hparams.set_hparam('drop_path_keep_prob', 1.0) # Calculate the total number of cells in the network # -- Add 2 for the reduction cells. total_num_cells = hparams.num_cells + 2 # -- And add 2 for the stem cells for ImageNet training. total_num_cells += 2 normal_cell = nasnet_utils.NasNetANormalCell( hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps) reduction_cell = nasnet_utils.NasNetAReductionCell( hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps) with arg_scope([slim.dropout, nasnet_utils.drop_path], is_training=self._is_training): with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format): # This corresponds to the cell number just past 'Cell_11' used by # by _extract_proposal_features(). 
start_cell_num = 12 # Note that this number equals: # start_cell_num + 2 stem cells + 1 reduction cell true_cell_num = 15 with slim.arg_scope(nasnet.nasnet_large_arg_scope()): net = _build_nasnet_base(hidden_previous, hidden, normal_cell=normal_cell, reduction_cell=reduction_cell, hparams=hparams, true_cell_num=true_cell_num, start_cell_num=start_cell_num) proposal_classifier_features = net return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for NASNet-A checkpoints. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ # Note that the NAS checkpoint only contains the moving average version of # the Variables so we need to generate an appropriate dictionary mapping. variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable return variables_to_restore
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_nas_feature_extractor.py
faster_rcnn_nas_feature_extractor.py
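restore_from_classification_checkpoint_fn above renames detection-graph variables onto the moving-average names stored in a NASNet classification checkpoint: strip the feature-extractor scope prefix, then append '/ExponentialMovingAverage'. A small sketch of that string mapping on plain Python strings (the scope and variable names here are hypothetical, for illustration only):

def checkpoint_name_for(variable_name, feature_extractor_scopes):
    # Strip the detection scope prefix and append the EMA suffix.
    for scope in feature_extractor_scopes:
        if variable_name.startswith(scope):
            stripped = variable_name.replace(scope + '/', '')
            return stripped + '/ExponentialMovingAverage'
    return None  # Not a feature extractor variable; nothing to restore.

print(checkpoint_name_for(
    'FirstStageFeatureExtractor/cell_0/conv/weights',
    ['FirstStageFeatureExtractor', 'SecondStageFeatureExtractor']))
# -> 'cell_0/conv/weights/ExponentialMovingAverage'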
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD Feature Pyramid Network (FPN) feature extractors based on Resnet v1. See https://arxiv.org/abs/1708.02002 for details. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import resnet_v1 class SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD FPN feature extractor based on Resnet v1 architecture.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_base_fn, resnet_scope_name, fpn_scope_name, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD FPN feature extractor based on Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. resnet_base_fn: base resnet network to use. resnet_scope_name: scope name under which to construct resnet fpn_scope_name: scope name under which to construct the feature pyramid network. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. UNUSED currently. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. 
override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. Raises: ValueError: On supplying invalid arguments for unused arguments. """ super(SSDResnetV1FpnFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) if self._use_explicit_padding is True: raise ValueError('Explicit padding is not a valid option.') self._resnet_base_fn = resnet_base_fn self._resnet_scope_name = resnet_scope_name self._fpn_scope_name = fpn_scope_name self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._use_native_resize_op = use_native_resize_op def preprocess(self, resized_inputs): """SSD preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def _filter_features(self, image_features): # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead # of munging the scope here. filtered_image_features = dict({}) for key, feature in image_features.items(): feature_name = key.split('/')[-1] if feature_name in ['block1', 'block2', 'block3', 'block4']: filtered_image_features[feature_name] = feature return filtered_image_features def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 129, preprocessed_inputs) with tf.variable_scope( self._resnet_scope_name, reuse=self._reuse_weights) as scope: with slim.arg_scope(resnet_v1.resnet_arg_scope()): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = self._resnet_base_fn( inputs=ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), num_classes=None, is_training=None, global_pool=False, output_stride=None, store_non_strided_activations=True, min_base_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope) image_features = self._filter_features(image_features) depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights): base_fpn_max_level = min(self._fpn_max_level, 5) feature_block_list = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) fpn_features = feature_map_generators.fpn_top_down_feature_maps( [(key, image_features[key]) for key in feature_block_list], depth=depth_fn(self._additional_layer_depth), use_native_resize_op=self._use_native_resize_op) feature_maps = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_maps.append( fpn_features['top_down_block{}'.format(level - 1)]) last_feature_map = fpn_features['top_down_block{}'.format( base_fpn_max_level - 1)] # Construct coarse features for i in range(base_fpn_max_level, self._fpn_max_level): last_feature_map = slim.conv2d( last_feature_map, num_outputs=depth_fn(self._additional_layer_depth), kernel_size=[3, 3], stride=2, padding='SAME', scope='bottom_up_block{}'.format(i)) feature_maps.append(last_feature_map) return feature_maps class SSDResnet50V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): """SSD Resnet50 V1 FPN feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD Resnet50 V1 FPN feature extractor based on Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. UNUSED currently. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. 
override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet50V1FpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50', 'fpn', fpn_min_level, fpn_max_level, additional_layer_depth, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, use_native_resize_op=use_native_resize_op, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) class SSDResnet101V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): """SSD Resnet101 V1 FPN feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD Resnet101 V1 FPN feature extractor based on Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. UNUSED currently. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet101V1FpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_101, 'resnet_v1_101', 'fpn', fpn_min_level, fpn_max_level, additional_layer_depth, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, use_native_resize_op=use_native_resize_op, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) class SSDResnet152V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): """SSD Resnet152 V1 FPN feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD Resnet152 V1 FPN feature extractor based on Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. 
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. UNUSED currently. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet152V1FpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_152, 'resnet_v1_152', 'fpn', fpn_min_level, fpn_max_level, additional_layer_depth, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, use_native_resize_op=use_native_resize_op, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py
ssd_resnet_v1_fpn_feature_extractor.py
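extract_features above selects Resnet endpoints 'block{level - 1}' for FPN levels up to base_fpn_max_level = min(fpn_max_level, 5) and then stacks extra stride-2 convolutions for the coarser levels. The following sketch only enumerates the resulting feature-map names for the default fpn_min_level=3, fpn_max_level=7 configuration; it builds no graph and is illustrative only:

def fpn_feature_map_names(fpn_min_level=3, fpn_max_level=7):
    base_fpn_max_level = min(fpn_max_level, 5)
    top_down = ['top_down_block{}'.format(level - 1)
                for level in range(fpn_min_level, base_fpn_max_level + 1)]
    bottom_up = ['bottom_up_block{}'.format(i)
                 for i in range(base_fpn_max_level, fpn_max_level)]
    return top_down + bottom_up

# Defaults yield maps from Resnet blocks 2-4 plus two coarse layers:
# ['top_down_block2', 'top_down_block3', 'top_down_block4',
#  'bottom_up_block5', 'bottom_up_block6']
print(fpn_feature_map_names())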
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for embedded_ssd_mobilenet_v1_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor from object_detection.models import ssd_feature_extractor_test from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class EmbeddedSSDMobileNetV1FeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, is_training=True): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. is_training: whether the network is in training mode. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 return (embedded_ssd_mobilenet_v1_feature_extractor. EmbeddedSSDMobileNetV1FeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, override_base_feature_extractor_hyperparams=True)) def test_extract_features_returns_correct_shapes_256(self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), (2, 4, 4, 512), (2, 2, 2, 256), (2, 1, 1, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), (2, 4, 4, 512), (2, 2, 2, 256), (2, 1, 1, 256)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): image_height = 256 image_width = 256 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32), (2, 2, 2, 32), (2, 1, 1, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_with_pad_to_multiple_of_1( self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), (2, 4, 4, 512), (2, 2, 2, 256), (2, 1, 1, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def 
test_extract_features_raises_error_with_pad_to_multiple_not_1(self): depth_multiplier = 1.0 pad_to_multiple = 2 with self.assertRaises(ValueError): _ = self._create_feature_extractor(depth_multiplier, pad_to_multiple) def test_extract_features_raises_error_with_invalid_image_size(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple) def test_preprocess_returns_correct_value_range(self): image_height = 256 image_width = 256 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(4, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'MobilenetV1' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py
embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py
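The pad_to_multiple test above reflects that the embedded extractor only accepts pad_to_multiple=1, while the other extractors zero-pad the input height and width up to the nearest multiple before feature extraction. A hedged numpy sketch of that padding arithmetic (an illustration of the documented behaviour, not the library's ops.pad_to_multiple):

import numpy as np

def pad_to_multiple(images, multiple):
    # Zero-pad height and width up to the nearest multiple of `multiple`.
    batch, height, width, channels = images.shape
    padded_h = int(np.ceil(height / float(multiple))) * multiple
    padded_w = int(np.ceil(width / float(multiple))) * multiple
    return np.pad(images,
                  [(0, 0), (0, padded_h - height), (0, padded_w - width), (0, 0)],
                  mode='constant')

print(pad_to_multiple(np.zeros((2, 300, 300, 3)), 32).shape)  # (2, 320, 320, 3)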
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions to generate a list of feature maps based on image features. Provides several feature map generators that can be used to build object detection feature extractors. Object detection feature extractors usually are built by stacking two components - A base feature extractor such as Inception V3 and a feature map generator. Feature map generators build on the base feature extractors and produce a list of final feature maps. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.utils import ops from object_detection.utils import shape_utils # Activation bound used for TPU v1. Activations will be clipped to # [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with # use_bounded_activations enabled. ACTIVATION_BOUND = 6.0 def get_depth_fn(depth_multiplier, min_depth): """Builds a callable to compute depth (output channels) of conv filters. Args: depth_multiplier: a multiplier for the nominal depth. min_depth: a lower bound on the depth of filters. Returns: A callable that takes in a nominal depth and returns the depth to use. """ def multiply_depth(depth): new_depth = int(depth * depth_multiplier) return max(new_depth, min_depth) return multiply_depth def create_conv_block( use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams, is_training, freeze_batchnorm, depth): """Create Keras layers for depthwise & non-depthwise convolutions. Args: use_depthwise: Whether to use depthwise separable conv instead of regular conv. kernel_size: A list of length 2: [kernel_height, kernel_width] of the filters. Can be an int if both values are the same. padding: One of 'VALID' or 'SAME'. stride: A list of length 2: [stride_height, stride_width], specifying the convolution stride. Can be an int if both strides are the same. layer_name: String. The name of the layer. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. is_training: Indicates whether the feature generator is in training mode. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. depth: Depth of output feature maps. Returns: A list of conv layers. """ layers = [] if use_depthwise: kwargs = conv_hyperparams.params() # Both the regularizer and initializer apply to the depthwise layer, # so we remap the kernel_* to depthwise_* here. 
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] layers.append( tf.keras.layers.SeparableConv2D( depth, [kernel_size, kernel_size], depth_multiplier=1, padding=padding, strides=stride, name=layer_name + '_depthwise_conv', **kwargs)) else: layers.append(tf.keras.layers.Conv2D( depth, [kernel_size, kernel_size], padding=padding, strides=stride, name=layer_name + '_conv', **conv_hyperparams.params())) layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) layers.append( conv_hyperparams.build_activation_layer( name=layer_name)) return layers class KerasMultiResolutionFeatureMaps(tf.keras.Model): """Generates multi resolution feature maps from input image features. A Keras model that generates multi-scale feature maps for detection as in the SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1. More specifically, when called on inputs it performs the following two tasks: 1) If a layer name is provided in the configuration, returns that layer as a feature map. 2) If a layer name is left as an empty string, constructs a new feature map based on the spatial shape and depth configuration. Note that the current implementation only supports generating new layers using convolution of stride 2 resulting in a spatial resolution reduction by a factor of 2. By default convolution kernel size is set to 3, and it can be customized by caller. An example of the configuration for Inception V3: { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } When this feature generator object is called on input image_features: Args: image_features: A dictionary of handles to activation tensors from the base feature extractor. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. """ def __init__(self, feature_map_layout, depth_multiplier, min_depth, insert_1x1_conv, is_training, conv_hyperparams, freeze_batchnorm, name=None): """Constructor. Args: feature_map_layout: Dictionary of specifications for the feature map layouts in the following format (Inception V2/V3 respectively): { 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } or { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } If 'from_layer' is specified, the specified feature map is directly used as a box predictor layer, and the layer_depth is directly infered from the feature map (instead of using the provided 'layer_depth' parameter). In this case, our convention is to set 'layer_depth' to -1 for clarity. Otherwise, if 'from_layer' is an empty string, then the box predictor layer will be built from the previous layer using convolution operations. Note that the current implementation only supports generating new layers using convolutions of stride 2 (resulting in a spatial resolution reduction by a factor of 2), and will be extended to a more flexible design. Convolution kernel size is set to 3 by default, and can be customized by 'conv_kernel_size' parameter (similarily, 'conv_kernel_size' should be set to -1 if 'from_layer' is specified). The created convolution operation will be a normal 2D convolution by default, and a depthwise convolution followed by 1x1 convolution if 'use_depthwise' is set to True. 
depth_multiplier: Depth multiplier for convolutional layers. min_depth: Minimum depth for convolutional layers. insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution should be inserted before shrinking the feature map. is_training: Indicates whether the feature generator is in training mode. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(KerasMultiResolutionFeatureMaps, self).__init__(name=name) self.feature_map_layout = feature_map_layout self.convolutions = [] depth_fn = get_depth_fn(depth_multiplier, min_depth) base_from_layer = '' use_explicit_padding = False if 'use_explicit_padding' in feature_map_layout: use_explicit_padding = feature_map_layout['use_explicit_padding'] use_depthwise = False if 'use_depthwise' in feature_map_layout: use_depthwise = feature_map_layout['use_depthwise'] for index, from_layer in enumerate(feature_map_layout['from_layer']): net = [] layer_depth = feature_map_layout['layer_depth'][index] conv_kernel_size = 3 if 'conv_kernel_size' in feature_map_layout: conv_kernel_size = feature_map_layout['conv_kernel_size'][index] if from_layer: base_from_layer = from_layer else: if insert_1x1_conv: layer_name = '{}_1_Conv2d_{}_1x1_{}'.format( base_from_layer, index, depth_fn(layer_depth // 2)) net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth // 2), [1, 1], padding='SAME', strides=1, name=layer_name + '_conv', **conv_hyperparams.params())) net.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) net.append( conv_hyperparams.build_activation_layer( name=layer_name)) layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format( base_from_layer, index, conv_kernel_size, conv_kernel_size, depth_fn(layer_depth)) stride = 2 padding = 'SAME' if use_explicit_padding: padding = 'VALID' # We define this function here while capturing the value of # conv_kernel_size, to avoid holding a reference to the loop variable # conv_kernel_size inside of a lambda function def fixed_padding(features, kernel_size=conv_kernel_size): return ops.fixed_padding(features, kernel_size) net.append(tf.keras.layers.Lambda(fixed_padding)) # TODO(rathodv): Add some utilities to simplify the creation of # Depthwise & non-depthwise convolutions w/ normalization & activations if use_depthwise: net.append(tf.keras.layers.DepthwiseConv2D( [conv_kernel_size, conv_kernel_size], depth_multiplier=1, padding=padding, strides=stride, name=layer_name + '_depthwise_conv', **conv_hyperparams.params())) net.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_depthwise_batchnorm')) net.append( conv_hyperparams.build_activation_layer( name=layer_name + '_depthwise')) net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1], padding='SAME', strides=1, name=layer_name + '_conv', **conv_hyperparams.params())) net.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) net.append( conv_hyperparams.build_activation_layer( name=layer_name)) else: net.append(tf.keras.layers.Conv2D( depth_fn(layer_depth), 
[conv_kernel_size, conv_kernel_size], padding=padding, strides=stride, name=layer_name + '_conv', **conv_hyperparams.params())) net.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) net.append( conv_hyperparams.build_activation_layer( name=layer_name)) # Until certain bugs are fixed in checkpointable lists, # this net must be appended only once it's been filled with layers self.convolutions.append(net) def call(self, image_features): """Generate the multi-resolution feature maps. Executed when calling the `.__call__` method on input. Args: image_features: A dictionary of handles to activation tensors from the base feature extractor. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. """ feature_maps = [] feature_map_keys = [] for index, from_layer in enumerate(self.feature_map_layout['from_layer']): if from_layer: feature_map = image_features[from_layer] feature_map_keys.append(from_layer) else: feature_map = feature_maps[-1] for layer in self.convolutions[index]: feature_map = layer(feature_map) layer_name = self.convolutions[index][-1].name feature_map_keys.append(layer_name) feature_maps.append(feature_map) return collections.OrderedDict( [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) def multi_resolution_feature_maps(feature_map_layout, depth_multiplier, min_depth, insert_1x1_conv, image_features, pool_residual=False): """Generates multi resolution feature maps from input image features. Generates multi-scale feature maps for detection as in the SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1. More specifically, it performs the following two tasks: 1) If a layer name is provided in the configuration, returns that layer as a feature map. 2) If a layer name is left as an empty string, constructs a new feature map based on the spatial shape and depth configuration. Note that the current implementation only supports generating new layers using convolution of stride 2 resulting in a spatial resolution reduction by a factor of 2. By default convolution kernel size is set to 3, and it can be customized by caller. An example of the configuration for Inception V3: { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } Args: feature_map_layout: Dictionary of specifications for the feature map layouts in the following format (Inception V2/V3 respectively): { 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } or { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } If 'from_layer' is specified, the specified feature map is directly used as a box predictor layer, and the layer_depth is directly infered from the feature map (instead of using the provided 'layer_depth' parameter). In this case, our convention is to set 'layer_depth' to -1 for clarity. Otherwise, if 'from_layer' is an empty string, then the box predictor layer will be built from the previous layer using convolution operations. Note that the current implementation only supports generating new layers using convolutions of stride 2 (resulting in a spatial resolution reduction by a factor of 2), and will be extended to a more flexible design. 
Convolution kernel size is set to 3 by default, and can be customized by 'conv_kernel_size' parameter (similarily, 'conv_kernel_size' should be set to -1 if 'from_layer' is specified). The created convolution operation will be a normal 2D convolution by default, and a depthwise convolution followed by 1x1 convolution if 'use_depthwise' is set to True. depth_multiplier: Depth multiplier for convolutional layers. min_depth: Minimum depth for convolutional layers. insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution should be inserted before shrinking the feature map. image_features: A dictionary of handles to activation tensors from the base feature extractor. pool_residual: Whether to add an average pooling layer followed by a residual connection between subsequent feature maps when the channel depth match. For example, with option 'layer_depth': [-1, 512, 256, 256], a pooling and residual layer is added between the third and forth feature map. This option is better used with Weight Shared Convolution Box Predictor when all feature maps have the same channel depth to encourage more consistent features across multi-scale feature maps. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. Raises: ValueError: if the number entries in 'from_layer' and 'layer_depth' do not match. ValueError: if the generated layer does not have the same resolution as specified. """ depth_fn = get_depth_fn(depth_multiplier, min_depth) feature_map_keys = [] feature_maps = [] base_from_layer = '' use_explicit_padding = False if 'use_explicit_padding' in feature_map_layout: use_explicit_padding = feature_map_layout['use_explicit_padding'] use_depthwise = False if 'use_depthwise' in feature_map_layout: use_depthwise = feature_map_layout['use_depthwise'] for index, from_layer in enumerate(feature_map_layout['from_layer']): layer_depth = feature_map_layout['layer_depth'][index] conv_kernel_size = 3 if 'conv_kernel_size' in feature_map_layout: conv_kernel_size = feature_map_layout['conv_kernel_size'][index] if from_layer: feature_map = image_features[from_layer] base_from_layer = from_layer feature_map_keys.append(from_layer) else: pre_layer = feature_maps[-1] pre_layer_depth = pre_layer.get_shape().as_list()[3] intermediate_layer = pre_layer if insert_1x1_conv: layer_name = '{}_1_Conv2d_{}_1x1_{}'.format( base_from_layer, index, depth_fn(layer_depth // 2)) intermediate_layer = slim.conv2d( pre_layer, depth_fn(layer_depth // 2), [1, 1], padding='SAME', stride=1, scope=layer_name) layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format( base_from_layer, index, conv_kernel_size, conv_kernel_size, depth_fn(layer_depth)) stride = 2 padding = 'SAME' if use_explicit_padding: padding = 'VALID' intermediate_layer = ops.fixed_padding( intermediate_layer, conv_kernel_size) if use_depthwise: feature_map = slim.separable_conv2d( intermediate_layer, None, [conv_kernel_size, conv_kernel_size], depth_multiplier=1, padding=padding, stride=stride, scope=layer_name + '_depthwise') feature_map = slim.conv2d( feature_map, depth_fn(layer_depth), [1, 1], padding='SAME', stride=1, scope=layer_name) if pool_residual and pre_layer_depth == depth_fn(layer_depth): if use_explicit_padding: pre_layer = ops.fixed_padding(pre_layer, conv_kernel_size) feature_map += slim.avg_pool2d( pre_layer, [conv_kernel_size, conv_kernel_size], padding=padding, stride=2, scope=layer_name + '_pool') else: feature_map = slim.conv2d( intermediate_layer, 
depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size], padding=padding, stride=stride, scope=layer_name) feature_map_keys.append(layer_name) feature_maps.append(feature_map) return collections.OrderedDict( [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) class KerasFpnTopDownFeatureMaps(tf.keras.Model): """Generates Keras based `top-down` feature maps for Feature Pyramid Networks. See https://arxiv.org/abs/1612.03144 for details. """ def __init__(self, num_levels, depth, is_training, conv_hyperparams, freeze_batchnorm, use_depthwise=False, use_explicit_padding=False, use_bounded_activations=False, use_native_resize_op=False, scope=None, name=None): """Constructor. Args: num_levels: the number of image features. depth: depth of output feature maps. is_training: Indicates whether the feature generator is in training mode. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: whether to use depthwise separable conv instead of regular conv. use_explicit_padding: whether to use explicit padding. use_bounded_activations: Whether or not to clip activations to range [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend themselves to quantized inference. use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for the upsampling process instead of reshape and broadcasting implementation. scope: A scope name to wrap this op under. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(KerasFpnTopDownFeatureMaps, self).__init__(name=name) self.scope = scope if scope else 'top_down' self.top_layers = [] self.residual_blocks = [] self.top_down_blocks = [] self.reshape_blocks = [] self.conv_layers = [] padding = 'VALID' if use_explicit_padding else 'SAME' stride = 1 kernel_size = 3 def clip_by_value(features): return tf.clip_by_value(features, -ACTIVATION_BOUND, ACTIVATION_BOUND) # top layers self.top_layers.append(tf.keras.layers.Conv2D( depth, [1, 1], strides=stride, padding=padding, name='projection_%d' % num_levels, **conv_hyperparams.params(use_bias=True))) if use_bounded_activations: self.top_layers.append(tf.keras.layers.Lambda( clip_by_value, name='clip_by_value')) for level in reversed(list(range(num_levels - 1))): # to generate residual from image features residual_net = [] # to preprocess top_down (the image feature map from last layer) top_down_net = [] # to reshape top_down according to residual if necessary reshaped_residual = [] # to apply convolution layers to feature map conv_net = [] # residual block residual_net.append(tf.keras.layers.Conv2D( depth, [1, 1], padding=padding, strides=1, name='projection_%d' % (level + 1), **conv_hyperparams.params(use_bias=True))) if use_bounded_activations: residual_net.append(tf.keras.layers.Lambda( clip_by_value, name='clip_by_value')) # top-down block # TODO (b/128922690): clean-up of ops.nearest_neighbor_upsampling if use_native_resize_op: def resize_nearest_neighbor(image): image_shape = shape_utils.combined_static_and_dynamic_shape(image) return tf.image.resize_nearest_neighbor( image, [image_shape[1] * 2, image_shape[2] * 2]) top_down_net.append(tf.keras.layers.Lambda( resize_nearest_neighbor, name='nearest_neighbor_upsampling')) else: def nearest_neighbor_upsampling(image): return ops.nearest_neighbor_upsampling(image, scale=2) top_down_net.append(tf.keras.layers.Lambda( nearest_neighbor_upsampling, name='nearest_neighbor_upsampling')) # reshape block if use_explicit_padding: def reshape(inputs): residual_shape = tf.shape(inputs[0]) return inputs[1][:, :residual_shape[1], :residual_shape[2], :] reshaped_residual.append( tf.keras.layers.Lambda(reshape, name='reshape')) # down layers if use_bounded_activations: conv_net.append(tf.keras.layers.Lambda( clip_by_value, name='clip_by_value')) if use_explicit_padding: def fixed_padding(features, kernel_size=kernel_size): return ops.fixed_padding(features, kernel_size) conv_net.append(tf.keras.layers.Lambda( fixed_padding, name='fixed_padding')) layer_name = 'smoothing_%d' % (level + 1) conv_block = create_conv_block( use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams, is_training, freeze_batchnorm, depth) conv_net.extend(conv_block) self.residual_blocks.append(residual_net) self.top_down_blocks.append(top_down_net) self.reshape_blocks.append(reshaped_residual) self.conv_layers.append(conv_net) def call(self, image_features): """Generate the multi-resolution feature maps. Executed when calling the `.__call__` method on input. Args: image_features: list of tuples of (tensor_name, image_feature_tensor). Spatial resolutions of succesive tensors must reduce exactly by a factor of 2. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. 
""" output_feature_maps_list = [] output_feature_map_keys = [] with tf.name_scope(self.scope): top_down = image_features[-1][1] for layer in self.top_layers: top_down = layer(top_down) output_feature_maps_list.append(top_down) output_feature_map_keys.append('top_down_%s' % image_features[-1][0]) num_levels = len(image_features) for index, level in enumerate(reversed(list(range(num_levels - 1)))): residual = image_features[level][1] top_down = output_feature_maps_list[-1] for layer in self.residual_blocks[index]: residual = layer(residual) for layer in self.top_down_blocks[index]: top_down = layer(top_down) for layer in self.reshape_blocks[index]: top_down = layer([residual, top_down]) top_down += residual for layer in self.conv_layers[index]: top_down = layer(top_down) output_feature_maps_list.append(top_down) output_feature_map_keys.append('top_down_%s' % image_features[level][0]) return collections.OrderedDict(reversed( list(zip(output_feature_map_keys, output_feature_maps_list)))) def fpn_top_down_feature_maps(image_features, depth, use_depthwise=False, use_explicit_padding=False, use_bounded_activations=False, scope=None, use_native_resize_op=False): """Generates `top-down` feature maps for Feature Pyramid Networks. See https://arxiv.org/abs/1612.03144 for details. Args: image_features: list of tuples of (tensor_name, image_feature_tensor). Spatial resolutions of succesive tensors must reduce exactly by a factor of 2. depth: depth of output feature maps. use_depthwise: whether to use depthwise separable conv instead of regular conv. use_explicit_padding: whether to use explicit padding. use_bounded_activations: Whether or not to clip activations to range [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend themselves to quantized inference. scope: A scope name to wrap this op under. use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for the upsampling process instead of reshape and broadcasting implementation. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. 
""" with tf.name_scope(scope, 'top_down'): num_levels = len(image_features) output_feature_maps_list = [] output_feature_map_keys = [] padding = 'VALID' if use_explicit_padding else 'SAME' kernel_size = 3 with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], padding=padding, stride=1): top_down = slim.conv2d( image_features[-1][1], depth, [1, 1], activation_fn=None, normalizer_fn=None, scope='projection_%d' % num_levels) if use_bounded_activations: top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND, ACTIVATION_BOUND) output_feature_maps_list.append(top_down) output_feature_map_keys.append( 'top_down_%s' % image_features[-1][0]) for level in reversed(list(range(num_levels - 1))): if use_native_resize_op: with tf.name_scope('nearest_neighbor_upsampling'): top_down_shape = shape_utils.combined_static_and_dynamic_shape( top_down) top_down = tf.image.resize_nearest_neighbor( top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2]) else: top_down = ops.nearest_neighbor_upsampling(top_down, scale=2) residual = slim.conv2d( image_features[level][1], depth, [1, 1], activation_fn=None, normalizer_fn=None, scope='projection_%d' % (level + 1)) if use_bounded_activations: residual = tf.clip_by_value(residual, -ACTIVATION_BOUND, ACTIVATION_BOUND) if use_explicit_padding: # slice top_down to the same shape as residual residual_shape = tf.shape(residual) top_down = top_down[:, :residual_shape[1], :residual_shape[2], :] top_down += residual if use_bounded_activations: top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND, ACTIVATION_BOUND) if use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d pre_output = top_down if use_explicit_padding: pre_output = ops.fixed_padding(pre_output, kernel_size) output_feature_maps_list.append(conv_op( pre_output, depth, [kernel_size, kernel_size], scope='smoothing_%d' % (level + 1))) output_feature_map_keys.append('top_down_%s' % image_features[level][0]) return collections.OrderedDict(reversed( list(zip(output_feature_map_keys, output_feature_maps_list)))) def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers, image_features, replace_pool_with_conv=False): """Generates pooling pyramid feature maps. The pooling pyramid feature maps is motivated by multi_resolution_feature_maps. The main difference are that it is simpler and reduces the number of free parameters. More specifically: - Instead of using convolutions to shrink the feature map, it uses max pooling, therefore totally gets rid of the parameters in convolution. - By pooling feature from larger map up to a single cell, it generates features in the same feature space. - Instead of independently making box predictions from individual maps, it shares the same classifier across different feature maps, therefore reduces the "mis-calibration" across different scales. See go/ppn-detection for more details. Args: base_feature_map_depth: Depth of the base feature before the max pooling. num_layers: Number of layers used to make predictions. They are pooled from the base feature. image_features: A dictionary of handles to activation tensors from the feature extractor. replace_pool_with_conv: Whether or not to replace pooling operations with convolutions in the PPN. Default is False. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. 
Raises: ValueError: image_features does not contain exactly one entry """ if len(image_features) != 1: raise ValueError('image_features should be a dictionary of length 1.') image_features = image_features[list(image_features.keys())[0]] feature_map_keys = [] feature_maps = [] feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth if base_feature_map_depth > 0: image_features = slim.conv2d( image_features, base_feature_map_depth, [1, 1], # kernel size padding='SAME', stride=1, scope=feature_map_key) # Add a 1x1 max-pooling node (a no op node) immediately after the conv2d for # TPU v1 compatibility. Without the following dummy op, TPU runtime # compiler will combine the convolution with one max-pooling below into a # single cycle, so getting the conv2d feature becomes impossible. image_features = slim.max_pool2d( image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key) feature_map_keys.append(feature_map_key) feature_maps.append(image_features) feature_map = image_features if replace_pool_with_conv: with slim.arg_scope([slim.conv2d], padding='SAME', stride=2): for i in range(num_layers - 1): feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i, base_feature_map_depth) feature_map = slim.conv2d( feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key) feature_map_keys.append(feature_map_key) feature_maps.append(feature_map) else: with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2): for i in range(num_layers - 1): feature_map_key = 'MaxPool2d_%d_2x2' % i feature_map = slim.max_pool2d( feature_map, [2, 2], padding='SAME', scope=feature_map_key) feature_map_keys.append(feature_map_key) feature_maps.append(feature_map) return collections.OrderedDict( [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/feature_map_generators.py
feature_map_generators.py
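A minimal TF1-style usage sketch of `fpn_top_down_feature_maps` from the file above (assuming the TF Object Detection API and `tf_slim` are installed; the endpoint names and shapes are illustrative): four dummy backbone endpoints whose spatial sizes halve exactly are turned into a top-down pyramid in which every level is projected to `depth` channels.

```python
import tensorflow.compat.v1 as tf
from object_detection.models import feature_map_generators

tf.disable_v2_behavior()  # TF1 graph mode, as in the slim code path above

# Backbone endpoints ordered from highest to lowest resolution; successive
# spatial sizes must shrink by exactly a factor of 2.
image_features = [
    ('block2', tf.random_uniform([1, 64, 64, 256])),
    ('block3', tf.random_uniform([1, 32, 32, 512])),
    ('block4', tf.random_uniform([1, 16, 16, 1024])),
    ('block5', tf.random_uniform([1, 8, 8, 2048])),
]
fpn_maps = feature_map_generators.fpn_top_down_feature_maps(
    image_features, depth=256)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for name, value in sess.run(fpn_maps).items():
        # Keys are 'top_down_block2' ... 'top_down_block5'; every level keeps
        # its input resolution but has 256 channels, e.g.
        # top_down_block2 -> (1, 64, 64, 256).
        print(name, value.shape)
```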
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for faster_rcnn_mobilenet_v1_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import faster_rcnn_mobilenet_v1_feature_extractor as faster_rcnn_mobilenet_v1 from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class FasterRcnnMobilenetV1FeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride): return faster_rcnn_mobilenet_v1.FasterRCNNMobilenetV1FeatureExtractor( is_training=False, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [4, 14, 14, 512]) def test_extract_proposal_features_stride_eight(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=8) preprocessed_inputs = tf.random_uniform( [4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [4, 14, 14, 512]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 7, 7, 512]) def test_extract_proposal_features_dies_on_invalid_stride(self): with self.assertRaises(ValueError): self._build_feature_extractor(first_stage_features_stride=99) def test_extract_proposal_features_dies_on_very_small_images(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) rpn_feature_map, _ = 
feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) with self.assertRaises(tf.errors.InvalidArgumentError): sess.run( features_shape, feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(ValueError): feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [3, 14, 14, 576], maxval=255, dtype=tf.float32) proposal_classifier_features = ( feature_extractor.extract_box_classifier_features( proposal_feature_maps, scope='TestScope')) features_shape = tf.shape(proposal_classifier_features) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [3, 7, 7, 1024]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py
faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py
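As a quick illustration of the shape assertions in the test above: with `first_stage_features_stride=16`, the RPN feature map's spatial size is the input size divided by the stride. The helper below is hypothetical, not part of the API.

```python
def expected_rpn_spatial_size(image_size, first_stage_features_stride):
    # 224 / 16 = 14 and 112 / 16 = 7, matching the [4, 14, 14, 512] and
    # [1, 7, 7, 512] shapes asserted in the test above.
    return image_size // first_stage_features_stride

assert expected_rpn_spatial_size(224, 16) == 14
assert expected_rpn_spatial_size(112, 16) == 7
```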
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for feature map generators.""" import unittest from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.models import feature_map_generators from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import test_utils from object_detection.utils import tf_version INCEPTION_V2_LAYOUT = { 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 256], 'anchor_strides': [16, 32, 64, -1, -1, -1], 'layer_target_norm': [20.0, -1, -1, -1, -1, -1], } INCEPTION_V3_LAYOUT = { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128], 'anchor_strides': [16, 32, 64, -1, -1, -1], 'aspect_ratios': [1.0, 2.0, 1.0/2, 3.0, 1.0/3] } EMBEDDED_SSD_MOBILENET_V1_LAYOUT = { 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''], 'layer_depth': [-1, -1, 512, 256, 256], 'conv_kernel_size': [-1, -1, 3, 3, 2], } SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = { 'from_layer': ['Conv2d_13_pointwise', '', '', ''], 'layer_depth': [-1, 256, 256, 256], } class MultiResolutionFeatureMapGeneratorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def _build_feature_map_generator(self, feature_map_layout, pool_residual=False): if tf_version.is_tf2(): return feature_map_generators.KerasMultiResolutionFeatureMaps( feature_map_layout=feature_map_layout, depth_multiplier=1, min_depth=32, insert_1x1_conv=True, freeze_batchnorm=False, is_training=True, conv_hyperparams=self._build_conv_hyperparams(), name='FeatureMaps' ) else: def feature_map_generator(image_features): return feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=1, min_depth=32, insert_1x1_conv=True, image_features=image_features, pool_residual=pool_residual) return feature_map_generator def test_get_expected_feature_map_shapes_with_inception_v2(self): with test_utils.GraphContextOrNone() as g: image_features = { 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) } feature_map_generator = self._build_feature_map_generator( feature_map_layout=INCEPTION_V2_LAYOUT) def graph_fn(): feature_maps = feature_map_generator(image_features) return feature_maps 
expected_feature_map_shapes = { 'Mixed_3c': (4, 28, 28, 256), 'Mixed_4c': (4, 14, 14, 576), 'Mixed_5c': (4, 7, 7, 1024), 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_get_expected_feature_map_shapes_with_inception_v2_use_depthwise( self): with test_utils.GraphContextOrNone() as g: image_features = { 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) } layout_copy = INCEPTION_V2_LAYOUT.copy() layout_copy['use_depthwise'] = True feature_map_generator = self._build_feature_map_generator( feature_map_layout=layout_copy) def graph_fn(): return feature_map_generator(image_features) expected_feature_map_shapes = { 'Mixed_3c': (4, 28, 28, 256), 'Mixed_4c': (4, 14, 14, 576), 'Mixed_5c': (4, 7, 7, 1024), 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_get_expected_feature_map_shapes_use_explicit_padding(self): with test_utils.GraphContextOrNone() as g: image_features = { 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) } layout_copy = INCEPTION_V2_LAYOUT.copy() layout_copy['use_explicit_padding'] = True feature_map_generator = self._build_feature_map_generator( feature_map_layout=layout_copy, ) def graph_fn(): return feature_map_generator(image_features) expected_feature_map_shapes = { 'Mixed_3c': (4, 28, 28, 256), 'Mixed_4c': (4, 14, 14, 576), 'Mixed_5c': (4, 7, 7, 1024), 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_get_expected_feature_map_shapes_with_inception_v3(self): with test_utils.GraphContextOrNone() as g: image_features = { 'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32), 'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32), 'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32) } feature_map_generator = self._build_feature_map_generator( feature_map_layout=INCEPTION_V3_LAYOUT, ) def graph_fn(): return feature_map_generator(image_features) expected_feature_map_shapes = { 'Mixed_5d': (4, 35, 35, 256), 'Mixed_6e': (4, 17, 17, 576), 'Mixed_7c': (4, 8, 8, 1024), 'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), 'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), 'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)} out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) 
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1( self): with test_utils.GraphContextOrNone() as g: image_features = { 'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512], dtype=tf.float32), 'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32), } feature_map_generator = self._build_feature_map_generator( feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT, ) def graph_fn(): return feature_map_generator(image_features) expected_feature_map_shapes = { 'Conv2d_11_pointwise': (4, 16, 16, 512), 'Conv2d_13_pointwise': (4, 8, 8, 1024), 'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512), 'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256), 'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)} out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1( self): with test_utils.GraphContextOrNone() as g: image_features = { 'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32), } feature_map_generator = self._build_feature_map_generator( feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT, pool_residual=True ) def graph_fn(): return feature_map_generator(image_features) expected_feature_map_shapes = { 'Conv2d_13_pointwise': (4, 8, 8, 1024), 'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256), 'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256), 'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)} out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_get_expected_variable_names_with_inception_v2(self): with test_utils.GraphContextOrNone() as g: image_features = { 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) } feature_map_generator = self._build_feature_map_generator( feature_map_layout=INCEPTION_V2_LAYOUT, ) def graph_fn(): return feature_map_generator(image_features) self.execute(graph_fn, [], g) expected_slim_variables = set([ 'Mixed_5c_1_Conv2d_3_1x1_256/weights', 'Mixed_5c_1_Conv2d_3_1x1_256/biases', 'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights', 'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases', 'Mixed_5c_1_Conv2d_4_1x1_128/weights', 'Mixed_5c_1_Conv2d_4_1x1_128/biases', 'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights', 'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases', 'Mixed_5c_1_Conv2d_5_1x1_128/weights', 'Mixed_5c_1_Conv2d_5_1x1_128/biases', 'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights', 'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases', ]) expected_keras_variables = set([ 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel', 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias', 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel', 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias', 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel', 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias', 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel', 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias', 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel', 
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias', 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel', 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias', ]) if tf_version.is_tf2(): actual_variable_set = set( [var.name.split(':')[0] for var in feature_map_generator.variables]) self.assertSetEqual(expected_keras_variables, actual_variable_set) else: with g.as_default(): actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertSetEqual(expected_slim_variables, actual_variable_set) def test_get_expected_variable_names_with_inception_v2_use_depthwise( self): with test_utils.GraphContextOrNone() as g: image_features = { 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) } layout_copy = INCEPTION_V2_LAYOUT.copy() layout_copy['use_depthwise'] = True feature_map_generator = self._build_feature_map_generator( feature_map_layout=layout_copy, ) def graph_fn(): return feature_map_generator(image_features) self.execute(graph_fn, [], g) expected_slim_variables = set([ 'Mixed_5c_1_Conv2d_3_1x1_256/weights', 'Mixed_5c_1_Conv2d_3_1x1_256/biases', 'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/depthwise_weights', 'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/biases', 'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights', 'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases', 'Mixed_5c_1_Conv2d_4_1x1_128/weights', 'Mixed_5c_1_Conv2d_4_1x1_128/biases', 'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/depthwise_weights', 'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/biases', 'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights', 'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases', 'Mixed_5c_1_Conv2d_5_1x1_128/weights', 'Mixed_5c_1_Conv2d_5_1x1_128/biases', 'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/depthwise_weights', 'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/biases', 'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights', 'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases', ]) expected_keras_variables = set([ 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel', 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias', ('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/' 'depthwise_kernel'), ('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/' 'bias'), 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel', 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias', 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel', 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias', ('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/' 'depthwise_kernel'), ('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/' 'bias'), 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel', 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias', 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel', 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias', ('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/' 'depthwise_kernel'), ('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/' 'bias'), 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel', 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias', ]) if tf_version.is_tf2(): actual_variable_set = set( [var.name.split(':')[0] for var in feature_map_generator.variables]) self.assertSetEqual(expected_keras_variables, actual_variable_set) else: with g.as_default(): actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertSetEqual(expected_slim_variables, actual_variable_set) 
@parameterized.parameters({'use_native_resize_op': True}, {'use_native_resize_op': False}) class FPNFeatureMapGeneratorTest(test_case.TestCase, parameterized.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def _build_feature_map_generator( self, image_features, depth, use_bounded_activations=False, use_native_resize_op=False, use_explicit_padding=False, use_depthwise=False): if tf_version.is_tf2(): return feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=len(image_features), depth=depth, is_training=True, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, use_depthwise=use_depthwise, use_explicit_padding=use_explicit_padding, use_bounded_activations=use_bounded_activations, use_native_resize_op=use_native_resize_op, scope=None, name='FeatureMaps', ) else: def feature_map_generator(image_features): return feature_map_generators.fpn_top_down_feature_maps( image_features=image_features, depth=depth, use_depthwise=use_depthwise, use_explicit_padding=use_explicit_padding, use_bounded_activations=use_bounded_activations, use_native_resize_op=use_native_resize_op) return feature_map_generator def test_get_expected_feature_map_shapes( self, use_native_resize_op): with test_utils.GraphContextOrNone() as g: image_features = [ ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) ] feature_map_generator = self._build_feature_map_generator( image_features=image_features, depth=128, use_native_resize_op=use_native_resize_op) def graph_fn(): return feature_map_generator(image_features) expected_feature_map_shapes = { 'top_down_block2': (4, 8, 8, 128), 'top_down_block3': (4, 4, 4, 128), 'top_down_block4': (4, 2, 2, 128), 'top_down_block5': (4, 1, 1, 128) } out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_get_expected_feature_map_shapes_with_explicit_padding( self, use_native_resize_op): with test_utils.GraphContextOrNone() as g: image_features = [ ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) ] feature_map_generator = self._build_feature_map_generator( image_features=image_features, depth=128, use_explicit_padding=True, use_native_resize_op=use_native_resize_op) def graph_fn(): return feature_map_generator(image_features) expected_feature_map_shapes = { 'top_down_block2': (4, 8, 8, 128), 'top_down_block3': (4, 4, 4, 128), 'top_down_block4': (4, 2, 2, 128), 'top_down_block5': (4, 1, 1, 128) } out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') def 
test_use_bounded_activations_add_operations( self, use_native_resize_op): with test_utils.GraphContextOrNone() as g: image_features = [('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))] feature_map_generator = self._build_feature_map_generator( image_features=image_features, depth=128, use_bounded_activations=True, use_native_resize_op=use_native_resize_op) def graph_fn(): return feature_map_generator(image_features) self.execute(graph_fn, [], g) expected_added_operations = dict.fromkeys([ 'top_down/clip_by_value', 'top_down/clip_by_value_1', 'top_down/clip_by_value_2', 'top_down/clip_by_value_3', 'top_down/clip_by_value_4', 'top_down/clip_by_value_5', 'top_down/clip_by_value_6' ]) op_names = {op.name: None for op in g.get_operations()} self.assertDictContainsSubset(expected_added_operations, op_names) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') def test_use_bounded_activations_clip_value( self, use_native_resize_op): tf_graph = tf.Graph() with tf_graph.as_default(): image_features = [ ('block2', 255 * tf.ones([4, 8, 8, 256], dtype=tf.float32)), ('block3', 255 * tf.ones([4, 4, 4, 256], dtype=tf.float32)), ('block4', 255 * tf.ones([4, 2, 2, 256], dtype=tf.float32)), ('block5', 255 * tf.ones([4, 1, 1, 256], dtype=tf.float32)) ] feature_map_generator = self._build_feature_map_generator( image_features=image_features, depth=128, use_bounded_activations=True, use_native_resize_op=use_native_resize_op) feature_map_generator(image_features) expected_clip_by_value_ops = [ 'top_down/clip_by_value', 'top_down/clip_by_value_1', 'top_down/clip_by_value_2', 'top_down/clip_by_value_3', 'top_down/clip_by_value_4', 'top_down/clip_by_value_5', 'top_down/clip_by_value_6' ] # Gathers activation tensors before and after clip_by_value operations. activations = {} for clip_by_value_op in expected_clip_by_value_ops: clip_input_tensor = tf_graph.get_operation_by_name( '{}/Minimum'.format(clip_by_value_op)).inputs[0] clip_output_tensor = tf_graph.get_tensor_by_name( '{}:0'.format(clip_by_value_op)) activations.update({ 'before_{}'.format(clip_by_value_op): clip_input_tensor, 'after_{}'.format(clip_by_value_op): clip_output_tensor, }) expected_lower_bound = -feature_map_generators.ACTIVATION_BOUND expected_upper_bound = feature_map_generators.ACTIVATION_BOUND init_op = tf.global_variables_initializer() with self.test_session() as session: session.run(init_op) activations_output = session.run(activations) for clip_by_value_op in expected_clip_by_value_ops: # Before clipping, activations are beyound the expected bound because # of large input image_features values. activations_before_clipping = ( activations_output['before_{}'.format(clip_by_value_op)]) before_clipping_lower_bound = np.amin(activations_before_clipping) before_clipping_upper_bound = np.amax(activations_before_clipping) self.assertLessEqual(before_clipping_lower_bound, expected_lower_bound) self.assertGreaterEqual(before_clipping_upper_bound, expected_upper_bound) # After clipping, activations are bounded as expectation. 
activations_after_clipping = ( activations_output['after_{}'.format(clip_by_value_op)]) after_clipping_lower_bound = np.amin(activations_after_clipping) after_clipping_upper_bound = np.amax(activations_after_clipping) self.assertGreaterEqual(after_clipping_lower_bound, expected_lower_bound) self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound) def test_get_expected_feature_map_shapes_with_depthwise( self, use_native_resize_op): with test_utils.GraphContextOrNone() as g: image_features = [ ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) ] feature_map_generator = self._build_feature_map_generator( image_features=image_features, depth=128, use_depthwise=True, use_native_resize_op=use_native_resize_op) def graph_fn(): return feature_map_generator(image_features) expected_feature_map_shapes = { 'top_down_block2': (4, 8, 8, 128), 'top_down_block3': (4, 4, 4, 128), 'top_down_block4': (4, 2, 2, 128), 'top_down_block5': (4, 1, 1, 128) } out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_get_expected_variable_names( self, use_native_resize_op): with test_utils.GraphContextOrNone() as g: image_features = [ ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) ] feature_map_generator = self._build_feature_map_generator( image_features=image_features, depth=128, use_native_resize_op=use_native_resize_op) def graph_fn(): return feature_map_generator(image_features) self.execute(graph_fn, [], g) expected_slim_variables = set([ 'projection_1/weights', 'projection_1/biases', 'projection_2/weights', 'projection_2/biases', 'projection_3/weights', 'projection_3/biases', 'projection_4/weights', 'projection_4/biases', 'smoothing_1/weights', 'smoothing_1/biases', 'smoothing_2/weights', 'smoothing_2/biases', 'smoothing_3/weights', 'smoothing_3/biases', ]) expected_keras_variables = set([ 'FeatureMaps/top_down/projection_1/kernel', 'FeatureMaps/top_down/projection_1/bias', 'FeatureMaps/top_down/projection_2/kernel', 'FeatureMaps/top_down/projection_2/bias', 'FeatureMaps/top_down/projection_3/kernel', 'FeatureMaps/top_down/projection_3/bias', 'FeatureMaps/top_down/projection_4/kernel', 'FeatureMaps/top_down/projection_4/bias', 'FeatureMaps/top_down/smoothing_1_conv/kernel', 'FeatureMaps/top_down/smoothing_1_conv/bias', 'FeatureMaps/top_down/smoothing_2_conv/kernel', 'FeatureMaps/top_down/smoothing_2_conv/bias', 'FeatureMaps/top_down/smoothing_3_conv/kernel', 'FeatureMaps/top_down/smoothing_3_conv/bias' ]) if tf_version.is_tf2(): actual_variable_set = set( [var.name.split(':')[0] for var in feature_map_generator.variables]) self.assertSetEqual(expected_keras_variables, actual_variable_set) else: with g.as_default(): actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertSetEqual(expected_slim_variables, actual_variable_set) def test_get_expected_variable_names_with_depthwise( self, use_native_resize_op): with test_utils.GraphContextOrNone() as g: image_features = [ ('block2', 
tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) ] feature_map_generator = self._build_feature_map_generator( image_features=image_features, depth=128, use_depthwise=True, use_native_resize_op=use_native_resize_op) def graph_fn(): return feature_map_generator(image_features) self.execute(graph_fn, [], g) expected_slim_variables = set([ 'projection_1/weights', 'projection_1/biases', 'projection_2/weights', 'projection_2/biases', 'projection_3/weights', 'projection_3/biases', 'projection_4/weights', 'projection_4/biases', 'smoothing_1/depthwise_weights', 'smoothing_1/pointwise_weights', 'smoothing_1/biases', 'smoothing_2/depthwise_weights', 'smoothing_2/pointwise_weights', 'smoothing_2/biases', 'smoothing_3/depthwise_weights', 'smoothing_3/pointwise_weights', 'smoothing_3/biases', ]) expected_keras_variables = set([ 'FeatureMaps/top_down/projection_1/kernel', 'FeatureMaps/top_down/projection_1/bias', 'FeatureMaps/top_down/projection_2/kernel', 'FeatureMaps/top_down/projection_2/bias', 'FeatureMaps/top_down/projection_3/kernel', 'FeatureMaps/top_down/projection_3/bias', 'FeatureMaps/top_down/projection_4/kernel', 'FeatureMaps/top_down/projection_4/bias', 'FeatureMaps/top_down/smoothing_1_depthwise_conv/depthwise_kernel', 'FeatureMaps/top_down/smoothing_1_depthwise_conv/pointwise_kernel', 'FeatureMaps/top_down/smoothing_1_depthwise_conv/bias', 'FeatureMaps/top_down/smoothing_2_depthwise_conv/depthwise_kernel', 'FeatureMaps/top_down/smoothing_2_depthwise_conv/pointwise_kernel', 'FeatureMaps/top_down/smoothing_2_depthwise_conv/bias', 'FeatureMaps/top_down/smoothing_3_depthwise_conv/depthwise_kernel', 'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel', 'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias' ]) if tf_version.is_tf2(): actual_variable_set = set( [var.name.split(':')[0] for var in feature_map_generator.variables]) self.assertSetEqual(expected_keras_variables, actual_variable_set) else: with g.as_default(): actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertSetEqual(expected_slim_variables, actual_variable_set) class GetDepthFunctionTest(tf.test.TestCase): def test_return_min_depth_when_multiplier_is_small(self): depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5, min_depth=16) self.assertEqual(depth_fn(16), 16) def test_return_correct_depth_with_multiplier(self): depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5, min_depth=16) self.assertEqual(depth_fn(64), 32) @parameterized.parameters( {'replace_pool_with_conv': False}, {'replace_pool_with_conv': True}, ) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase): def test_get_expected_feature_map_shapes(self, replace_pool_with_conv): image_features = { 'image_features': tf.random_uniform([4, 19, 19, 1024]) } feature_maps = feature_map_generators.pooling_pyramid_feature_maps( base_feature_map_depth=1024, num_layers=6, image_features=image_features, replace_pool_with_conv=replace_pool_with_conv) expected_pool_feature_map_shapes = { 'Base_Conv2d_1x1_1024': (4, 19, 19, 1024), 'MaxPool2d_0_2x2': (4, 10, 10, 1024), 'MaxPool2d_1_2x2': (4, 5, 5, 1024), 'MaxPool2d_2_2x2': (4, 3, 3, 1024), 'MaxPool2d_3_2x2': (4, 2, 2, 1024), 'MaxPool2d_4_2x2': (4, 1, 1, 1024), } 
expected_conv_feature_map_shapes = { 'Base_Conv2d_1x1_1024': (4, 19, 19, 1024), 'Conv2d_0_3x3_s2_1024': (4, 10, 10, 1024), 'Conv2d_1_3x3_s2_1024': (4, 5, 5, 1024), 'Conv2d_2_3x3_s2_1024': (4, 3, 3, 1024), 'Conv2d_3_3x3_s2_1024': (4, 2, 2, 1024), 'Conv2d_4_3x3_s2_1024': (4, 1, 1, 1024), } init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) out_feature_maps = sess.run(feature_maps) out_feature_map_shapes = {key: value.shape for key, value in out_feature_maps.items()} if replace_pool_with_conv: self.assertDictEqual(expected_conv_feature_map_shapes, out_feature_map_shapes) else: self.assertDictEqual(expected_pool_feature_map_shapes, out_feature_map_shapes) def test_get_expected_variable_names(self, replace_pool_with_conv): image_features = { 'image_features': tf.random_uniform([4, 19, 19, 1024]) } feature_maps = feature_map_generators.pooling_pyramid_feature_maps( base_feature_map_depth=1024, num_layers=6, image_features=image_features, replace_pool_with_conv=replace_pool_with_conv) expected_pool_variables = set([ 'Base_Conv2d_1x1_1024/weights', 'Base_Conv2d_1x1_1024/biases', ]) expected_conv_variables = set([ 'Base_Conv2d_1x1_1024/weights', 'Base_Conv2d_1x1_1024/biases', 'Conv2d_0_3x3_s2_1024/weights', 'Conv2d_0_3x3_s2_1024/biases', 'Conv2d_1_3x3_s2_1024/weights', 'Conv2d_1_3x3_s2_1024/biases', 'Conv2d_2_3x3_s2_1024/weights', 'Conv2d_2_3x3_s2_1024/biases', 'Conv2d_3_3x3_s2_1024/weights', 'Conv2d_3_3x3_s2_1024/biases', 'Conv2d_4_3x3_s2_1024/weights', 'Conv2d_4_3x3_s2_1024/biases', ]) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) sess.run(feature_maps) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) if replace_pool_with_conv: self.assertSetEqual(expected_conv_variables, actual_variable_set) else: self.assertSetEqual(expected_pool_variables, actual_variable_set) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/feature_map_generators_test.py
feature_map_generators_test.py
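The layout dictionaries at the top of the test file translate directly into calls like the sketch below (TF1 graph mode, `tf_slim` assumed; only the required layout keys are kept). The returned `OrderedDict` keys mirror the `expected_feature_map_shapes` dictionaries used in the Inception V2 tests.

```python
import tensorflow.compat.v1 as tf
from object_detection.models import feature_map_generators

tf.disable_v2_behavior()

image_features = {
    'Mixed_3c': tf.random_uniform([4, 28, 28, 256]),
    'Mixed_4c': tf.random_uniform([4, 14, 14, 576]),
    'Mixed_5c': tf.random_uniform([4, 7, 7, 1024]),
}
feature_map_layout = {
    'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 256],
}
feature_maps = feature_map_generators.multi_resolution_feature_maps(
    feature_map_layout=feature_map_layout,
    depth_multiplier=1,
    min_depth=32,
    insert_1x1_conv=True,
    image_features=image_features)
# ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', 'Mixed_5c_2_Conv2d_3_3x3_s2_512',
#  'Mixed_5c_2_Conv2d_4_3x3_s2_256', 'Mixed_5c_2_Conv2d_5_3x3_s2_256']
print(list(feature_maps.keys()))
```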
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v1_ppn_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v1_ppn_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdMobilenetV1PpnFeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, is_training=True, use_explicit_padding=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. is_training: whether the network is in training mode. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 return (ssd_mobilenet_v1_ppn_feature_extractor. 
SSDMobileNetV1PpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) def test_extract_features_returns_correct_shapes_320(self): image_height = 320 image_width = 320 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), (2, 5, 5, 512), (2, 3, 3, 512), (2, 2, 2, 512), (2, 1, 1, 512)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_300(self): image_height = 300 image_width = 300 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 512), (2, 5, 5, 512), (2, 3, 3, 512), (2, 2, 2, 512), (2, 1, 1, 512)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_640(self): image_height = 640 image_width = 640 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 40, 40, 512), (2, 20, 20, 512), (2, 10, 10, 512), (2, 5, 5, 512), (2, 3, 3, 512), (2, 2, 2, 512)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_with_dynamic_image_shape(self): image_height = 320 image_width = 320 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), (2, 5, 5, 512), (2, 3, 3, 512), (2, 2, 2, 512), (2, 1, 1, 512)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), (2, 5, 5, 512), (2, 3, 3, 512), (2, 2, 2, 512)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): image_height = 256 image_width = 256 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32), (2, 2, 2, 32), (2, 1, 1, 32)] 
self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_raises_error_with_invalid_image_size(self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'MobilenetV1' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name) def test_has_fused_batchnorm(self): image_height = 320 image_width = 320 depth_multiplier = 1 pad_to_multiple = 1 image_placeholder = tf.placeholder(tf.float32, [1, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(image_placeholder) _ = feature_extractor.extract_features(preprocessed_image) self.assertTrue(any('FusedBatchNorm' in op.type for op in tf.get_default_graph().get_operations())) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py
ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py
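The `pad_to_multiple` case in the test above can be checked with a small hypothetical helper: a 299x299 input padded up to the nearest multiple of 32 becomes 320x320, which at stride 16 gives the 20x20 base map in `expected_feature_map_shape`.

```python
import math

def padded_size(size, multiple):
    # Smallest multiple of `multiple` that is >= size.
    return int(math.ceil(size / float(multiple)) * multiple)

assert padded_size(299, 32) == 320
assert padded_size(320, 1) == 320
assert padded_size(299, 32) // 16 == 20  # base PPN map is 20x20
```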
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for models.faster_rcnn_resnet_v1_fpn_keras_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_res_fpn from object_detection.protos import hyperparams_pb2 from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def _build_feature_extractor(self): return frcnn_res_fpn.FasterRCNNResnet50FpnKerasFeatureExtractor( is_training=False, conv_hyperparams=self._build_conv_hyperparams(), first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( [2, 448, 448, 3], maxval=255, dtype=tf.float32) rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shapes = [tf.shape(rpn_feature_map) for rpn_feature_map in rpn_feature_maps] self.assertAllEqual(features_shapes[0].numpy(), [2, 112, 112, 256]) self.assertAllEqual(features_shapes[1].numpy(), [2, 56, 56, 256]) self.assertAllEqual(features_shapes[2].numpy(), [2, 28, 28, 256]) self.assertAllEqual(features_shapes[3].numpy(), [2, 14, 14, 256]) self.assertAllEqual(features_shapes[4].numpy(), [2, 7, 7, 256]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( [2, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shapes = [tf.shape(rpn_feature_map) for rpn_feature_map in rpn_feature_maps] self.assertAllEqual(features_shapes[0].numpy(), [2, 56, 56, 256]) self.assertAllEqual(features_shapes[1].numpy(), [2, 28, 28, 256]) self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256]) self.assertAllEqual(features_shapes[3].numpy(), [2, 7, 7, 256]) self.assertAllEqual(features_shapes[4].numpy(), [2, 4, 4, 256]) def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor() proposal_feature_maps = tf.random_uniform( [3, 7, 7, 1024], maxval=255, dtype=tf.float32) model = 
feature_extractor.get_box_classifier_feature_extractor_model( name='TestScope') proposal_classifier_features = ( model(proposal_feature_maps)) features_shape = tf.shape(proposal_classifier_features) self.assertAllEqual(features_shape.numpy(), [3, 1, 1, 1024])
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py
faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py
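The five RPN feature maps asserted above correspond to FPN output strides of 4, 8, 16, 32 and 64, with sizes rounded up when the input does not divide evenly, as the half-size test's final [2, 4, 4, 256] shape implies. A quick check of that arithmetic:

```python
import math

fpn_strides = [4, 8, 16, 32, 64]
print([int(math.ceil(448 / s)) for s in fpn_strides])  # [112, 56, 28, 14, 7]
print([int(math.ceil(224 / s)) for s in fpn_strides])  # [56, 28, 14, 7, 4]
```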
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for InceptionV2 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import ops from object_detection.utils import shape_utils from nets import inception_v2 class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using InceptionV2 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """InceptionV2 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. Raises: ValueError: If `override_base_feature_extractor_hyperparams` is False. """ super(SSDInceptionV2FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) if not self._override_base_feature_extractor_hyperparams: raise ValueError('SSD Inception V2 feature extractor always uses' 'scope returned by `conv_hyperparams_fn` for both the ' 'base feature extractor and the additional layers ' 'added since there is no arg_scope defined for the base ' 'feature extractor.') def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
""" return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', '' ][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope('InceptionV2', reuse=self._reuse_weights) as scope: _, image_features = inception_v2.inception_v2_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Mixed_5c', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope) feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_inception_v2_feature_extractor.py
ssd_inception_v2_feature_extractor.py
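A quick numeric check of the `preprocess` mapping defined in the extractor above, which rescales pixel values from [0, 255] into [-1, 1]:

```python
import numpy as np

pixels = np.array([0.0, 127.5, 255.0])
print((2.0 / 255.0) * pixels - 1.0)  # [-1.  0.  1.]
```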
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Testing mobilenet_v2+FPN feature extractor for CenterNet.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetMobileNetV2FPNFeatureExtractorTest(test_case.TestCase): def test_center_net_mobilenet_v2_fpn_feature_extractor(self): channel_means = (0., 0., 0.) channel_stds = (1., 1., 1.) bgr_ordering = False model = ( center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn( channel_means, channel_stds, bgr_ordering, use_separable_conv=False)) def graph_fn(): img = np.zeros((8, 224, 224, 3), dtype=np.float32) processed_img = model.preprocess(img) return model(processed_img) outputs = self.execute(graph_fn, []) self.assertEqual(outputs.shape, (8, 56, 56, 24)) # Pull out the FPN network. output = model.get_layer('model_1') for layer in output.layers: # All convolution layers should be normal 2D convolutions. if 'conv' in layer.name: self.assertIsInstance(layer, tf.keras.layers.Conv2D) def test_center_net_mobilenet_v2_fpn_feature_extractor_sep_conv(self): channel_means = (0., 0., 0.) channel_stds = (1., 1., 1.) bgr_ordering = False model = ( center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn( channel_means, channel_stds, bgr_ordering, use_separable_conv=True)) def graph_fn(): img = np.zeros((8, 224, 224, 3), dtype=np.float32) processed_img = model.preprocess(img) return model(processed_img) outputs = self.execute(graph_fn, []) self.assertEqual(outputs.shape, (8, 56, 56, 24)) # Pull out the FPN network. backbone = model.get_layer('model') first_conv = backbone.get_layer('Conv1') self.assertEqual(32, first_conv.filters) # Pull out the FPN network. output = model.get_layer('model_1') for layer in output.layers: # Convolution layers with kernel size not equal to (1, 1) should be # separable 2D convolutions. if 'conv' in layer.name and layer.kernel_size != (1, 1): self.assertIsInstance(layer, tf.keras.layers.SeparableConv2D) def test_center_net_mobilenet_v2_fpn_feature_extractor_depth_multiplier(self): channel_means = (0., 0., 0.) channel_stds = (1., 1., 1.) bgr_ordering = False model = ( center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn( channel_means, channel_stds, bgr_ordering, use_separable_conv=True, depth_multiplier=2.0)) def graph_fn(): img = np.zeros((8, 224, 224, 3), dtype=np.float32) processed_img = model.preprocess(img) return model(processed_img) outputs = self.execute(graph_fn, []) self.assertEqual(outputs.shape, (8, 56, 56, 24)) # Pull out the FPN network. 
backbone = model.get_layer('model') first_conv = backbone.get_layer('Conv1') # Note that the first layer typically has 32 filters, but this model has # a depth multiplier of 2. self.assertEqual(64, first_conv.filters) def test_center_net_mobilenet_v2_fpn_feature_extractor_interpolation(self): channel_means = (0., 0., 0.) channel_stds = (1., 1., 1.) bgr_ordering = False model = ( center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn( channel_means, channel_stds, bgr_ordering, use_separable_conv=True, upsampling_interpolation='bilinear')) def graph_fn(): img = np.zeros((8, 224, 224, 3), dtype=np.float32) processed_img = model.preprocess(img) return model(processed_img) outputs = self.execute(graph_fn, []) self.assertEqual(outputs.shape, (8, 56, 56, 24)) # Verify the upsampling layers in the FPN use 'bilinear' interpolation. fpn = model.get_layer('model_1') for layer in fpn.layers: if 'up_sampling2d' in layer.name: self.assertEqual('bilinear', layer.interpolation) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor_tf2_test.py
center_net_mobilenet_v2_fpn_feature_extractor_tf2_test.py
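The tests above check three properties of the MobileNetV2+FPN extractor: a stride-4 output resolution (224 input, 56 output), the type of convolution used in the FPN, and the upsampling interpolation. A minimal plain-Keras sketch (not the CenterNet extractor itself; the layer sizes and names below are illustrative only) of how stacked 2x upsampling layers recover stride-4 features and expose the interpolation attribute the last test inspects:

import tensorflow as tf

inputs = tf.keras.Input(shape=(7, 7, 1280))  # e.g. a stride-32 backbone output for a 224x224 image
x = inputs
for _ in range(3):  # 7 -> 14 -> 28 -> 56
  x = tf.keras.layers.UpSampling2D(interpolation='bilinear')(x)
  x = tf.keras.layers.Conv2D(24, 3, padding='same')(x)
fpn_like = tf.keras.Model(inputs, x)

print(fpn_like.output_shape)  # (None, 56, 56, 24)
for layer in fpn_like.layers:
  if 'up_sampling2d' in layer.name:
    print(layer.name, layer.interpolation)  # each reports 'bilinear'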
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Testing ResNet v1 FPN models for the CenterNet meta architecture.""" import unittest from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import center_net_resnet_v1_fpn_feature_extractor from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetResnetV1FpnFeatureExtractorTest(test_case.TestCase, parameterized.TestCase): @parameterized.parameters( {'resnet_type': 'resnet_v1_50'}, {'resnet_type': 'resnet_v1_101'}, {'resnet_type': 'resnet_v1_18'}, {'resnet_type': 'resnet_v1_34'}, ) def test_correct_output_size(self, resnet_type): """Verify that shape of features returned by the backbone is correct.""" model = center_net_resnet_v1_fpn_feature_extractor.\ CenterNetResnetV1FpnFeatureExtractor(resnet_type) def graph_fn(): img = np.zeros((8, 512, 512, 3), dtype=np.float32) processed_img = model.preprocess(img) return model(processed_img) self.assertEqual(self.execute(graph_fn, []).shape, (8, 128, 128, 64)) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py
center_net_resnet_v1_fpn_feature_extractor_tf2_test.py
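The asserted shape (8, 128, 128, 64) follows from the CenterNet convention of emitting features at output stride 4, so a 512x512 input yields 512 / 4 = 128 positions per side. A small sketch of that arithmetic (the helper name is illustrative, not part of the library):

def expected_feature_shape(batch, height, width, output_stride, channels):
  # CenterNet-style FPN heads emit features at output stride 4.
  return (batch, height // output_stride, width // output_stride, channels)

print(expected_feature_shape(8, 512, 512, output_stride=4, channels=64))
# (8, 128, 128, 64) -- the shape asserted for every ResNet v1 backbone above.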
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobiledet_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobiledet_feature_extractor from object_detection.utils import tf_version try: from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top except: # pylint: disable=bare-except pass @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SSDMobileDetFeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, feature_extractor_cls, is_training=False, depth_multiplier=1.0, pad_to_multiple=1, use_explicit_padding=False, use_keras=False): """Constructs a new MobileDet feature extractor. Args: feature_extractor_cls: feature extractor class. is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: If True, we will use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. Returns: an ssd_meta_arch.SSDMobileDetFeatureExtractor object. 
""" min_depth = 32 return feature_extractor_cls( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding) def test_mobiledet_cpu_returns_correct_shapes(self): expected_feature_map_shapes = [(2, 40, 20, 72), (2, 20, 10, 144), (2, 10, 5, 512), (2, 5, 3, 256), (2, 3, 2, 256), (2, 2, 1, 128)] feature_extractor = self._create_feature_extractor( ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor) image = tf.random.normal((2, 640, 320, 3)) feature_maps = feature_extractor.extract_features(image) self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): self.assertTrue(x.shape.is_compatible_with(expected_shape)) def test_mobiledet_dsp_returns_correct_shapes(self): expected_feature_map_shapes = [(2, 40, 20, 144), (2, 20, 10, 240), (2, 10, 5, 512), (2, 5, 3, 256), (2, 3, 2, 256), (2, 2, 1, 128)] feature_extractor = self._create_feature_extractor( ssd_mobiledet_feature_extractor.SSDMobileDetDSPFeatureExtractor) image = tf.random.normal((2, 640, 320, 3)) feature_maps = feature_extractor.extract_features(image) self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): self.assertTrue(x.shape.is_compatible_with(expected_shape)) def test_mobiledet_edgetpu_returns_correct_shapes(self): expected_feature_map_shapes = [(2, 40, 20, 96), (2, 20, 10, 384), (2, 10, 5, 512), (2, 5, 3, 256), (2, 3, 2, 256), (2, 2, 1, 128)] feature_extractor = self._create_feature_extractor( ssd_mobiledet_feature_extractor.SSDMobileDetEdgeTPUFeatureExtractor) image = tf.random.normal((2, 640, 320, 3)) feature_maps = feature_extractor.extract_features(image) self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): self.assertTrue(x.shape.is_compatible_with(expected_shape)) def test_mobiledet_gpu_returns_correct_shapes(self): expected_feature_map_shapes = [(2, 40, 20, 128), (2, 20, 10, 384), (2, 10, 5, 512), (2, 5, 3, 256), (2, 3, 2, 256), (2, 2, 1, 128)] feature_extractor = self._create_feature_extractor( ssd_mobiledet_feature_extractor.SSDMobileDetGPUFeatureExtractor) image = tf.random.normal((2, 640, 320, 3)) feature_maps = feature_extractor.extract_features(image) self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): self.assertTrue(x.shape.is_compatible_with(expected_shape)) def _check_quantization(self, model_fn): checkpoint_dir = self.get_temp_dir() with tf.Graph().as_default() as training_graph: model_fn(is_training=True) contrib_quantize.experimental_create_training_graph(training_graph) with self.session(graph=training_graph) as sess: sess.run(tf.global_variables_initializer()) tf.train.Saver().save(sess, checkpoint_dir) with tf.Graph().as_default() as eval_graph: model_fn(is_training=False) contrib_quantize.experimental_create_eval_graph(eval_graph) with self.session(graph=eval_graph) as sess: tf.train.Saver().restore(sess, checkpoint_dir) def test_mobiledet_cpu_quantization(self): def model_fn(is_training): feature_extractor = self._create_feature_extractor( ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor, is_training=is_training) image = tf.random.normal((2, 320, 320, 3)) feature_extractor.extract_features(image) self._check_quantization(model_fn) def 
test_mobiledet_dsp_quantization(self): def model_fn(is_training): feature_extractor = self._create_feature_extractor( ssd_mobiledet_feature_extractor.SSDMobileDetDSPFeatureExtractor, is_training=is_training) image = tf.random.normal((2, 320, 320, 3)) feature_extractor.extract_features(image) self._check_quantization(model_fn) def test_mobiledet_edgetpu_quantization(self): def model_fn(is_training): feature_extractor = self._create_feature_extractor( ssd_mobiledet_feature_extractor.SSDMobileDetEdgeTPUFeatureExtractor, is_training=is_training) image = tf.random.normal((2, 320, 320, 3)) feature_extractor.extract_features(image) self._check_quantization(model_fn) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobiledet_feature_extractor_tf1_test.py
ssd_mobiledet_feature_extractor_tf1_test.py
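The spatial dimensions in every expected_feature_map_shapes list above come from the same pattern: the first SSD map sits at output stride 16, and each later map halves the previous one, with SAME padding giving ceiling division for odd sizes. A short sketch of that arithmetic (helper name illustrative only):

import math

def pyramid_spatial_sizes(height, width, num_levels, first_stride=16):
  # First map at stride 16; each subsequent map halves with ceiling division.
  sizes = [(height // first_stride, width // first_stride)]
  for _ in range(num_levels - 1):
    h, w = sizes[-1]
    sizes.append((math.ceil(h / 2), math.ceil(w / 2)))
  return sizes

print(pyramid_spatial_sizes(640, 320, num_levels=6))
# [(40, 20), (20, 10), (10, 5), (5, 3), (3, 2), (2, 1)]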
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for faster_rcnn_inception_v2_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import faster_rcnn_inception_v2_feature_extractor as faster_rcnn_inception_v2 from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class FasterRcnnInceptionV2FeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride): return faster_rcnn_inception_v2.FasterRCNNInceptionV2FeatureExtractor( is_training=False, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [4, 14, 14, 576]) def test_extract_proposal_features_stride_eight(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=8) preprocessed_inputs = tf.random_uniform( [4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [4, 14, 14, 576]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 7, 7, 576]) def test_extract_proposal_features_dies_on_invalid_stride(self): with self.assertRaises(ValueError): self._build_feature_extractor(first_stage_features_stride=99) def test_extract_proposal_features_dies_on_very_small_images(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) rpn_feature_map, _ = 
feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) with self.assertRaises(tf.errors.InvalidArgumentError): sess.run( features_shape, feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(ValueError): feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [3, 14, 14, 576], maxval=255, dtype=tf.float32) proposal_classifier_features = ( feature_extractor.extract_box_classifier_features( proposal_feature_maps, scope='TestScope')) features_shape = tf.shape(proposal_classifier_features) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [3, 7, 7, 1024]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py
faster_rcnn_inception_v2_feature_extractor_tf1_test.py
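The "dies_on_very_small_images" test exercises a graph-mode size assertion that only fires when too-small images are fed at run time. A minimal TF1-style sketch of that pattern (a stand-in, not the Object Detection API feature extractor; the 33-pixel minimum matches the docstring of the extractor below):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

images = tf.placeholder(tf.float32, shape=(None, None, None, 3))
height, width = tf.shape(images)[1], tf.shape(images)[2]
size_ok = tf.logical_and(height >= 33, width >= 33)
assert_op = tf.Assert(size_ok, ['image size must be at least 33x33'])
with tf.control_dependencies([assert_op]):
  features = tf.identity(images)  # stand-in for the real feature extractor

with tf.Session() as sess:
  # Large enough: runs cleanly.
  sess.run(features, feed_dict={images: np.zeros((1, 64, 64, 3), np.float32)})
  # Too small: raises InvalidArgumentError, which is what the test expects.
  try:
    sess.run(features, feed_dict={images: np.zeros((1, 32, 32, 3), np.float32)})
  except tf.errors.InvalidArgumentError as e:
    print('assertion fired:', e.message.splitlines()[0])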
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions to generate bidirectional feature pyramids based on image features. Provides bidirectional feature pyramid network (BiFPN) generators that can be used to build object detection feature extractors, as proposed by Tan et al. See https://arxiv.org/abs/1911.09070 for more details. """ import collections import functools from six.moves import range from six.moves import zip import tensorflow as tf from object_detection.utils import bifpn_utils def _create_bifpn_input_config(fpn_min_level, fpn_max_level, input_max_level, level_scales=None): """Creates a BiFPN input config for the input levels from a backbone network. Args: fpn_min_level: the minimum pyramid level (highest feature map resolution) to use in the BiFPN. fpn_max_level: the maximum pyramid level (lowest feature map resolution) to use in the BiFPN. input_max_level: the maximum pyramid level that will be provided as input to the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels from input_max_level, up to the desired fpn_max_level. level_scales: a list of pyramid level scale factors. If 'None', each level's scale is set to 2^level by default, which corresponds to each successive feature map scaling by a factor of 2. Returns: A list of dictionaries for each feature map expected as input to the BiFPN, where each has entries for the feature map 'name' and 'scale'. """ if not level_scales: level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)] bifpn_input_params = [] for i in range(fpn_min_level, min(fpn_max_level, input_max_level) + 1): bifpn_input_params.append({ 'name': '0_up_lvl_{}'.format(i), 'scale': level_scales[i - fpn_min_level] }) return bifpn_input_params def _get_bifpn_output_node_names(fpn_min_level, fpn_max_level, node_config): """Returns a list of BiFPN output node names, given a BiFPN node config. Args: fpn_min_level: the minimum pyramid level (highest feature map resolution) used by the BiFPN. fpn_max_level: the maximum pyramid level (lowest feature map resolution) used by the BiFPN. node_config: the BiFPN node_config, a list of dictionaries corresponding to each node in the BiFPN computation graph, where each entry should have an associated 'name'. Returns: A list of strings corresponding to the names of the output BiFPN nodes. """ num_output_nodes = fpn_max_level - fpn_min_level + 1 return [node['name'] for node in node_config[-num_output_nodes:]] def _create_bifpn_node_config(bifpn_num_iterations, bifpn_num_filters, fpn_min_level, fpn_max_level, input_max_level, bifpn_node_params=None, level_scales=None): """Creates a config specifying a bidirectional feature pyramid network. Args: bifpn_num_iterations: the number of top-down bottom-up feature computations to repeat in the BiFPN. bifpn_num_filters: the number of filters (channels) for every feature map used in the BiFPN. 
fpn_min_level: the minimum pyramid level (highest feature map resolution) to use in the BiFPN. fpn_max_level: the maximum pyramid level (lowest feature map resolution) to use in the BiFPN. input_max_level: the maximum pyramid level that will be provided as input to the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels from input_max_level, up to the desired fpn_max_level. bifpn_node_params: If not 'None', a dictionary of additional default BiFPN node parameters that will be applied to all BiFPN nodes. level_scales: a list of pyramid level scale factors. If 'None', each level's scale is set to 2^level by default, which corresponds to each successive feature map scaling by a factor of 2. Returns: A list of dictionaries used to define nodes in the BiFPN computation graph, as proposed by EfficientDet, Tan et al (https://arxiv.org/abs/1911.09070). Each node's entry has the corresponding keys: name: String. The name of this node in the BiFPN. The node name follows the format '{bifpn_iteration}_{dn|up}_lvl_{pyramid_level}', where 'dn' or 'up' refers to whether the node is in the top-down or bottom-up portion of a single BiFPN iteration. scale: the scale factor for this node, by default 2^level. inputs: A list of names of nodes which are inputs to this node. num_channels: The number of channels for this node. combine_method: String. Name of the method used to combine input node feature maps, 'fast_attention' by default for nodes which have more than one input. Otherwise, 'None' for nodes with only one input node. input_op: A (partial) function which is called to construct the layers that will be applied to this BiFPN node's inputs. This function is called with the arguments: input_op(name, input_scale, input_num_channels, output_scale, output_num_channels, conv_hyperparams, is_training, freeze_batchnorm) post_combine_op: A (partial) function which is called to construct the layers that will be applied to the result of the combine operation for this BiFPN node. This function will be called with the arguments: post_combine_op(name, conv_hyperparams, is_training, freeze_batchnorm) If 'None', then no layers will be applied after the combine operation for this node. """ if not level_scales: level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)] default_node_params = { 'num_channels': bifpn_num_filters, 'combine_method': 'fast_attention', 'input_op': functools.partial( _create_bifpn_resample_block, downsample_method='max_pooling'), 'post_combine_op': functools.partial( bifpn_utils.create_conv_block, num_filters=bifpn_num_filters, kernel_size=3, strides=1, padding='SAME', use_separable=True, apply_batchnorm=True, apply_activation=True, conv_bn_act_pattern=False), } if bifpn_node_params: default_node_params.update(bifpn_node_params) bifpn_node_params = [] # Create additional base pyramid levels not provided as input to the BiFPN. # Note, combine_method and post_combine_op are set to None for additional # base pyramid levels because they do not combine multiple input BiFPN nodes. 
for i in range(input_max_level + 1, fpn_max_level + 1): node_params = dict(default_node_params) node_params.update({ 'name': '0_up_lvl_{}'.format(i), 'scale': level_scales[i - fpn_min_level], 'inputs': ['0_up_lvl_{}'.format(i - 1)], 'combine_method': None, 'post_combine_op': None, }) bifpn_node_params.append(node_params) for i in range(bifpn_num_iterations): # The first bottom-up feature pyramid (which includes the input pyramid # levels from the backbone network and the additional base pyramid levels) # is indexed at 0. So, the first top-down bottom-up pass of the BiFPN is # indexed from 1, and repeated for bifpn_num_iterations iterations. bifpn_i = i + 1 # Create top-down nodes. for level_i in reversed(range(fpn_min_level, fpn_max_level)): inputs = [] # BiFPN nodes in the top-down pass receive input from the corresponding # level from the previous BiFPN iteration's bottom-up pass, except for the # bottom-most (min) level node, which is computed once in the initial # bottom-up pass, and is afterwards only computed in each top-down pass. if level_i > fpn_min_level or bifpn_i == 1: inputs.append('{}_up_lvl_{}'.format(bifpn_i - 1, level_i)) else: inputs.append('{}_dn_lvl_{}'.format(bifpn_i - 1, level_i)) inputs.append(bifpn_node_params[-1]['name']) node_params = dict(default_node_params) node_params.update({ 'name': '{}_dn_lvl_{}'.format(bifpn_i, level_i), 'scale': level_scales[level_i - fpn_min_level], 'inputs': inputs }) bifpn_node_params.append(node_params) # Create bottom-up nodes. for level_i in range(fpn_min_level + 1, fpn_max_level + 1): # BiFPN nodes in the bottom-up pass receive input from the corresponding # level from the preceding top-down pass, except for the top (max) level # which does not have a corresponding node in the top-down pass. inputs = ['{}_up_lvl_{}'.format(bifpn_i - 1, level_i)] if level_i < fpn_max_level: inputs.append('{}_dn_lvl_{}'.format(bifpn_i, level_i)) inputs.append(bifpn_node_params[-1]['name']) node_params = dict(default_node_params) node_params.update({ 'name': '{}_up_lvl_{}'.format(bifpn_i, level_i), 'scale': level_scales[level_i - fpn_min_level], 'inputs': inputs }) bifpn_node_params.append(node_params) return bifpn_node_params def _create_bifpn_resample_block(name, input_scale, input_num_channels, output_scale, output_num_channels, conv_hyperparams, is_training, freeze_batchnorm, downsample_method=None, use_native_resize_op=False, maybe_apply_1x1_conv=True, apply_1x1_pre_sampling=True, apply_1x1_post_sampling=False): """Creates resample block layers for input feature maps to BiFPN nodes. Args: name: String. Name used for this block of layers. input_scale: Scale factor of the input feature map. input_num_channels: Number of channels in the input feature map. output_scale: Scale factor of the output feature map. output_num_channels: Number of channels in the output feature map. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. is_training: Indicates whether the feature generator is in training mode. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. downsample_method: String. Method to use when downsampling feature maps. use_native_resize_op: Bool. Whether to use the native resize up when upsampling feature maps. maybe_apply_1x1_conv: Bool. 
If 'True', a 1x1 convolution will only be applied if the input_num_channels differs from the output_num_channels. apply_1x1_pre_sampling: Bool. Whether a 1x1 convolution will be applied to the input feature map before the up/down-sampling operation. apply_1x1_post_sampling: Bool. Whether a 1x1 convolution will be applied to the input feature map after the up/down-sampling operation. Returns: A list of layers which may be applied to the input feature maps in order to compute feature maps with the specified scale and number of channels. """ # By default, 1x1 convolutions are only applied before sampling when the # number of input and output channels differ. if maybe_apply_1x1_conv and output_num_channels == input_num_channels: apply_1x1_pre_sampling = False apply_1x1_post_sampling = False apply_bn_for_resampling = True layers = [] if apply_1x1_pre_sampling: layers.extend( bifpn_utils.create_conv_block( name=name + '1x1_pre_sample/', num_filters=output_num_channels, kernel_size=1, strides=1, padding='SAME', use_separable=False, apply_batchnorm=apply_bn_for_resampling, apply_activation=False, conv_hyperparams=conv_hyperparams, is_training=is_training, freeze_batchnorm=freeze_batchnorm)) layers.extend( bifpn_utils.create_resample_feature_map_ops(input_scale, output_scale, downsample_method, use_native_resize_op, conv_hyperparams, is_training, freeze_batchnorm, name)) if apply_1x1_post_sampling: layers.extend( bifpn_utils.create_conv_block( name=name + '1x1_post_sample/', num_filters=output_num_channels, kernel_size=1, strides=1, padding='SAME', use_separable=False, apply_batchnorm=apply_bn_for_resampling, apply_activation=False, conv_hyperparams=conv_hyperparams, is_training=is_training, freeze_batchnorm=freeze_batchnorm)) return layers def _create_bifpn_combine_op(num_inputs, name, combine_method): """Creates a BiFPN output config, a list of the output BiFPN node names. Args: num_inputs: The number of inputs to this combine operation. name: String. The name of this combine operation. combine_method: String. The method used to combine input feature maps. Returns: A function which may be called with a list of num_inputs feature maps and which will return a single feature map. """ combine_op = None if num_inputs < 1: raise ValueError('Expected at least 1 input for BiFPN combine.') elif num_inputs == 1: combine_op = lambda x: x[0] else: combine_op = bifpn_utils.BiFPNCombineLayer( combine_method=combine_method, name=name) return combine_op class KerasBiFpnFeatureMaps(tf.keras.Model): """Generates Keras based BiFPN feature maps from an input feature map pyramid. A Keras model that generates multi-scale feature maps for detection by iteratively computing top-down and bottom-up feature pyramids, as in the EfficientDet paper by Tan et al, see arxiv.org/abs/1911.09070 for details. """ def __init__(self, bifpn_num_iterations, bifpn_num_filters, fpn_min_level, fpn_max_level, input_max_level, is_training, conv_hyperparams, freeze_batchnorm, bifpn_node_params=None, name=None): """Constructor. Args: bifpn_num_iterations: The number of top-down bottom-up iterations. bifpn_num_filters: The number of filters (channels) to be used for all feature maps in this BiFPN. fpn_min_level: The minimum pyramid level (highest feature map resolution) to use in the BiFPN. fpn_max_level: The maximum pyramid level (lowest feature map resolution) to use in the BiFPN. input_max_level: The maximum pyramid level that will be provided as input to the BiFPN. 
Accordingly, the BiFPN will compute any additional pyramid levels from input_max_level up to the desired fpn_max_level, with each successivel level downsampling by a scale factor of 2 by default. is_training: Indicates whether the feature generator is in training mode. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. bifpn_node_params: An optional dictionary that may be used to specify default parameters for BiFPN nodes, without the need to provide a custom bifpn_node_config. For example, if '{ combine_method: 'sum' }', then all BiFPN nodes will combine input feature maps by summation, rather than by the default fast attention method. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(KerasBiFpnFeatureMaps, self).__init__(name=name) bifpn_node_config = _create_bifpn_node_config( bifpn_num_iterations, bifpn_num_filters, fpn_min_level, fpn_max_level, input_max_level, bifpn_node_params) bifpn_input_config = _create_bifpn_input_config( fpn_min_level, fpn_max_level, input_max_level) bifpn_output_node_names = _get_bifpn_output_node_names( fpn_min_level, fpn_max_level, bifpn_node_config) self.bifpn_node_config = bifpn_node_config self.bifpn_output_node_names = bifpn_output_node_names self.node_input_blocks = [] self.node_combine_op = [] self.node_post_combine_block = [] all_node_params = bifpn_input_config all_node_names = [node['name'] for node in all_node_params] for node_config in bifpn_node_config: # Maybe transform and/or resample input feature maps. input_blocks = [] for input_name in node_config['inputs']: if input_name not in all_node_names: raise ValueError( 'Input feature map ({}) does not exist:'.format(input_name)) input_index = all_node_names.index(input_name) input_params = all_node_params[input_index] input_block = node_config['input_op']( name='{}/input_{}/'.format(node_config['name'], input_name), input_scale=input_params['scale'], input_num_channels=input_params.get('num_channels', None), output_scale=node_config['scale'], output_num_channels=node_config['num_channels'], conv_hyperparams=conv_hyperparams, is_training=is_training, freeze_batchnorm=freeze_batchnorm) input_blocks.append((input_index, input_block)) # Combine input feature maps. combine_op = _create_bifpn_combine_op( num_inputs=len(input_blocks), name=(node_config['name'] + '/combine'), combine_method=node_config['combine_method']) # Post-combine layers. post_combine_block = [] if node_config['post_combine_op']: post_combine_block.extend(node_config['post_combine_op']( name=node_config['name'] + '/post_combine/', conv_hyperparams=conv_hyperparams, is_training=is_training, freeze_batchnorm=freeze_batchnorm)) self.node_input_blocks.append(input_blocks) self.node_combine_op.append(combine_op) self.node_post_combine_block.append(post_combine_block) all_node_params.append(node_config) all_node_names.append(node_config['name']) def call(self, feature_pyramid): """Compute BiFPN feature maps from input feature pyramid. Executed when calling the `.__call__` method on input. Args: feature_pyramid: list of tuples of (tensor_name, image_feature_tensor). 
Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. """ feature_maps = [el[1] for el in feature_pyramid] output_feature_maps = [None for node in self.bifpn_output_node_names] for index, node in enumerate(self.bifpn_node_config): node_scope = 'node_{:02d}'.format(index) with tf.name_scope(node_scope): # Apply layer blocks to this node's input feature maps. input_block_results = [] for input_index, input_block in self.node_input_blocks[index]: block_result = feature_maps[input_index] for layer in input_block: block_result = layer(block_result) input_block_results.append(block_result) # Combine the resulting feature maps. node_result = self.node_combine_op[index](input_block_results) # Apply post-combine layer block if applicable. for layer in self.node_post_combine_block[index]: node_result = layer(node_result) feature_maps.append(node_result) if node['name'] in self.bifpn_output_node_names: index = self.bifpn_output_node_names.index(node['name']) output_feature_maps[index] = node_result return collections.OrderedDict( zip(self.bifpn_output_node_names, output_feature_maps))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/bidirectional_feature_pyramid_generators.py
bidirectional_feature_pyramid_generators.py
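The default combine_method above is 'fast_attention', EfficientDet's fast normalized fusion: each input feature map gets a learned non-negative weight and the weighted sum is normalized by the weight total. A minimal sketch of that fusion (this is not bifpn_utils.BiFPNCombineLayer, just an illustration of the named method):

import tensorflow as tf

class FastAttentionCombine(tf.keras.layers.Layer):
  """Combines same-shape feature maps with fast normalized fusion."""

  def build(self, input_shape):
    num_inputs = len(input_shape)
    self.w = self.add_weight(
        name='fusion_weights', shape=(num_inputs,),
        initializer='ones', trainable=True)

  def call(self, inputs):
    weights = tf.nn.relu(self.w)                 # keep fusion weights non-negative
    normalizer = tf.reduce_sum(weights) + 1e-4   # small epsilon for stability
    stacked = tf.stack(inputs, axis=-1)          # [..., channels, num_inputs]
    return tf.reduce_sum(stacked * weights, axis=-1) / normalizer

# Usage: fuse two same-resolution maps such as a BiFPN node might receive.
a = tf.random.uniform([2, 8, 8, 128])
b = tf.random.uniform([2, 8, 8, 128])
print(FastAttentionCombine()([a, b]).shape)  # (2, 8, 8, 128)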
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for MobilenetV2 features.""" import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import mobilenet_v2 from object_detection.utils import ops from object_detection.utils import shape_utils class SSDMobileNetV2KerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using MobilenetV2 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False, name=None): """MobileNetV2 Feature Extractor for SSD Models. Mobilenet v2 (experimental), designed by sandler@. More details can be found in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor (Functions as a width multiplier for the mobilenet_v2 network itself). min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDMobileNetV2KerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) self._feature_map_layout = { 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', '' ][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_depthwise': self._use_depthwise, 'use_explicit_padding': self._use_explicit_padding, } self.classification_backbone = None self.feature_map_generator = None def build(self, input_shape): full_mobilenet_v2 = mobilenet_v2.mobilenet_v2( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), weights=None, use_explicit_padding=self._use_explicit_padding, alpha=self._depth_multiplier, min_depth=self._min_depth, include_top=False) conv2d_11_pointwise = full_mobilenet_v2.get_layer( name='block_13_expand_relu').output conv2d_13_pointwise = full_mobilenet_v2.get_layer(name='out_relu').output self.classification_backbone = tf.keras.Model( inputs=full_mobilenet_v2.inputs, outputs=[conv2d_11_pointwise, conv2d_13_pointwise]) self.feature_map_generator = ( feature_map_generators.KerasMultiResolutionFeatureMaps( feature_map_layout=self._feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_maps = self.feature_map_generator({ 'layer_15/expansion_output': image_features[0], 'layer_19': image_features[1]}) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py
ssd_mobilenet_v2_keras_feature_extractor.py
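Before the backbone runs, _extract_features pads the input up to the nearest multiple of pad_to_multiple so every stride divides the image cleanly. A simplified stand-in for ops.pad_to_multiple (not the library function itself) showing the idea:

import tensorflow as tf

def pad_to_multiple_sketch(images, multiple):
  """Zero-pads [batch, height, width, channels] images on the bottom/right."""
  height = tf.shape(images)[1]
  width = tf.shape(images)[2]
  padded_height = ((height + multiple - 1) // multiple) * multiple
  padded_width = ((width + multiple - 1) // multiple) * multiple
  return tf.image.pad_to_bounding_box(images, 0, 0, padded_height, padded_width)

images = tf.zeros([2, 299, 299, 3])
print(pad_to_multiple_sketch(images, 32).shape)  # (2, 320, 320, 3)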
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.models.ssd_inception_v2_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_inception_v2_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdInceptionV2FeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, num_layers=6, is_training=True): """Constructs a SsdInceptionV2FeatureExtractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. num_layers: number of SSD layers. is_training: whether the network is in training mode. Returns: an ssd_inception_v2_feature_extractor.SsdInceptionV2FeatureExtractor. 
""" min_depth = 32 return ssd_inception_v2_feature_extractor.SSDInceptionV2FeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, num_layers=num_layers, override_base_feature_extractor_hyperparams=True) def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1024), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 128), (2, 10, 10, 128), (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), (2, 1, 1, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1024), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_raises_error_with_invalid_image_size(self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(4, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'InceptionV2' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name) def test_extract_features_with_fewer_layers(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256)] 
self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, num_layers=4) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_inception_v2_feature_extractor_tf1_test.py
ssd_inception_v2_feature_extractor_tf1_test.py
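The min-depth test above uses a depth multiplier of 0.5**12, so the channel floor dominates. A short sketch of the depth arithmetic, matching the `max(int(d * multiplier), min_depth)` idiom used throughout these extractors (the first two expected maps stay at 128 presumably because the floor is applied per Inception branch before concatenation inside the backbone):

def scaled_depth(requested_channels, depth_multiplier, min_depth):
  # Same rule as `depth = lambda d: max(int(d * multiplier), min_depth)`.
  return max(int(requested_channels * depth_multiplier), min_depth)

# The four extra SSD layers request 512, 256, 256 and 128 channels; at the
# test's multiplier of 0.5**12 they all floor at min_depth=32, which is why
# the last four expected feature maps are 32 channels wide.
for channels in (512, 256, 256, 128):
  print(channels, '->', scaled_depth(channels, 0.5 ** 12, min_depth=32))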
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inception V2 Faster R-CNN implementation. See "Rethinking the Inception Architecture for Computer Vision" https://arxiv.org/abs/1512.00567 """ import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from nets import inception_v2 def _batch_norm_arg_scope(list_ops, use_batch_norm=True, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, batch_norm_scale=False, train_batch_norm=False): """Slim arg scope for InceptionV2 batch norm.""" if use_batch_norm: batch_norm_params = { 'is_training': train_batch_norm, 'scale': batch_norm_scale, 'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon } normalizer_fn = slim.batch_norm else: normalizer_fn = None batch_norm_params = None return slim.arg_scope(list_ops, normalizer_fn=normalizer_fn, normalizer_params=batch_norm_params) class FasterRCNNInceptionV2FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN Inception V2 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, depth_multiplier=1.0, min_depth=16): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') self._depth_multiplier = depth_multiplier self._min_depth = min_depth super(FasterRCNNInceptionV2FeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN Inception V2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] activations: A dictionary mapping feature extractor tensor names to tensors Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. 
""" preprocessed_inputs.get_shape().assert_has_rank(4) shape_assert = tf.Assert( tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.']) with tf.control_dependencies([shape_assert]): with tf.variable_scope('InceptionV2', reuse=self._reuse_weights) as scope: with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d], batch_norm_scale=True, train_batch_norm=self._train_batch_norm): _, activations = inception_v2.inception_v2_base( preprocessed_inputs, final_endpoint='Mixed_4e', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope) return activations['Mixed_4e'], activations def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ net = proposal_feature_maps depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth) trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) data_format = 'NHWC' concat_dim = 3 if data_format == 'NHWC' else 1 with tf.variable_scope('InceptionV2', reuse=self._reuse_weights): with slim.arg_scope( [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME', data_format=data_format): with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d], batch_norm_scale=True, train_batch_norm=self._train_batch_norm): with tf.variable_scope('Mixed_5a'): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat([branch_0, branch_1, branch_2], concat_dim) with tf.variable_scope('Mixed_5b'): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat([branch_0, branch_1, branch_2, branch_3], concat_dim) with 
tf.variable_scope('Mixed_5c'): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') proposal_classifier_features = tf.concat( [branch_0, branch_1, branch_2, branch_3], concat_dim) return proposal_classifier_features
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py
faster_rcnn_inception_v2_feature_extractor.py
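A quick numeric check (a sketch, not part of the library) of the preprocess() mapping used here and in the SSD extractors above: (2.0 / 255.0) * x - 1.0 sends pixel values in [0, 255] to [-1, 1].

import numpy as np

pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)
preprocessed = (2.0 / 255.0) * pixels - 1.0
print(preprocessed)  # [-1.  0.  1.]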
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inception Resnet v2 Faster R-CNN implementation. See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261) as well as "Speed/accuracy trade-offs for modern convolutional object detectors" by Huang et al. (https://arxiv.org/abs/1611.10012) """ import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import variables_helper from nets import inception_resnet_v2 class FasterRCNNInceptionResnetV2FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with Inception Resnet v2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the Inception Resnet v2 network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. 
""" if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay)): # Forces is_training to False to disable batch norm update. with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights) as scope: return inception_resnet_v2.inception_resnet_v2_base( preprocessed_inputs, final_endpoint='PreAuxLogits', scope=scope, output_stride=self._first_stage_features_stride, align_feature_maps=True) def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the Inception ResNet v2 network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights): with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay)): # Forces is_training to False to disable batch norm update. with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'): with tf.variable_scope('Mixed_7a'): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv_1 = slim.conv2d( tower_conv, 384, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): tower_conv1 = slim.conv2d( proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d( tower_conv1, 288, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): tower_conv2 = slim.conv2d( proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, scope='Conv2d_0b_3x3') tower_conv2_2 = slim.conv2d( tower_conv2_1, 320, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_3'): tower_pool = slim.max_pool2d( proposal_feature_maps, 3, stride=2, padding='VALID', scope='MaxPool_1a_3x3') net = tf.concat( [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20) net = inception_resnet_v2.block8(net, activation_fn=None) proposal_classifier_features = slim.conv2d( net, 1536, 1, scope='Conv2d_7b_1x1') return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for InceptionResnetV2 checkpoints. TODO(jonathanhuang,rathodv): revisit whether it's possible to force the `Repeat` namescope as created in `_extract_box_classifier_features` to start counting at 2 (e.g. 
`Repeat_2`) so that the default restore_fn can be used. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2') var_name = var_name.replace( second_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable return variables_to_restore
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py
faster_rcnn_inception_resnet_v2_feature_extractor.py
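A minimal TF1 usage sketch of the extractor above (the random batch, the 224x224 size, and the scope string are illustrative; only the constructor arguments and the [-1, 1] preprocessing come from the class itself):

import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res

# `first_stage_features_stride` must be 8 or 16; anything else raises ValueError.
extractor = frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor(
    is_training=False, first_stage_features_stride=16)

images = tf.random_uniform([1, 224, 224, 3], maxval=255.0, dtype=tf.float32)
# preprocess() maps [0, 255] to [-1, 1]: 0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0.
preprocessed = extractor.preprocess(images)

# The meta architecture normally calls the public wrappers, which delegate to
# the `_extract_*` methods above; inputs must be at least 33 pixels per side.
rpn_feature_map, _ = extractor.extract_proposal_features(
    preprocessed, scope='FirstStageFeatureExtractor')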
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for bidirectional feature pyramid generators.""" import unittest from absl.testing import parameterized import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import test_utils from object_detection.utils import tf_version @parameterized.parameters({'bifpn_num_iterations': 2}, {'bifpn_num_iterations': 8}) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class BiFPNFeaturePyramidGeneratorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } force_use_bias: true """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_get_expected_feature_map_shapes(self, bifpn_num_iterations): with test_utils.GraphContextOrNone() as g: image_features = [ ('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)), ('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), ('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)) ] bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps( bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=128, fpn_min_level=3, fpn_max_level=7, input_max_level=5, is_training=True, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False) def graph_fn(): feature_maps = bifpn_generator(image_features) return feature_maps expected_feature_map_shapes = { '{}_dn_lvl_3'.format(bifpn_num_iterations): (4, 16, 16, 128), '{}_up_lvl_4'.format(bifpn_num_iterations): (4, 8, 8, 128), '{}_up_lvl_5'.format(bifpn_num_iterations): (4, 4, 4, 128), '{}_up_lvl_6'.format(bifpn_num_iterations): (4, 2, 2, 128), '{}_up_lvl_7'.format(bifpn_num_iterations): (4, 1, 1, 128)} out_feature_maps = self.execute(graph_fn, [], g) out_feature_map_shapes = dict( (key, value.shape) for key, value in out_feature_maps.items()) self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) def test_get_expected_variable_names(self, bifpn_num_iterations): with test_utils.GraphContextOrNone() as g: image_features = [ ('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)), ('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), ('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)) ] bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps( bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=128, fpn_min_level=3, fpn_max_level=7, input_max_level=5, is_training=True, 
conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, name='bifpn') def graph_fn(): return bifpn_generator(image_features) self.execute(graph_fn, [], g) expected_variables = [ 'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/bias', 'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/kernel', 'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias', 'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel', 'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias', 'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel', 'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/bias', 'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/kernel', 'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias', 'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel', 'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias', 'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel'] expected_node_variable_patterns = [ ['bifpn/node_{:02}/{}_dn_lvl_6/combine/bifpn_combine_weights', 'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/bias', 'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/depthwise_kernel', 'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/pointwise_kernel'], ['bifpn/node_{:02}/{}_dn_lvl_5/combine/bifpn_combine_weights', 'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/bias', 'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/depthwise_kernel', 'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/pointwise_kernel'], ['bifpn/node_{:02}/{}_dn_lvl_4/combine/bifpn_combine_weights', 'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/bias', 'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/depthwise_kernel', 'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/pointwise_kernel'], ['bifpn/node_{:02}/{}_dn_lvl_3/combine/bifpn_combine_weights', 'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/bias', 'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/depthwise_kernel', 'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/pointwise_kernel'], ['bifpn/node_{:02}/{}_up_lvl_4/combine/bifpn_combine_weights', 'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/bias', 'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/depthwise_kernel', 'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/pointwise_kernel'], ['bifpn/node_{:02}/{}_up_lvl_5/combine/bifpn_combine_weights', 'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/bias', 'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/depthwise_kernel', 'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/pointwise_kernel'], ['bifpn/node_{:02}/{}_up_lvl_6/combine/bifpn_combine_weights', 'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/bias', 'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/depthwise_kernel', 'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/pointwise_kernel'], ['bifpn/node_{:02}/{}_up_lvl_7/combine/bifpn_combine_weights', 'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/bias', 'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/depthwise_kernel', 'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/pointwise_kernel']] node_i = 2 for iter_i in range(1, bifpn_num_iterations+1): for node_variable_patterns in expected_node_variable_patterns: for pattern in node_variable_patterns: 
expected_variables.append(pattern.format(node_i, iter_i)) node_i += 1 expected_variables = set(expected_variables) actual_variable_set = set( [var.name.split(':')[0] for var in bifpn_generator.variables]) self.assertSetEqual(expected_variables, actual_variable_set) # TODO(aom): Tests for create_bifpn_combine_op. if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/bidirectional_feature_pyramid_generators_tf2_test.py
bidirectional_feature_pyramid_generators_tf2_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v2_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v2_keras_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class SsdMobilenetV2FeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, num_layers=6, use_keras=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. num_layers: number of SSD layers. use_keras: unused argument. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ del use_keras min_depth = 32 return (ssd_mobilenet_v2_keras_feature_extractor. 
SSDMobileNetV2KerasFeatureExtractor( is_training=False, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, use_explicit_padding=use_explicit_padding, num_layers=num_layers, name='MobilenetV2')) def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=True) def test_extract_features_returns_correct_shapes_128_explicit_padding( self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_returns_correct_shapes_with_dynamic_inputs( self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=True) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=True) def test_extract_features_returns_correct_shapes_enforcing_min_depth( self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32), (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), (2, 1, 1, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=True) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=True) def test_extract_features_raises_error_with_invalid_image_size( self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=True) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(4, image_height, image_width, 3) feature_extractor = 
self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'MobilenetV2' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name, use_keras=True) def test_variable_count(self): depth_multiplier = 1 pad_to_multiple = 1 variables = self.get_feature_extractor_variables( depth_multiplier, pad_to_multiple, use_keras=True) self.assertEqual(len(variables), 292) def test_extract_features_with_fewer_layers(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), (2, 2, 2, 512), (2, 1, 1, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, num_layers=4, use_keras=True) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py
ssd_mobilenet_v2_feature_extractor_tf2_test.py
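The expected shapes in the `pad_to_multiple` test above follow from simple padding and stride arithmetic; a plain-Python check of the 299/32 case (the stride values are properties of the MobileNet V2 endpoints the extractor taps):

import math

image_size, pad_to_multiple = 299, 32
padded = int(math.ceil(image_size / float(pad_to_multiple)) * pad_to_multiple)  # 299 -> 320
print(padded // 16, padded // 32)  # 20 10 -> the (20, 20, 576) and (10, 10, 1280) maps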
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd resnet v1 FPN feature extractors.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class SSDResnet50V1FeatureExtractorTest( ssd_resnet_v1_fpn_feature_extractor_testbase. SSDResnetFPNFeatureExtractorTestBase): """SSDResnet50v1Fpn feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, min_depth=32, use_keras=True): is_training = True return (ssd_resnet_v1_fpn_keras_feature_extractor. SSDResNet50V1FpnKerasFeatureExtractor( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=self._build_conv_hyperparams( add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, name='ResNet50V1_FPN')) def _resnet_scope_name(self): return 'ResNet50V1_FPN' @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class SSDResnet101V1FeatureExtractorTest( ssd_resnet_v1_fpn_feature_extractor_testbase. SSDResnetFPNFeatureExtractorTestBase): """SSDResnet101v1Fpn feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, min_depth=32, use_keras=False): is_training = True return (ssd_resnet_v1_fpn_keras_feature_extractor. SSDResNet101V1FpnKerasFeatureExtractor( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=self._build_conv_hyperparams( add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, name='ResNet101V1_FPN')) def _resnet_scope_name(self): return 'ResNet101V1_FPN' @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class SSDResnet152V1FeatureExtractorTest( ssd_resnet_v1_fpn_feature_extractor_testbase. SSDResnetFPNFeatureExtractorTestBase): """SSDResnet152v1Fpn feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, min_depth=32, use_keras=False): is_training = True return (ssd_resnet_v1_fpn_keras_feature_extractor. SSDResNet152V1FpnKerasFeatureExtractor( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=self._build_conv_hyperparams( add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, name='ResNet152V1_FPN')) def _resnet_scope_name(self): return 'ResNet152V1_FPN' if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py
ssd_resnet_v1_fpn_feature_extractor_tf2_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inception Resnet v2 Faster R-CNN implementation in Keras. See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261) as well as "Speed/accuracy trade-offs for modern convolutional object detectors" by Huang et al. (https://arxiv.org/abs/1611.10012) """ # Skip pylint for this file because it times out # pylint: skip-file import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models.keras_models import inception_resnet_v2 from object_detection.utils import model_util from object_detection.utils import variables_helper class FasterRCNNInceptionResnetV2KerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNInceptionResnetV2KerasFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, weight_decay) self._variable_dict = {} self.classification_backbone = None def preprocess(self, resized_inputs): """Faster R-CNN with Inception Resnet v2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def get_proposal_feature_extractor_model(self, name=None): """Returns a model that extracts first stage RPN features. Extracts features using the first half of the Inception Resnet v2 network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. 
And returns rpn_feature_map: A tensor with shape [batch, height, width, depth] """ if not self.classification_backbone: self.classification_backbone = inception_resnet_v2.inception_resnet_v2( self._train_batch_norm, output_stride=self._first_stage_features_stride, align_feature_maps=True, weight_decay=self._weight_decay, weights=None, include_top=False) with tf.name_scope(name): with tf.name_scope('InceptionResnetV2'): proposal_features = self.classification_backbone.get_layer( name='block17_20_ac').output keras_model = tf.keras.Model( inputs=self.classification_backbone.inputs, outputs=proposal_features) for variable in keras_model.variables: self._variable_dict[variable.name[:-2]] = variable return keras_model def get_box_classifier_feature_extractor_model(self, name=None): """Returns a model that extracts second stage box classifier features. This function reconstructs the "second half" of the Inception ResNet v2 network after the part defined in `get_proposal_feature_extractor_model`. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. And returns proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ if not self.classification_backbone: self.classification_backbone = inception_resnet_v2.inception_resnet_v2( self._train_batch_norm, output_stride=self._first_stage_features_stride, align_feature_maps=True, weight_decay=self._weight_decay, weights=None, include_top=False) with tf.name_scope(name): with tf.name_scope('InceptionResnetV2'): proposal_feature_maps = self.classification_backbone.get_layer( name='block17_20_ac').output proposal_classifier_features = self.classification_backbone.get_layer( name='conv_7b_ac').output keras_model = model_util.extract_submodel( model=self.classification_backbone, inputs=proposal_feature_maps, outputs=proposal_classifier_features) for variable in keras_model.variables: self._variable_dict[variable.name[:-2]] = variable return keras_model
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py
faster_rcnn_inception_resnet_v2_keras_feature_extractor.py
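A small TF2 sketch (image size, scope names, and the eager call pattern are illustrative assumptions) of pulling the two Keras sub-models out of the extractor defined above:

import tensorflow as tf
from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_keras

extractor = frcnn_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor(
    is_training=False, first_stage_features_stride=16)

# First-stage model: preprocessed images -> RPN feature map (the backbone is
# split at the 'block17_20_ac' activation).
rpn_model = extractor.get_proposal_feature_extractor_model(
    name='FirstStageFeatureExtractor')
images = tf.random.uniform([1, 224, 224, 3], maxval=255.0)
rpn_feature_map = rpn_model(extractor.preprocess(images))

# Second-stage model: cropped proposal features -> box classifier features
# (the remainder of the backbone up to 'conv_7b_ac').
box_model = extractor.get_box_classifier_feature_extractor_model(
    name='SecondStageFeatureExtractor')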
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Embedded-friendly SSDFeatureExtractor for MobilenetV1 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from nets import mobilenet_v1 class EmbeddedSSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """Embedded-friendly SSD Feature Extractor using MobilenetV1 features. This feature extractor is similar to SSD MobileNetV1 feature extractor, and it fixes input resolution to be 256x256, reduces the number of feature maps used for box prediction and ensures convolution kernel to be no larger than input tensor in spatial dimensions. This feature extractor requires support of the following ops if used in embedded devices: - Conv - DepthwiseConv - Relu6 All conv/depthwiseconv use SAME padding, and no additional spatial padding is needed. """ def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """MobileNetV1 Feature Extractor for Embedded-friendly SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. For EmbeddedSSD it must be set to 1. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. Raises: ValueError: upon invalid `pad_to_multiple` values. """ if pad_to_multiple != 1: raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` ' 'of 1.') super(EmbeddedSSDMobileNetV1FeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] Raises: ValueError: if image height or width are not 256 pixels. """ image_shape = preprocessed_inputs.get_shape() image_shape.assert_has_rank(4) image_height = image_shape[1].value image_width = image_shape[2].value if image_height is None or image_width is None: shape_assert = tf.Assert( tf.logical_and(tf.equal(tf.shape(preprocessed_inputs)[1], 256), tf.equal(tf.shape(preprocessed_inputs)[2], 256)), ['image size must be 256 in both height and width.']) with tf.control_dependencies([shape_assert]): preprocessed_inputs = tf.identity(preprocessed_inputs) elif image_height != 256 or image_width != 256: raise ValueError('image size must be = 256 in both height and width;' ' image dim = %d,%d' % (image_height, image_width)) feature_map_layout = { 'from_layer': [ 'Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '' ], 'layer_depth': [-1, -1, 512, 256, 256], 'conv_kernel_size': [-1, -1, 3, 3, 2], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py
embedded_ssd_mobilenet_v1_feature_extractor.py
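A sketch of constructing the embedded extractor above outside of model_builder; the conv hyperparameters proto is a minimal stand-in (assumption) for what a pipeline config would normally supply, built the same way model_builder builds slim hyperparameters:

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor as embedded_ssd
from object_detection.protos import hyperparams_pb2

conv_hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge("""
    regularizer { l2_regularizer { } }
    initializer { truncated_normal_initializer { } }
""", conv_hyperparams)
conv_hyperparams_fn = hyperparams_builder.build(conv_hyperparams, is_training=False)

extractor = embedded_ssd.EmbeddedSSDMobileNetV1FeatureExtractor(
    is_training=False, depth_multiplier=1.0, min_depth=16,
    pad_to_multiple=1,  # any other value raises ValueError at construction
    conv_hyperparams_fn=conv_hyperparams_fn)

# Inputs must be exactly 256x256; other static sizes raise ValueError, and
# statically unknown sizes are checked at run time with tf.Assert.
images = tf.placeholder(tf.float32, [1, 256, 256, 3])
feature_maps = extractor.extract_features(extractor.preprocess(images))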
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mobilenet v1 Faster R-CNN implementation.""" import numpy as np import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import shape_utils from nets import mobilenet_v1 def _get_mobilenet_conv_no_last_stride_defs(conv_depth_ratio_in_percentage): if conv_depth_ratio_in_percentage not in [25, 50, 75, 100]: raise ValueError( 'Only the following ratio percentages are supported: 25, 50, 75, 100') conv_depth_ratio_in_percentage = float(conv_depth_ratio_in_percentage) / 100.0 channels = np.array([ 32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024 ], dtype=np.float32) channels = (channels * conv_depth_ratio_in_percentage).astype(np.int32) return [ mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=channels[0]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[1]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[2]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[3]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[4]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[5]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[6]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[7]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[8]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[9]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[10]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[11]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[12]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[13]) ] class FasterRCNNMobilenetV1FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN Mobilenet V1 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, depth_multiplier=1.0, min_depth=16, skip_last_stride=False, conv_depth_ratio_in_percentage=100): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. skip_last_stride: Skip the last stride if True. conv_depth_ratio_in_percentage: Conv depth ratio in percentage. Only applied if skip_last_stride is True. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. 
""" if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') self._depth_multiplier = depth_multiplier self._min_depth = min_depth self._skip_last_stride = skip_last_stride self._conv_depth_ratio_in_percentage = conv_depth_ratio_in_percentage super(FasterRCNNMobilenetV1FeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN Mobilenet V1 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] activations: A dictionary mapping feature extractor tensor names to tensors Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. """ preprocessed_inputs.get_shape().assert_has_rank(4) preprocessed_inputs = shape_utils.check_min_image_dim( min_dim=33, image_tensor=preprocessed_inputs) with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=self._train_batch_norm, weight_decay=self._weight_decay)): with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: params = {} if self._skip_last_stride: params['conv_defs'] = _get_mobilenet_conv_no_last_stride_defs( conv_depth_ratio_in_percentage=self. _conv_depth_ratio_in_percentage) _, activations = mobilenet_v1.mobilenet_v1_base( preprocessed_inputs, final_endpoint='Conv2d_11_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope, **params) return activations['Conv2d_11_pointwise'], activations def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ net = proposal_feature_maps conv_depth = 1024 if self._skip_last_stride: conv_depth_ratio = float(self._conv_depth_ratio_in_percentage) / 100.0 conv_depth = int(float(conv_depth) * conv_depth_ratio) depth = lambda d: max(int(d * 1.0), 16) with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights): with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=self._train_batch_norm, weight_decay=self._weight_decay)): with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], padding='SAME'): net = slim.separable_conv2d( net, depth(conv_depth), [3, 3], depth_multiplier=1, stride=2, scope='Conv2d_12_pointwise') return slim.separable_conv2d( net, depth(conv_depth), [3, 3], depth_multiplier=1, stride=1, scope='Conv2d_13_pointwise')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py
faster_rcnn_mobilenet_v1_feature_extractor.py
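The `skip_last_stride` / `conv_depth_ratio_in_percentage` options above are plain channel scaling; a short worked example of the numbers involved:

# With skip_last_stride=True and conv_depth_ratio_in_percentage=50:
ratio = 50 / 100.0
print(int(1024 * ratio))  # 512 -- depth used by _extract_box_classifier_features
print(int(32 * ratio))    # 16  -- first entry of the scaled conv_defs channel list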
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SSD Mobilenet V1 feature extractors. By using parameterized test decorator, this test serves for both Slim-based and Keras-based Mobilenet V1 feature extractors in SSD. """ import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v1_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdMobilenetV1FeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, num_layers=6, is_training=False, use_keras=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. num_layers: number of SSD layers. is_training: whether the network is in training mode. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. Returns: an ssd_meta_arch.SSDFeatureExtractor object. 
""" min_depth = 32 del use_keras return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding, num_layers=num_layers) def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_with_dynamic_image_shape(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_returns_correct_shapes_enforcing_min_depth( self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32), (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), (2, 1, 1, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def 
test_extract_features_raises_error_with_invalid_image_size( self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=False) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=False) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'MobilenetV1' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name, use_keras=False) def test_variable_count(self): depth_multiplier = 1 pad_to_multiple = 1 variables = self.get_feature_extractor_variables( depth_multiplier, pad_to_multiple, use_keras=False) self.assertEqual(len(variables), 151) def test_has_fused_batchnorm(self): image_height = 40 image_width = 40 depth_multiplier = 1 pad_to_multiple = 1 image_placeholder = tf.placeholder(tf.float32, [1, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=False) preprocessed_image = feature_extractor.preprocess(image_placeholder) _ = feature_extractor.extract_features(preprocessed_image) self.assertTrue( any('FusedBatchNorm' in op.type for op in tf.get_default_graph().get_operations())) def test_extract_features_with_fewer_layers(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, num_layers=4, use_keras=False) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py
ssd_mobilenet_v1_feature_extractor_tf1_test.py
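The min-depth expectations in the test above come from the depth multiplier being pushed far below the floor; a quick arithmetic check:

depth_multiplier = 0.5 ** 12          # ~0.000244
min_depth = 32                        # hard-coded in _create_feature_extractor above
scaled = int(512 * depth_multiplier)  # 0
print(max(scaled, min_depth))         # 32 -- hence every expected map depth is 32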
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.models.faster_rcnn_resnet_v1_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as faster_rcnn_resnet_v1 from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class FasterRcnnResnetV1FeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride, activation_fn=tf.nn.relu, architecture='resnet_v1_101'): feature_extractor_map = { 'resnet_v1_50': faster_rcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, 'resnet_v1_101': faster_rcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, 'resnet_v1_152': faster_rcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor } return feature_extractor_map[architecture]( is_training=False, first_stage_features_stride=first_stage_features_stride, activation_fn=activation_fn, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']: feature_extractor = self._build_feature_extractor( first_stage_features_stride=16, architecture=architecture) preprocessed_inputs = tf.random_uniform( [4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [4, 14, 14, 1024]) def test_extract_proposal_features_stride_eight(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=8) preprocessed_inputs = tf.random_uniform( [4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [4, 28, 28, 1024]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 7, 7, 1024]) def 
test_extract_proposal_features_dies_on_invalid_stride(self): with self.assertRaises(ValueError): self._build_feature_extractor(first_stage_features_stride=99) def test_extract_proposal_features_dies_on_very_small_images(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) with self.assertRaises(tf.errors.InvalidArgumentError): sess.run( features_shape, feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(ValueError): feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [3, 7, 7, 1024], maxval=255, dtype=tf.float32) proposal_classifier_features = ( feature_extractor.extract_box_classifier_features( proposal_feature_maps, scope='TestScope')) features_shape = tf.shape(proposal_classifier_features) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [3, 7, 7, 2048]) def test_overwriting_activation_fn(self): for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']: feature_extractor = self._build_feature_extractor( first_stage_features_stride=16, architecture=architecture, activation_fn=tf.nn.relu6) preprocessed_inputs = tf.random_uniform([4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestStage1Scope') _ = feature_extractor.extract_box_classifier_features( rpn_feature_map, scope='TestStaget2Scope') conv_ops = [ op for op in tf.get_default_graph().get_operations() if op.type == 'Relu6' ] op_names = [op.name for op in conv_ops] self.assertIsNotNone(conv_ops) self.assertIn('TestStage1Scope/resnet_v1_50/resnet_v1_50/conv1/Relu6', op_names) self.assertIn( 'TestStaget2Scope/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/Relu6', op_names) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py
faster_rcnn_resnet_v1_feature_extractor_tf1_test.py
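The asserted RPN feature-map sizes in these tests are just the input size divided by the first-stage stride:

for size, stride in [(224, 16), (224, 8), (112, 16)]:
  print(size // stride)  # 14, 28, 7 -- matching [4, 14, 14, 1024], [4, 28, 28, 1024], [1, 7, 7, 1024]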
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for MobilenetV1 PPN features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import mobilenet_v1 class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV1 PPN features.""" def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=None, regularize_depthwise=True)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.pooling_pyramid_feature_maps( base_feature_map_depth=0, num_layers=6, image_features={ 'image_features': image_features['Conv2d_11_pointwise'] }) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py
ssd_mobilenet_v1_ppn_feature_extractor.py
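Unlike the multi-resolution generators used elsewhere in this package, the PPN extractor above builds its pyramid by repeatedly pooling a single stride-16 base map (`Conv2d_11_pointwise`) and keeps that map's depth (512 at depth_multiplier 1.0) at every level, since `base_feature_map_depth=0` adds no 1x1 projection. A rough shape check for a hypothetical 256x256 input:

# Stride-16 endpoint => 256 / 16 = 16. Each level then halves with SAME
# max-pooling (ceil division), for num_layers=6 maps in total.
sizes = [256 // 16]
for _ in range(5):
  sizes.append(-(-sizes[-1] // 2))  # ceil(n / 2)
print(sizes)  # [16, 8, 4, 2, 1, 1]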
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Testing hourglass feature extractor for CenterNet.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import center_net_hourglass_feature_extractor as hourglass from object_detection.models.keras_models import hourglass_network from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetHourglassFeatureExtractorTest(test_case.TestCase): def test_center_net_hourglass_feature_extractor(self): net = hourglass_network.HourglassNetwork( num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], input_channel_dims=4, channel_dims_per_stage=[6, 8, 10, 12, 14], num_hourglasses=2) model = hourglass.CenterNetHourglassFeatureExtractor(net) def graph_fn(): return model(tf.zeros((2, 64, 64, 3), dtype=np.float32)) outputs = self.execute(graph_fn, []) self.assertEqual(outputs[0].shape, (2, 16, 16, 6)) self.assertEqual(outputs[1].shape, (2, 16, 16, 6)) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py
center_net_hourglass_feature_extractor_tf2_test.py
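The test above builds a deliberately tiny hourglass backbone and checks that the extractor returns one feature map per hourglass at an output stride of 4. A rough eager-mode sketch of the same construction outside the test harness, assuming TF2 and the object_detection package are importable, would look like this:

import tensorflow as tf
from object_detection.models import center_net_hourglass_feature_extractor as hourglass
from object_detection.models.keras_models import hourglass_network

net = hourglass_network.HourglassNetwork(
    num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], input_channel_dims=4,
    channel_dims_per_stage=[6, 8, 10, 12, 14], num_hourglasses=2)
extractor = hourglass.CenterNetHourglassFeatureExtractor(net)

# Two hourglasses -> two feature maps; the stem downsamples by 4, so 64 -> 16.
outputs = extractor(tf.zeros((2, 64, 64, 3)))
print([tuple(o.shape) for o in outputs])  # expect [(2, 16, 16, 6), (2, 16, 16, 6)]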
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for PNASNet features. Based on PNASNet ImageNet model: https://arxiv.org/abs/1712.00559 """ import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import variables_helper try: from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top except: # pylint: disable=bare-except pass def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): """Defines the default arg scope for the PNASNet Large for object detection. This provides a small edit to switch batch norm training on and off. Args: is_batch_norm_training: Boolean indicating whether to train with batch norm. Default is False. Returns: An `arg_scope` to use for the PNASNet Large Model. """ imagenet_scope = pnasnet.pnasnet_large_arg_scope() with slim.arg_scope(imagenet_scope): with slim.arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: return sc class SSDPNASNetFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using PNASNet features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """PNASNet Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_depthwise: Whether to use depthwise convolutions. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. 
""" super(SSDPNASNetFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ feature_map_layout = { 'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with slim.arg_scope( pnasnet_large_arg_scope_for_detection( is_batch_norm_training=self._is_training)): with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], reuse=self._reuse_weights): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = pnasnet.build_pnasnet_large( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), num_classes=None, is_training=self._is_training, final_endpoint='Cell_11') with tf.variable_scope('SSD_feature_maps', reuse=self._reuse_weights): with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values()) def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in ssd_meta_arch.SSDFeatureExtractor which does not work for PNASNet checkpoints. Args: feature_extractor_scope: A scope name for the first stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith(feature_extractor_scope): var_name = variable.op.name.replace(feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable return variables_to_restore
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_pnasnet_feature_extractor.py
ssd_pnasnet_feature_extractor.py
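The PNASNet extractor's feature_map_layout takes its first two maps from named backbone endpoints and asks multi_resolution_feature_maps to synthesize the rest, with num_layers simply truncating both lists. A small plain-Python illustration of that truncation (the helper name is hypothetical):

def pnasnet_feature_map_layout(num_layers):
  # Mirrors the layout dict in extract_features above: '' entries ask
  # multi_resolution_feature_maps to synthesize an extra map, and -1 keeps the
  # depth of the named PNASNet endpoint.
  return {
      'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''][:num_layers],
      'layer_depth': [-1, -1, 512, 256, 256, 128][:num_layers],
  }

print(pnasnet_feature_map_layout(4))
# {'from_layer': ['Cell_7', 'Cell_11', '', ''], 'layer_depth': [-1, -1, 512, 256]}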
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Testing ResNet v2 models for the CenterNet meta architecture.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import center_net_resnet_feature_extractor from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetResnetFeatureExtractorTest(test_case.TestCase): def test_output_size(self): """Verify that shape of features returned by the backbone is correct.""" model = center_net_resnet_feature_extractor.\ CenterNetResnetFeatureExtractor('resnet_v2_101') def graph_fn(): img = np.zeros((8, 512, 512, 3), dtype=np.float32) processed_img = model.preprocess(img) return model(processed_img) outputs = self.execute(graph_fn, []) self.assertEqual(outputs.shape, (8, 128, 128, 64)) def test_output_size_resnet50(self): """Verify that shape of features returned by the backbone is correct.""" model = center_net_resnet_feature_extractor.\ CenterNetResnetFeatureExtractor('resnet_v2_50') def graph_fn(): img = np.zeros((8, 224, 224, 3), dtype=np.float32) processed_img = model.preprocess(img) return model(processed_img) outputs = self.execute(graph_fn, []) self.assertEqual(outputs.shape, (8, 56, 56, 64)) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py
center_net_resnet_feature_extractor_tf2_test.py
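Both assertions in the test above encode the same expectation: the CenterNet ResNet feature extractor brings the backbone output back to stride 4 with 64 channels. A tiny illustrative helper (hypothetical name) capturing that arithmetic:

def expected_centernet_resnet_output_shape(batch, height, width, stride=4,
                                           channels=64):
  # CenterNet ResNet extractors upsample back to 1/4 resolution with 64 channels.
  return (batch, height // stride, width // stride, channels)

assert expected_centernet_resnet_output_shape(8, 512, 512) == (8, 128, 128, 64)
assert expected_centernet_resnet_output_shape(8, 224, 224) == (8, 56, 56, 64)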
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v1_fpn_feature_extractor. By using parameterized test decorator, this test serves for both Slim-based and Keras-based Mobilenet V1 FPN feature extractors in SSD. """ import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v1_fpn_keras_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class SsdMobilenetV1FpnFeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, is_training=True, use_explicit_padding=False, use_keras=True): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. is_training: whether the network is in training mode. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 del use_keras return (ssd_mobilenet_v1_fpn_keras_feature_extractor. 
SSDMobileNetV1FpnKerasFeatureExtractor( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=self._build_conv_hyperparams( add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, use_explicit_padding=use_explicit_padding, use_depthwise=True, name='MobilenetV1_FPN')) def test_extract_features_returns_correct_shapes_256(self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_returns_correct_shapes_384(self): image_height = 320 image_width = 320 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), (2, 10, 10, 256), (2, 5, 5, 256), (2, 3, 3, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_with_dynamic_image_shape(self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), (2, 10, 10, 256), (2, 5, 5, 256), (2, 3, 3, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=True) def test_extract_features_returns_correct_shapes_enforcing_min_depth( self): image_height = 256 image_width = 256 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32), (2, 2, 2, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=True) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, 
use_explicit_padding=True, use_keras=True) def test_extract_features_raises_error_with_invalid_image_size( self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=True) def test_preprocess_returns_correct_value_range(self): image_height = 256 image_width = 256 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple, use_keras=True) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py
ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py
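The expected shapes in the FPN tests above follow from three assumptions: the input is first padded up to pad_to_multiple, the FPN emits levels 3 through 7 (strides 8 to 128) with 256 channels each, and coarser levels come from 'SAME'-padded stride-2 convolutions, i.e. ceiling division. A standalone sketch under those assumptions (helper name is illustrative):

import math

def expected_fpn_shapes(batch, height, width, pad_to_multiple=1, depth=256,
                        min_level=3, max_level=7):
  # Pad the image up to a multiple first, take the finest FPN level at stride
  # 2**min_level, then halve with ceiling division for each coarser level.
  height = int(math.ceil(height / float(pad_to_multiple))) * pad_to_multiple
  width = int(math.ceil(width / float(pad_to_multiple))) * pad_to_multiple
  h = int(math.ceil(height / 2.0 ** min_level))
  w = int(math.ceil(width / 2.0 ** min_level))
  shapes = [(batch, h, w, depth)]
  for _ in range(min_level + 1, max_level + 1):
    h, w = int(math.ceil(h / 2.0)), int(math.ceil(w / 2.0))
    shapes.append((batch, h, w, depth))
  return shapes

print(expected_fpn_shapes(2, 256, 256))
# [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)]
print(expected_fpn_shapes(2, 299, 299, pad_to_multiple=32))
# 299 is padded up to 320 -> [(2, 40, 40, 256), ..., (2, 3, 3, 256)]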
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v2_fpn_feature_extractor. By using parameterized test decorator, this test serves for both Slim-based and Keras-based Mobilenet V2 FPN feature extractors in SSD. """ import unittest from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v2_fpn_keras_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') @parameterized.parameters( { 'use_depthwise': False, }, { 'use_depthwise': True, }, ) class SsdMobilenetV2FpnFeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, is_training=True, use_explicit_padding=False, use_keras=False, use_depthwise=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. is_training: whether the network is in training mode. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. use_depthwise: Whether to use depthwise convolutions. Returns: an ssd_meta_arch.SSDFeatureExtractor object. 
""" del use_keras min_depth = 32 return (ssd_mobilenet_v2_fpn_keras_feature_extractor .SSDMobileNetV2FpnKerasFeatureExtractor( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=self._build_conv_hyperparams( add_batch_norm=False), freeze_batchnorm=False, inplace_batchnorm_update=False, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, name='MobilenetV2_FPN')) def test_extract_features_returns_correct_shapes_256(self, use_depthwise): use_keras = True image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_returns_correct_shapes_384(self, use_depthwise): use_keras = True image_height = 320 image_width = 320 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), (2, 10, 10, 256), (2, 5, 5, 256), (2, 3, 3, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_with_dynamic_image_shape(self, use_depthwise): use_keras = True image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self, use_depthwise): use_keras = True image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), (2, 10, 10, 256), (2, 5, 5, 256), (2, 3, 3, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_returns_correct_shapes_enforcing_min_depth( self, use_depthwise): use_keras = True 
image_height = 256 image_width = 256 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32), (2, 2, 2, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_raises_error_with_invalid_image_size( self, use_depthwise=False): use_keras = True image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) def test_preprocess_returns_correct_value_range(self, use_depthwise): use_keras = True image_height = 256 image_width = 256 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py
ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py
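The min-depth test above uses a vanishingly small depth multiplier (0.5 ** 12) so that every scaled channel count collapses to min_depth, which is why all expected maps end with 32 channels. A minimal sketch of that clamping behaviour; the helper name is illustrative of the depth function the SSD extractors apply, not an exact copy of it:

def scaled_depth(channels, depth_multiplier, min_depth):
  # Scale, truncate to an integer, then clamp up to min_depth.
  return max(int(channels * depth_multiplier), min_depth)

for channels in (256, 128, 64):
  assert scaled_depth(channels, 0.5 ** 12, min_depth=32) == 32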
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Resnet based Faster R-CNN implementation in Keras. See Deep Residual Learning for Image Recognition by He et al. https://arxiv.org/abs/1512.03385 """ import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models.keras_models import resnet_v1 from object_detection.utils import model_util _RESNET_MODEL_CONV4_LAST_LAYERS = { 'resnet_v1_50': 'conv4_block6_out', 'resnet_v1_101': 'conv4_block23_out', 'resnet_v1_152': 'conv4_block36_out', } class FasterRCNNResnetKerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Faster R-CNN with Resnet feature extractor implementation.""" def __init__(self, is_training, resnet_v1_base_model, resnet_v1_base_model_name, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. resnet_v1_base_model: base resnet v1 network to use. One of the resnet_v1.resnet_v1_{50,101,152} models. resnet_v1_base_model_name: model name under which to construct resnet v1. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 16.') super(FasterRCNNResnetKerasFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, weight_decay) self.classification_backbone = None self._variable_dict = {} self._resnet_v1_base_model = resnet_v1_base_model self._resnet_v1_base_model_name = resnet_v1_base_model_name def preprocess(self, resized_inputs): """Faster R-CNN Resnet V1 preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def get_proposal_feature_extractor_model(self, name=None): """Returns a model that extracts first stage RPN features. Extracts features using the first half of the Resnet v1 network. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. 
And returns rpn_feature_map: A tensor with shape [batch, height, width, depth] """ if not self.classification_backbone: self.classification_backbone = self._resnet_v1_base_model( batchnorm_training=self._train_batch_norm, conv_hyperparams=None, weight_decay=self._weight_decay, classes=None, weights=None, include_top=False ) with tf.name_scope(name): with tf.name_scope('ResnetV1'): conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[ self._resnet_v1_base_model_name] proposal_features = self.classification_backbone.get_layer( name=conv4_last_layer).output keras_model = tf.keras.Model( inputs=self.classification_backbone.inputs, outputs=proposal_features) for variable in keras_model.variables: self._variable_dict[variable.name[:-2]] = variable return keras_model def get_box_classifier_feature_extractor_model(self, name=None): """Returns a model that extracts second stage box classifier features. This function reconstructs the "second half" of the ResNet v1 network after the part defined in `get_proposal_feature_extractor_model`. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. And returns proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ if not self.classification_backbone: self.classification_backbone = self._resnet_v1_base_model( batchnorm_training=self._train_batch_norm, conv_hyperparams=None, weight_decay=self._weight_decay, classes=None, weights=None, include_top=False ) with tf.name_scope(name): with tf.name_scope('ResnetV1'): conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[ self._resnet_v1_base_model_name] proposal_feature_maps = self.classification_backbone.get_layer( name=conv4_last_layer).output proposal_classifier_features = self.classification_backbone.get_layer( name='conv5_block3_out').output keras_model = model_util.extract_submodel( model=self.classification_backbone, inputs=proposal_feature_maps, outputs=proposal_classifier_features) for variable in keras_model.variables: self._variable_dict[variable.name[:-2]] = variable return keras_model class FasterRCNNResnet50KerasFeatureExtractor( FasterRCNNResnetKerasFeatureExtractor): """Faster R-CNN with Resnet50 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. """ super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( is_training=is_training, resnet_v1_base_model=resnet_v1.resnet_v1_50, resnet_v1_base_model_name='resnet_v1_50', first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay) class FasterRCNNResnet101KerasFeatureExtractor( FasterRCNNResnetKerasFeatureExtractor): """Faster R-CNN with Resnet101 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. 
""" super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__( is_training=is_training, resnet_v1_base_model=resnet_v1.resnet_v1_101, resnet_v1_base_model_name='resnet_v1_101', first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay) class FasterRCNNResnet152KerasFeatureExtractor( FasterRCNNResnetKerasFeatureExtractor): """Faster R-CNN with Resnet152 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. """ super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__( is_training=is_training, resnet_v1_base_model=resnet_v1.resnet_v1_152, resnet_v1_base_model_name='resnet_v1_152', first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py
faster_rcnn_resnet_keras_feature_extractor.py
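The preprocess method above performs VGG-style channel mean subtraction on RGB inputs and passes anything that is not 3-channel through unchanged. A standalone NumPy sketch of the same behaviour (the function name is illustrative):

import numpy as np

_CHANNEL_MEANS = [123.68, 116.779, 103.939]

def faster_rcnn_resnet_preprocess(resized_inputs):
  # Subtract per-channel ImageNet means from [0, 255] RGB inputs; skip otherwise.
  if resized_inputs.shape[-1] == 3:
    return resized_inputs - np.array([[[_CHANNEL_MEANS]]], dtype=np.float32)
  return resized_inputs

batch = np.full((1, 2, 2, 3), 255.0, dtype=np.float32)
print(faster_rcnn_resnet_preprocess(batch)[0, 0, 0])  # roughly [131.32, 138.22, 151.06]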
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Faster RCNN Keras-based Resnet V1 FPN Feature Extractor.""" import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import resnet_v1 from object_detection.utils import ops _RESNET_MODEL_OUTPUT_LAYERS = { 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out'], 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block23_out', 'conv5_block3_out'], 'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out', 'conv4_block36_out', 'conv5_block3_out'], } class _ResnetFPN(tf.keras.layers.Layer): """Construct Resnet FPN layer.""" def __init__(self, backbone_classifier, fpn_features_generator, coarse_feature_layers, pad_to_multiple, fpn_min_level, resnet_block_names, base_fpn_max_level): """Constructor. Args: backbone_classifier: Classifier backbone. Should be one of 'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152'. fpn_features_generator: KerasFpnTopDownFeatureMaps that accepts a dictionary of features and returns a ordered dictionary of fpn features. coarse_feature_layers: Coarse feature layers for fpn. pad_to_multiple: An integer multiple to pad input image. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to Resnet v1 layers. resnet_block_names: a list of block names of resnet. base_fpn_max_level: maximum level of fpn without coarse feature layers. """ super(_ResnetFPN, self).__init__() self.classification_backbone = backbone_classifier self.fpn_features_generator = fpn_features_generator self.coarse_feature_layers = coarse_feature_layers self.pad_to_multiple = pad_to_multiple self._fpn_min_level = fpn_min_level self._resnet_block_names = resnet_block_names self._base_fpn_max_level = base_fpn_max_level def call(self, inputs): """Create internal Resnet FPN layer. Args: inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. Returns: feature_maps: A list of tensors with shape [batch, height, width, depth] represent extracted features. 
""" inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple) backbone_outputs = self.classification_backbone(inputs) feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) feature_block_map = dict( list(zip(self._resnet_block_names, backbone_outputs))) fpn_input_image_features = [ (feature_block, feature_block_map[feature_block]) for feature_block in feature_block_list] fpn_features = self.fpn_features_generator(fpn_input_image_features) feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) last_feature_map = fpn_features['top_down_block{}'.format( self._base_fpn_max_level - 1)] for coarse_feature_layers in self.coarse_feature_layers: for layer in coarse_feature_layers: last_feature_map = layer(last_feature_map) feature_maps.append(last_feature_map) return feature_maps class FasterRCNNResnetV1FpnKerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features.""" def __init__(self, is_training, resnet_v1_base_model, resnet_v1_base_model_name, first_stage_features_stride, conv_hyperparams, batch_norm_trainable=True, pad_to_multiple=32, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: See base class. resnet_v1_base_model: base resnet v1 network to use. One of the resnet_v1.resnet_v1_{50,101,152} models. resnet_v1_base_model_name: model name under which to construct resnet v1. first_stage_features_stride: See base class. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. batch_norm_trainable: See base class. pad_to_multiple: An integer multiple to pad input image. weight_decay: See base class. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to Resnet v1 layers. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. 
""" if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNResnetV1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay) self._resnet_v1_base_model = resnet_v1_base_model self._resnet_v1_base_model_name = resnet_v1_base_model_name self._conv_hyperparams = conv_hyperparams self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._freeze_batchnorm = (not batch_norm_trainable) self._pad_to_multiple = pad_to_multiple self._override_base_feature_extractor_hyperparams = \ override_base_feature_extractor_hyperparams self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] self.classification_backbone = None self._fpn_features_generator = None self._coarse_feature_layers = [] def preprocess(self, resized_inputs): """Faster R-CNN Resnet V1 preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def get_proposal_feature_extractor_model(self, name=None): """Returns a model that extracts first stage RPN features. Extracts features using the Resnet v1 FPN network. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. 
And returns rpn_feature_map: A list of tensors with shape [batch, height, width, depth] """ with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): full_resnet_v1_model = self._resnet_v1_base_model( batchnorm_training=self._train_batch_norm, conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), classes=None, weights=None, include_top=False) output_layers = _RESNET_MODEL_OUTPUT_LAYERS[ self._resnet_v1_base_model_name] outputs = [full_resnet_v1_model.get_layer(output_layer_name).output for output_layer_name in output_layers] self.classification_backbone = tf.keras.Model( inputs=full_resnet_v1_model.inputs, outputs=outputs) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._additional_layer_depth, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) # Construct coarse feature layers for i in range(self._base_fpn_max_level, self._fpn_max_level): layers = [] layer_name = 'bottom_up_block{}'.format(i) layers.append( tf.keras.layers.Conv2D( self._additional_layer_depth, [3, 3], padding='SAME', strides=2, name=layer_name + '_conv', **self._conv_hyperparams.params())) layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name=layer_name + '_batchnorm')) layers.append( self._conv_hyperparams.build_activation_layer( name=layer_name)) self._coarse_feature_layers.append(layers) feature_extractor_model = _ResnetFPN(self.classification_backbone, self._fpn_features_generator, self._coarse_feature_layers, self._pad_to_multiple, self._fpn_min_level, self._resnet_block_names, self._base_fpn_max_level) return feature_extractor_model def get_box_classifier_feature_extractor_model(self, name=None): """Returns a model that extracts second stage box classifier features. Construct two fully connected layer to extract the box classifier features. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. And returns proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, 1, 1, 1024] representing box classifier features for each proposal. """ with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): feature_extractor_model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=1024, activation='relu'), self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm)), tf.keras.layers.Dense(units=1024, activation='relu'), tf.keras.layers.Reshape((1, 1, 1024)) ]) return feature_extractor_model class FasterRCNNResnet50FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet50 FPN feature extractor.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. 
batch_norm_trainable: See base class. conv_hyperparams: See base class. weight_decay: See base class. fpn_min_level: See base class. fpn_max_level: See base class. additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ super(FasterRCNNResnet50FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, resnet_v1_base_model=resnet_v1.resnet_v1_50, resnet_v1_base_model_name='resnet_v1_50', batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay, fpn_min_level=fpn_min_level, fpn_max_level=fpn_max_level, additional_layer_depth=additional_layer_depth, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams ) class FasterRCNNResnet101FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet101 FPN feature extractor.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. conv_hyperparams: See base class. weight_decay: See base class. fpn_min_level: See base class. fpn_max_level: See base class. additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ super(FasterRCNNResnet101FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, resnet_v1_base_model=resnet_v1.resnet_v1_101, resnet_v1_base_model_name='resnet_v1_101', batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay, fpn_min_level=fpn_min_level, fpn_max_level=fpn_max_level, additional_layer_depth=additional_layer_depth, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) class FasterRCNNResnet152FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet152 FPN feature extractor.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. conv_hyperparams: See base class. weight_decay: See base class. fpn_min_level: See base class. fpn_max_level: See base class. additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ super(FasterRCNNResnet152FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, resnet_v1_base_model=resnet_v1.resnet_v1_152, resnet_v1_base_model_name='resnet_v1_152', batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay, fpn_min_level=fpn_min_level, fpn_max_level=fpn_max_level, additional_layer_depth=additional_layer_depth, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
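The FPN extractor above caps the backbone-fed levels at 5 (ResNet block4) and manufactures any requested level beyond that with an extra stride-2 "bottom_up_block" convolution stack. A small self-contained sketch of that level bookkeeping, taken directly from the arithmetic in get_proposal_feature_extractor_model (the helper name is hypothetical):

def fpn_level_plan(fpn_min_level=2, fpn_max_level=6):
  # The backbone supplies levels up to 5; anything coarser needs a coarse block.
  base_fpn_max_level = min(fpn_max_level, 5)
  num_fpn_levels = base_fpn_max_level + 1 - fpn_min_level
  num_coarse_blocks = fpn_max_level - base_fpn_max_level
  return num_fpn_levels, num_coarse_blocks

print(fpn_level_plan())      # (4, 1): levels 2-5 from FPN, level 6 from one coarse block
print(fpn_level_plan(2, 7))  # (4, 2): levels 6 and 7 from two coarse blocks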
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v2_nas_fpn_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v2_mnasfpn_feature_extractor as mnasfpn_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdMobilenetV2MnasFPNFeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False): min_depth = 16 is_training = True fpn_num_filters = 48 return mnasfpn_feature_extractor.SSDMobileNetV2MnasFPNFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, additional_layer_depth=fpn_num_filters, use_explicit_padding=use_explicit_padding) def test_extract_features_returns_correct_shapes_320_256(self): image_height = 320 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 40, 32, 48), (2, 20, 16, 48), (2, 10, 8, 48), (2, 5, 4, 48)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): image_height = 256 image_width = 256 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 16), (2, 16, 16, 16), (2, 8, 8, 16), (2, 4, 4, 16)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_preprocess_returns_correct_value_range(self): image_height = 320 image_width = 320 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py
ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py
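The MnasFPN test above uses a non-square 320x256 input, so height and width are downsampled independently; the four expected maps sit at strides 8, 16, 32 and 64, each with the 48 filters passed as additional_layer_depth. A quick plain-Python sketch of that expectation (helper name is illustrative, and exact division applies only because the sizes are divisible by the strides):

def mnasfpn_expected_shapes(batch, height, width, num_filters=48,
                            strides=(8, 16, 32, 64)):
  # Height and width scale independently across the four MnasFPN levels.
  return [(batch, height // s, width // s, num_filters) for s in strides]

assert mnasfpn_expected_shapes(2, 320, 256) == [
    (2, 40, 32, 48), (2, 20, 16, 48), (2, 10, 8, 48), (2, 5, 4, 48)]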
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class FasterRcnnInceptionResnetV2FeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride): return frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor( is_training=False, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 299, 299, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 19, 19, 1088]) def test_extract_proposal_features_stride_eight(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=8) preprocessed_inputs = tf.random_uniform( [1, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 28, 28, 1088]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 7, 7, 1088]) def test_extract_proposal_features_dies_on_invalid_stride(self): with self.assertRaises(ValueError): self._build_feature_extractor(first_stage_features_stride=99) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(ValueError): 
feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [2, 17, 17, 1088], maxval=255, dtype=tf.float32) proposal_classifier_features = ( feature_extractor.extract_box_classifier_features( proposal_feature_maps, scope='TestScope')) features_shape = tf.shape(proposal_classifier_features) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [2, 8, 8, 1536]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py
faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py
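The spatial sizes asserted in the tests above are consistent with 'SAME'-padded striding: the 1088-channel proposal feature map comes out at roughly ceil(input_size / first_stage_features_stride). A back-of-the-envelope sketch of that arithmetic (helper name is illustrative):

import math

def rpn_feature_size(image_size, first_stage_features_stride):
  # 'SAME' padding rounds the strided output size up.
  return int(math.ceil(image_size / float(first_stage_features_stride)))

assert rpn_feature_size(299, 16) == 19
assert rpn_feature_size(224, 8) == 28
assert rpn_feature_size(112, 16) == 7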
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for MobileDet features.""" import functools import numpy as np import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import ops from object_detection.utils import shape_utils BACKBONE_WEIGHT_DECAY = 4e-5 def _scale_filters(filters, multiplier, base=8): """Scale the filters accordingly to (multiplier, base).""" round_half_up = int(int(filters) * multiplier / base + 0.5) result = int(round_half_up * base) return max(result, base) def _swish6(h): with tf.name_scope('swish6'): return h * tf.nn.relu6(h + np.float32(3)) * np.float32(1. / 6.) def _conv(h, filters, kernel_size, strides=1, normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6): if activation_fn is None: raise ValueError('Activation function cannot be None. Use tf.identity ' 'instead to better support quantized training.') return slim.conv2d( h, filters, kernel_size, stride=strides, activation_fn=activation_fn, normalizer_fn=normalizer_fn, weights_initializer=tf.initializers.he_normal(), weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY), padding='SAME') def _separable_conv( h, filters, kernel_size, strides=1, activation_fn=tf.nn.relu6): """Separable convolution layer.""" if activation_fn is None: raise ValueError('Activation function cannot be None. Use tf.identity ' 'instead to better support quantized training.') # Depthwise variant of He initialization derived under the principle proposed # in the original paper. Note the original He normalization was designed for # full convolutions and calling tf.initializers.he_normal() can over-estimate # the fan-in of a depthwise kernel by orders of magnitude. 
stddev = (2.0 / kernel_size**2)**0.5 / .87962566103423978 depthwise_initializer = tf.initializers.truncated_normal(stddev=stddev) return slim.separable_conv2d( h, filters, kernel_size, stride=strides, activation_fn=activation_fn, normalizer_fn=slim.batch_norm, weights_initializer=depthwise_initializer, pointwise_initializer=tf.initializers.he_normal(), weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY), padding='SAME') def _squeeze_and_excite(h, hidden_dim, activation_fn=tf.nn.relu6): with tf.variable_scope(None, default_name='SqueezeExcite'): height, width = h.shape[1], h.shape[2] u = slim.avg_pool2d(h, [height, width], stride=1, padding='VALID') u = _conv(u, hidden_dim, 1, normalizer_fn=None, activation_fn=activation_fn) u = _conv(u, h.shape[-1], 1, normalizer_fn=None, activation_fn=tf.nn.sigmoid) return u * h def _inverted_bottleneck_no_expansion( h, filters, activation_fn=tf.nn.relu6, kernel_size=3, strides=1, use_se=False): """Inverted bottleneck layer without the first 1x1 expansion convolution.""" with tf.variable_scope(None, default_name='IBNNoExpansion'): # Setting filters to None will make _separable_conv a depthwise conv. h = _separable_conv( h, None, kernel_size, strides=strides, activation_fn=activation_fn) if use_se: hidden_dim = _scale_filters(h.shape[-1], 0.25) h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) h = _conv(h, filters, 1, activation_fn=tf.identity) return h def _inverted_bottleneck( h, filters, activation_fn=tf.nn.relu6, kernel_size=3, expansion=8, strides=1, use_se=False, residual=True): """Inverted bottleneck layer.""" with tf.variable_scope(None, default_name='IBN'): shortcut = h expanded_filters = int(h.shape[-1]) * expansion if expansion <= 1: raise ValueError('Expansion factor must be greater than 1.') h = _conv(h, expanded_filters, 1, activation_fn=activation_fn) # Setting filters to None will make _separable_conv a depthwise conv. 
h = _separable_conv(h, None, kernel_size, strides=strides, activation_fn=activation_fn) if use_se: hidden_dim = _scale_filters(expanded_filters, 0.25) h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) h = _conv(h, filters, 1, activation_fn=tf.identity) if residual: h = h + shortcut return h def _fused_conv( h, filters, activation_fn=tf.nn.relu6, kernel_size=3, expansion=8, strides=1, use_se=False, residual=True): """Fused convolution layer.""" with tf.variable_scope(None, default_name='FusedConv'): shortcut = h expanded_filters = int(h.shape[-1]) * expansion if expansion <= 1: raise ValueError('Expansion factor must be greater than 1.') h = _conv(h, expanded_filters, kernel_size, strides=strides, activation_fn=activation_fn) if use_se: hidden_dim = _scale_filters(expanded_filters, 0.25) h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) h = _conv(h, filters, 1, activation_fn=tf.identity) if residual: h = h + shortcut return h def _tucker_conv( h, filters, activation_fn=tf.nn.relu6, kernel_size=3, input_rank_ratio=0.25, output_rank_ratio=0.25, strides=1, residual=True): """Tucker convolution layer (generalized bottleneck).""" with tf.variable_scope(None, default_name='TuckerConv'): shortcut = h input_rank = _scale_filters(h.shape[-1], input_rank_ratio) h = _conv(h, input_rank, 1, activation_fn=activation_fn) output_rank = _scale_filters(filters, output_rank_ratio) h = _conv(h, output_rank, kernel_size, strides=strides, activation_fn=activation_fn) h = _conv(h, filters, 1, activation_fn=tf.identity) if residual: h = h + shortcut return h def mobiledet_cpu_backbone(h, multiplier=1.0): """Build a MobileDet CPU backbone.""" def _scale(filters): return _scale_filters(filters, multiplier) ibn = functools.partial( _inverted_bottleneck, use_se=True, activation_fn=_swish6) endpoints = {} h = _conv(h, _scale(16), 3, strides=2, activation_fn=_swish6) h = _inverted_bottleneck_no_expansion( h, _scale(8), use_se=True, activation_fn=_swish6) endpoints['C1'] = h h = ibn(h, _scale(16), expansion=4, strides=2, residual=False) endpoints['C2'] = h h = ibn(h, _scale(32), expansion=8, strides=2, residual=False) h = ibn(h, _scale(32), expansion=4) h = ibn(h, _scale(32), expansion=4) h = ibn(h, _scale(32), expansion=4) endpoints['C3'] = h h = ibn(h, _scale(72), kernel_size=5, expansion=8, strides=2, residual=False) h = ibn(h, _scale(72), expansion=8) h = ibn(h, _scale(72), kernel_size=5, expansion=4) h = ibn(h, _scale(72), expansion=4) h = ibn(h, _scale(72), expansion=8, residual=False) h = ibn(h, _scale(72), expansion=8) h = ibn(h, _scale(72), expansion=8) h = ibn(h, _scale(72), expansion=8) endpoints['C4'] = h h = ibn(h, _scale(104), kernel_size=5, expansion=8, strides=2, residual=False) h = ibn(h, _scale(104), kernel_size=5, expansion=4) h = ibn(h, _scale(104), kernel_size=5, expansion=4) h = ibn(h, _scale(104), expansion=4) h = ibn(h, _scale(144), expansion=8, residual=False) endpoints['C5'] = h return endpoints def mobiledet_dsp_backbone(h, multiplier=1.0): """Build a MobileDet DSP backbone.""" def _scale(filters): return _scale_filters(filters, multiplier) ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) endpoints = {} h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) h = _inverted_bottleneck_no_expansion( h, _scale(24), activation_fn=tf.nn.relu6) endpoints['C1'] = h h = fused(h, _scale(32), 
expansion=4, strides=2, residual=False) h = fused(h, _scale(32), expansion=4) h = ibn(h, _scale(32), expansion=4) h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.75) endpoints['C2'] = h h = fused(h, _scale(64), expansion=8, strides=2, residual=False) h = ibn(h, _scale(64), expansion=4) h = fused(h, _scale(64), expansion=4) h = fused(h, _scale(64), expansion=4) endpoints['C3'] = h h = fused(h, _scale(120), expansion=8, strides=2, residual=False) h = ibn(h, _scale(120), expansion=4) h = ibn(h, _scale(120), expansion=8) h = ibn(h, _scale(120), expansion=8) h = fused(h, _scale(144), expansion=8, residual=False) h = ibn(h, _scale(144), expansion=8) h = ibn(h, _scale(144), expansion=8) h = ibn(h, _scale(144), expansion=8) endpoints['C4'] = h h = ibn(h, _scale(160), expansion=4, strides=2, residual=False) h = ibn(h, _scale(160), expansion=4) h = fused(h, _scale(160), expansion=4) h = tucker(h, _scale(160), input_rank_ratio=0.75, output_rank_ratio=0.75) h = ibn(h, _scale(240), expansion=8, residual=False) endpoints['C5'] = h return endpoints def mobiledet_edgetpu_backbone(h, multiplier=1.0): """Build a MobileDet EdgeTPU backbone.""" def _scale(filters): return _scale_filters(filters, multiplier) ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) endpoints = {} h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) h = tucker(h, _scale(16), input_rank_ratio=0.25, output_rank_ratio=0.75, residual=False) endpoints['C1'] = h h = fused(h, _scale(16), expansion=8, strides=2, residual=False) h = fused(h, _scale(16), expansion=4) h = fused(h, _scale(16), expansion=8) h = fused(h, _scale(16), expansion=4) endpoints['C2'] = h h = fused(h, _scale(40), expansion=8, kernel_size=5, strides=2, residual=False) h = fused(h, _scale(40), expansion=4) h = fused(h, _scale(40), expansion=4) h = fused(h, _scale(40), expansion=4) endpoints['C3'] = h h = ibn(h, _scale(72), expansion=8, strides=2, residual=False) h = ibn(h, _scale(72), expansion=8) h = fused(h, _scale(72), expansion=4) h = fused(h, _scale(72), expansion=4) h = ibn(h, _scale(96), expansion=8, kernel_size=5, residual=False) h = ibn(h, _scale(96), expansion=8, kernel_size=5) h = ibn(h, _scale(96), expansion=8) h = ibn(h, _scale(96), expansion=8) endpoints['C4'] = h h = ibn(h, _scale(120), expansion=8, kernel_size=5, strides=2, residual=False) h = ibn(h, _scale(120), expansion=8) h = ibn(h, _scale(120), expansion=4, kernel_size=5) h = ibn(h, _scale(120), expansion=8) h = ibn(h, _scale(384), expansion=8, kernel_size=5, residual=False) endpoints['C5'] = h return endpoints def mobiledet_gpu_backbone(h, multiplier=1.0): """Build a MobileDet GPU backbone.""" def _scale(filters): return _scale_filters(filters, multiplier) ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) endpoints = {} # block 0 h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) # block 1 h = tucker( h, _scale(16), input_rank_ratio=0.25, output_rank_ratio=0.25, residual=False) endpoints['C1'] = h # block 2 h = fused(h, _scale(32), expansion=8, strides=2, residual=False) h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) h = tucker(h, _scale(32), 
input_rank_ratio=0.25, output_rank_ratio=0.25) endpoints['C2'] = h # block 3 h = fused( h, _scale(64), expansion=8, kernel_size=3, strides=2, residual=False) h = fused(h, _scale(64), expansion=8) h = fused(h, _scale(64), expansion=8) h = fused(h, _scale(64), expansion=4) endpoints['C3'] = h # block 4 h = fused( h, _scale(128), expansion=8, kernel_size=3, strides=2, residual=False) h = fused(h, _scale(128), expansion=4) h = fused(h, _scale(128), expansion=4) h = fused(h, _scale(128), expansion=4) # block 5 h = fused( h, _scale(128), expansion=8, kernel_size=3, strides=1, residual=False) h = fused(h, _scale(128), expansion=8) h = fused(h, _scale(128), expansion=8) h = fused(h, _scale(128), expansion=8) endpoints['C4'] = h # block 6 h = fused( h, _scale(128), expansion=4, kernel_size=3, strides=2, residual=False) h = fused(h, _scale(128), expansion=4) h = fused(h, _scale(128), expansion=4) h = fused(h, _scale(128), expansion=4) # block 7 h = ibn(h, _scale(384), expansion=8, kernel_size=3, strides=1, residual=False) endpoints['C5'] = h return endpoints class SSDMobileDetFeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor): """Base class of SSD feature extractor using MobileDet features.""" def __init__(self, backbone_fn, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDet'): """MobileDet Feature Extractor for SSD Models. Reference: https://arxiv.org/abs/2004.14525 Args: backbone_fn: function to construct the MobileDet backbone. is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: Integer, minimum feature extractor depth (number of filters). pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. use_depthwise: Whether to use depthwise convolutions in the SSD head. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. scope_name: scope name (string) of network variables. """ if use_explicit_padding: raise NotImplementedError( 'Explicit padding is not yet supported in MobileDet backbones.') super(SSDMobileDetFeatureExtractorBase, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams ) self._backbone_fn = backbone_fn self._scope_name = scope_name def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. The preprocessing assumes an input value range of [0, 255]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
""" return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) padded_inputs = ops.pad_to_multiple( preprocessed_inputs, self._pad_to_multiple) feature_map_layout = { 'from_layer': ['C4', 'C5', '', '', '', ''], # Do not specify the layer depths (number of filters) for C4 and C5, as # their values are determined based on the backbone. 'layer_depth': [-1, -1, 512, 256, 256, 128], 'use_depthwise': self._use_depthwise, 'use_explicit_padding': self._use_explicit_padding, } with tf.variable_scope(self._scope_name, reuse=self._reuse_weights): with slim.arg_scope([slim.batch_norm], is_training=self._is_training, epsilon=0.01, decay=0.99, center=True, scale=True): endpoints = self._backbone_fn( padded_inputs, multiplier=self._depth_multiplier) image_features = {'C4': endpoints['C4'], 'C5': endpoints['C5']} with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values()) class SSDMobileDetCPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): """MobileDet-CPU feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDetCPU'): super(SSDMobileDetCPUFeatureExtractor, self).__init__( backbone_fn=mobiledet_cpu_backbone, is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name) class SSDMobileDetDSPFeatureExtractor(SSDMobileDetFeatureExtractorBase): """MobileDet-DSP feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDetDSP'): super(SSDMobileDetDSPFeatureExtractor, self).__init__( backbone_fn=mobiledet_dsp_backbone, is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name) class SSDMobileDetEdgeTPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): """MobileDet-EdgeTPU feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDetEdgeTPU'): super(SSDMobileDetEdgeTPUFeatureExtractor, 
self).__init__( backbone_fn=mobiledet_edgetpu_backbone, is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name) class SSDMobileDetGPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): """MobileDet-GPU feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDetGPU'): super(SSDMobileDetGPUFeatureExtractor, self).__init__( backbone_fn=mobiledet_gpu_backbone, is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobiledet_feature_extractor.py
ssd_mobiledet_feature_extractor.py
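The `_scale_filters` rounding rule and the `_swish6` activation defined in ssd_mobiledet_feature_extractor.py above are easy to sanity-check outside TensorFlow. The following NumPy sketch is illustrative only (it is not part of the package) and simply re-derives both formulas:

import numpy as np

def scale_filters(filters, multiplier, base=8):
    # Round filters * multiplier to the nearest multiple of `base`
    # (0.5 rounds up) and never go below `base` itself.
    rounded = int(int(filters) * multiplier / base + 0.5) * base
    return max(rounded, base)

def swish6(h):
    # Hard-swish: h * relu6(h + 3) / 6, matching _swish6 above.
    # Zero for h <= -3, identity for h >= 3.
    return h * np.clip(h + 3.0, 0.0, 6.0) * (1.0 / 6.0)

assert scale_filters(72, 0.5) == 40   # 36 is halfway between 32 and 40; ties round up
assert scale_filters(16, 0.25) == 8   # floored at the base of 8
print(swish6(np.array([0.0, 1.0, 6.0])))  # -> [0., ~0.667, 6.]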
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_edgetpu_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor_testbase from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdMobilenetEdgeTPUFeatureExtractorTest( ssd_mobilenet_edgetpu_feature_extractor_testbase ._SsdMobilenetEdgeTPUFeatureExtractorTestBase): def _get_input_sizes(self): """Return first two input feature map sizes.""" return [384, 192] def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, use_keras=False): """Constructs a new MobileNetEdgeTPU feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 return (ssd_mobilenet_edgetpu_feature_extractor .SSDMobileNetEdgeTPUFeatureExtractor( False, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py
ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base test class for ssd_mobilenet_v3_feature_extractor.""" import abc import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test class _SsdMobilenetV3FeatureExtractorTestBase( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): """Base class for MobilenetV3 tests.""" @abc.abstractmethod def _get_input_sizes(self): """Return feature map sizes for the two inputs to SSD head.""" pass def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 input_feature_sizes = self._get_input_sizes() expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]), (2, 4, 4, input_feature_sizes[1]), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=False) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 input_feature_sizes = self._get_input_sizes() expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]), (2, 10, 10, input_feature_sizes[1]), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=False) def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 input_feature_sizes = self._get_input_sizes() expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]), (2, 10, 10, input_feature_sizes[1]), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(4, image_height, image_width, 3) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=False) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_has_fused_batchnorm(self): image_height = 40 image_width = 40 depth_multiplier = 1 pad_to_multiple = 1 image_placeholder = tf.placeholder(tf.float32, [1, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=False) preprocessed_image = feature_extractor.preprocess(image_placeholder) _ = feature_extractor.extract_features(preprocessed_image) 
self.assertTrue(any('FusedBatchNorm' in op.type for op in tf.get_default_graph().get_operations()))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py
ssd_mobilenet_v3_feature_extractor_testbase.py
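The expected feature-map shapes hard-coded in the test base above follow a simple pattern: the two backbone endpoints sit at strides 16 and 32 of the (optionally padded) input, and each additional SSD layer halves the resolution with 'SAME'-style ceiling division. The sketch below is only a rough illustration of where tuples such as (19, 10, 5, 3, 2, 1) come from; it is not code from the repository:

import math

def expected_spatial_sizes(image_size, pad_to_multiple=1, num_maps=6):
    padded = int(math.ceil(image_size / pad_to_multiple)) * pad_to_multiple
    sizes = [math.ceil(padded / 16), math.ceil(padded / 32)]  # backbone endpoints
    while len(sizes) < num_maps:
        sizes.append(math.ceil(sizes[-1] / 2))  # extra stride-2 SSD layers
    return sizes

print(expected_spatial_sizes(128))                       # [8, 4, 2, 1, 1, 1]
print(expected_spatial_sizes(299))                       # [19, 10, 5, 3, 2, 1]
print(expected_spatial_sizes(299, pad_to_multiple=32))   # [20, 10, 5, 3, 2, 1]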
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v2_fpn_feature_extractor. By using parameterized test decorator, this test serves for both Slim-based and Keras-based Mobilenet V2 FPN feature extractors in SSD. """ import unittest from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v2_fpn_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') @parameterized.parameters( { 'use_depthwise': False }, { 'use_depthwise': True }, ) class SsdMobilenetV2FpnFeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, is_training=True, use_explicit_padding=False, use_keras=False, use_depthwise=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. is_training: whether the network is in training mode. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. use_depthwise: Whether to use depthwise convolutions. Returns: an ssd_meta_arch.SSDFeatureExtractor object. 
""" del use_keras min_depth = 32 return (ssd_mobilenet_v2_fpn_feature_extractor .SSDMobileNetV2FpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_depthwise=use_depthwise, use_explicit_padding=use_explicit_padding)) def test_extract_features_returns_correct_shapes_256(self, use_depthwise): use_keras = False image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_returns_correct_shapes_384(self, use_depthwise): use_keras = False image_height = 320 image_width = 320 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), (2, 10, 10, 256), (2, 5, 5, 256), (2, 3, 3, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_with_dynamic_image_shape(self, use_depthwise): use_keras = False image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self, use_depthwise): use_keras = False image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), (2, 10, 10, 256), (2, 5, 5, 256), (2, 3, 3, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_returns_correct_shapes_enforcing_min_depth( self, use_depthwise): use_keras = False image_height = 256 image_width = 256 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32), (2, 2, 2, 
32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=use_keras, use_depthwise=use_depthwise) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=use_keras, use_depthwise=use_depthwise) def test_extract_features_raises_error_with_invalid_image_size( self, use_depthwise): use_keras = False image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) def test_preprocess_returns_correct_value_range(self, use_depthwise): use_keras = False image_height = 256 image_width = 256 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self, use_depthwise): use_keras = False depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'MobilenetV2' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name, use_keras=use_keras, use_depthwise=use_depthwise) def test_fused_batchnorm(self, use_depthwise): use_keras = False image_height = 256 image_width = 256 depth_multiplier = 1 pad_to_multiple = 1 image_placeholder = tf.placeholder(tf.float32, [1, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) preprocessed_image = feature_extractor.preprocess(image_placeholder) _ = feature_extractor.extract_features(preprocessed_image) self.assertTrue( any('FusedBatchNorm' in op.type for op in tf.get_default_graph().get_operations())) def test_variable_count(self, use_depthwise): use_keras = False depth_multiplier = 1 pad_to_multiple = 1 variables = self.get_feature_extractor_variables( depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) expected_variables_len = 274 if use_depthwise: expected_variables_len = 278 self.assertEqual(len(variables), expected_variables_len) def test_get_expected_feature_map_variable_names(self, use_depthwise): use_keras = False depth_multiplier = 1.0 pad_to_multiple = 1 slim_expected_feature_maps_variables = set([ # Slim Mobilenet V2 feature maps 'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights', 'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights', 'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights', 'MobilenetV2/Conv_1/weights', # FPN layers 'MobilenetV2/fpn/bottom_up_Conv2d_20/weights', 'MobilenetV2/fpn/bottom_up_Conv2d_21/weights', 'MobilenetV2/fpn/smoothing_1/weights', 'MobilenetV2/fpn/smoothing_2/weights', 'MobilenetV2/fpn/projection_1/weights', 'MobilenetV2/fpn/projection_2/weights', 'MobilenetV2/fpn/projection_3/weights', ]) slim_expected_feature_maps_variables_with_depthwise = set([ # Slim Mobilenet V2 feature maps 'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights', 'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights', 
'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights', 'MobilenetV2/Conv_1/weights', # FPN layers 'MobilenetV2/fpn/bottom_up_Conv2d_20/pointwise_weights', 'MobilenetV2/fpn/bottom_up_Conv2d_20/depthwise_weights', 'MobilenetV2/fpn/bottom_up_Conv2d_21/pointwise_weights', 'MobilenetV2/fpn/bottom_up_Conv2d_21/depthwise_weights', 'MobilenetV2/fpn/smoothing_1/depthwise_weights', 'MobilenetV2/fpn/smoothing_1/pointwise_weights', 'MobilenetV2/fpn/smoothing_2/depthwise_weights', 'MobilenetV2/fpn/smoothing_2/pointwise_weights', 'MobilenetV2/fpn/projection_1/weights', 'MobilenetV2/fpn/projection_2/weights', 'MobilenetV2/fpn/projection_3/weights', ]) g = tf.Graph() with g.as_default(): preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) _ = feature_extractor.extract_features(preprocessed_inputs) expected_feature_maps_variables = slim_expected_feature_maps_variables if use_depthwise: expected_feature_maps_variables = ( slim_expected_feature_maps_variables_with_depthwise) actual_variable_set = set([ var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) ]) variable_intersection = expected_feature_maps_variables.intersection( actual_variable_set) self.assertSetEqual(expected_feature_maps_variables, variable_intersection) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py
ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py
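The min-depth test above expects every FPN map at 32 channels once the depth multiplier drops to 0.5**12. A minimal sketch of why, assuming the usual depth rule applied by the feature-map generators (scale, truncate, clamp at min_depth); the helper name below is invented for illustration:

def scaled_depth(depth, multiplier, min_depth=32):
    # Assumed rule: scale, truncate to int, then clamp at min_depth.
    return max(int(depth * multiplier), min_depth)

print(scaled_depth(256, 1.0))      # 256 -> the usual FPN channel count
print(scaled_depth(256, 0.5**12))  # 32  -> matches the (2, 32, 32, 32) shapes above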
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD MobilenetV2 NAS-FPN Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.utils import ops from object_detection.utils import shape_utils from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 Block = collections.namedtuple( 'Block', ['inputs', 'output_level', 'kernel_size', 'expansion_size']) _MNASFPN_CELL_CONFIG = [ Block(inputs=(1, 2), output_level=4, kernel_size=3, expansion_size=256), Block(inputs=(0, 4), output_level=3, kernel_size=3, expansion_size=128), Block(inputs=(5, 4), output_level=4, kernel_size=3, expansion_size=128), Block(inputs=(4, 3), output_level=5, kernel_size=5, expansion_size=128), Block(inputs=(4, 3), output_level=6, kernel_size=3, expansion_size=96), ] MNASFPN_DEF = dict( feature_levels=[3, 4, 5, 6], spec=[_MNASFPN_CELL_CONFIG] * 4, ) def _maybe_pad(feature, use_explicit_padding, kernel_size=3): return ops.fixed_padding(feature, kernel_size) if use_explicit_padding else feature # Wrapper around mobilenet.depth_multiplier def _apply_multiplier(d, multiplier, min_depth): p = {'num_outputs': d} mobilenet.depth_multiplier( p, multiplier=multiplier, divisible_by=8, min_depth=min_depth) return p['num_outputs'] def _apply_size_dependent_ordering(input_feature, feature_level, block_level, expansion_size, use_explicit_padding, use_native_resize_op): """Applies Size-Dependent-Ordering when resizing feature maps. See https://arxiv.org/abs/1912.01106 Args: input_feature: input feature map to be resized. feature_level: the level of the input feature. block_level: the desired output level for the block. expansion_size: the expansion size for the block. use_explicit_padding: Whether to use explicit padding. use_native_resize_op: Whether to use native resize op. Returns: A transformed feature at the desired resolution and expansion size. """ padding = 'VALID' if use_explicit_padding else 'SAME' if feature_level >= block_level: # Perform 1x1 then upsampling. node = slim.conv2d( input_feature, expansion_size, [1, 1], activation_fn=None, normalizer_fn=slim.batch_norm, padding=padding, scope='Conv1x1') if feature_level == block_level: return node scale = 2**(feature_level - block_level) if use_native_resize_op: input_shape = shape_utils.combined_static_and_dynamic_shape(node) node = tf.image.resize_nearest_neighbor( node, [input_shape[1] * scale, input_shape[2] * scale]) else: node = ops.nearest_neighbor_upsampling(node, scale=scale) else: # Perform downsampling then 1x1. 
stride = 2**(block_level - feature_level) node = slim.max_pool2d( _maybe_pad(input_feature, use_explicit_padding), [3, 3], stride=[stride, stride], padding=padding, scope='Downsample') node = slim.conv2d( node, expansion_size, [1, 1], activation_fn=None, normalizer_fn=slim.batch_norm, padding=padding, scope='Conv1x1') return node def _mnasfpn_cell(feature_maps, feature_levels, cell_spec, output_channel=48, use_explicit_padding=False, use_native_resize_op=False, multiplier_func=None): """Create a MnasFPN cell. Args: feature_maps: input feature maps. feature_levels: levels of the feature maps. cell_spec: A list of Block configs. output_channel: Number of features for the input, output and intermediate feature maps. use_explicit_padding: Whether to use explicit padding. use_native_resize_op: Whether to use native resize op. multiplier_func: Depth-multiplier function. If None, use identity function. Returns: A transformed list of feature maps at the same resolutions as the inputs. """ # This is the level where multipliers are realized. if multiplier_func is None: multiplier_func = lambda x: x num_outputs = len(feature_maps) cell_features = list(feature_maps) cell_levels = list(feature_levels) padding = 'VALID' if use_explicit_padding else 'SAME' for bi, block in enumerate(cell_spec): with tf.variable_scope('block_{}'.format(bi)): block_level = block.output_level intermediate_feature = None for i, inp in enumerate(block.inputs): with tf.variable_scope('input_{}'.format(i)): input_level = cell_levels[inp] node = _apply_size_dependent_ordering( cell_features[inp], input_level, block_level, multiplier_func(block.expansion_size), use_explicit_padding, use_native_resize_op) # Add features incrementally to avoid producing AddN, which doesn't # play well with TfLite. if intermediate_feature is None: intermediate_feature = node else: intermediate_feature += node node = tf.nn.relu6(intermediate_feature) node = slim.separable_conv2d( _maybe_pad(node, use_explicit_padding, block.kernel_size), multiplier_func(output_channel), block.kernel_size, activation_fn=None, normalizer_fn=slim.batch_norm, padding=padding, scope='SepConv') cell_features.append(node) cell_levels.append(block_level) # Cell-wide residuals. out_idx = range(len(cell_features) - num_outputs, len(cell_features)) for in_i, out_i in enumerate(out_idx): if cell_features[out_i].shape.as_list( ) == cell_features[in_i].shape.as_list(): cell_features[out_i] += cell_features[in_i] return cell_features[-num_outputs:] def mnasfpn(feature_maps, head_def, output_channel=48, use_explicit_padding=False, use_native_resize_op=False, multiplier_func=None): """Create the MnasFPN head given head_def.""" features = feature_maps for ci, cell_spec in enumerate(head_def['spec']): with tf.variable_scope('cell_{}'.format(ci)): features = _mnasfpn_cell(features, head_def['feature_levels'], cell_spec, output_channel, use_explicit_padding, use_native_resize_op, multiplier_func) return features def training_scope(l2_weight_decay=1e-4, is_training=None): """Arg scope for training MnasFPN.""" with slim.arg_scope( [slim.conv2d], weights_initializer=tf.initializers.he_normal(), weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \ slim.arg_scope( [slim.separable_conv2d], weights_initializer=tf.initializers.truncated_normal( stddev=0.536), # He_normal for 3x3 depthwise kernel. 
weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \ slim.arg_scope([slim.batch_norm], is_training=is_training, epsilon=0.01, decay=0.99, center=True, scale=True) as s: return s class SSDMobileNetV2MnasFPNFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV2 MnasFPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=6, additional_layer_depth=48, head_def=None, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False, data_format='channels_last'): """SSD MnasFPN feature extractor based on Mobilenet v2 architecture. See https://arxiv.org/abs/1912.01106 Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the highest resolution feature map to use in MnasFPN. Currently the only valid value is 3. fpn_max_level: the smallest resolution feature map to construct or use in MnasFPN. Currentl the only valid value is 6. additional_layer_depth: additional feature map layer channel depth for NAS-FPN. head_def: A dictionary specifying the MnasFPN head architecture. Default uses MNASFPN_DEF. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use native resize op. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. data_format: The ordering of the dimensions in the inputs, The valid values are {'channels_first', 'channels_last'). """ super(SSDMobileNetV2MnasFPNFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=( override_base_feature_extractor_hyperparams)) if fpn_min_level != 3 or fpn_max_level != 6: raise ValueError('Min and max levels of MnasFPN must be 3 and 6 for now.') self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._fpn_layer_depth = additional_layer_depth self._head_def = head_def if head_def else MNASFPN_DEF self._data_format = data_format self._use_native_resize_op = use_native_resize_op def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
""" return (2.0 / 255.0) * resized_inputs - 1.0 def _verify_config(self, inputs): """Verify that MnasFPN config and its inputs.""" num_inputs = len(inputs) assert len(self._head_def['feature_levels']) == num_inputs base_width = inputs[0].shape.as_list( )[1] * 2**self._head_def['feature_levels'][0] for i in range(1, num_inputs): width = inputs[i].shape.as_list()[1] level = self._head_def['feature_levels'][i] expected_width = base_width // 2**level if width != expected_width: raise ValueError( 'Resolution of input {} does not match its level {}.'.format( i, level)) for cell_spec in self._head_def['spec']: # The last K nodes in a cell are the inputs to the next cell. Assert that # their feature maps are at the right level. for i in range(num_inputs): if cell_spec[-num_inputs + i].output_level != self._head_def['feature_levels'][i]: raise ValueError( 'Mismatch between node level {} and desired output level {}.' .format(cell_spec[-num_inputs + i].output_level, self._head_def['feature_levels'][i])) # Assert that each block only uses precending blocks. for bi, block_spec in enumerate(cell_spec): for inp in block_spec.inputs: if inp >= bi + num_inputs: raise ValueError( 'Block {} is trying to access uncreated block {}.'.format( bi, inp)) def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v2.training_scope(is_training=None, bn_decay=0.99)), \ slim.arg_scope( [mobilenet.depth_multiplier], min_depth=self._min_depth): with slim.arg_scope( training_scope(l2_weight_decay=4e-5, is_training=self._is_training)): _, image_features = mobilenet_v2.mobilenet_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='layer_18', depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) multiplier_func = functools.partial( _apply_multiplier, multiplier=self._depth_multiplier, min_depth=self._min_depth) with tf.variable_scope('MnasFPN', reuse=self._reuse_weights): with slim.arg_scope( training_scope(l2_weight_decay=1e-4, is_training=self._is_training)): # Create C6 by downsampling C5. c6 = slim.max_pool2d( _maybe_pad(image_features['layer_18'], self._use_explicit_padding), [3, 3], stride=[2, 2], padding='VALID' if self._use_explicit_padding else 'SAME', scope='C6_downsample') c6 = slim.conv2d( c6, multiplier_func(self._fpn_layer_depth), [1, 1], activation_fn=tf.identity, normalizer_fn=slim.batch_norm, weights_regularizer=None, # this 1x1 has no kernel regularizer. padding='VALID', scope='C6_Conv1x1') image_features['C6'] = tf.identity(c6) # Needed for quantization. for k in sorted(image_features.keys()): tf.logging.error('{}: {}'.format(k, image_features[k])) mnasfpn_inputs = [ image_features['layer_7'], # C3 image_features['layer_14'], # C4 image_features['layer_18'], # C5 image_features['C6'] # C6 ] self._verify_config(mnasfpn_inputs) feature_maps = mnasfpn( mnasfpn_inputs, head_def=self._head_def, output_channel=self._fpn_layer_depth, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op, multiplier_func=multiplier_func) return feature_maps
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py
ssd_mobilenet_v2_mnasfpn_feature_extractor.py
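The `_apply_size_dependent_ordering` helper in the MnasFPN extractor above encodes one idea: apply the 1x1 projection at whichever of the two resolutions is coarser. When the input level is at or above the block level (the input is coarser), it is projected first and then upsampled; otherwise it is pooled down first and projected afterwards. The NumPy stand-in below only illustrates that ordering and the resulting shapes; the projection is modelled as identity rather than a slim conv:

import numpy as np

def sdo_transform(x, feature_level, block_level):
    # Stand-in for _apply_size_dependent_ordering on a [H, W, C] array.
    if feature_level >= block_level:
        # Input is coarser: "1x1 conv" first, then nearest-neighbor upsample.
        scale = 2 ** (feature_level - block_level)
        return np.repeat(np.repeat(x, scale, axis=0), scale, axis=1)
    # Input is finer: subsample first (standing in for the 3x3 max pool),
    # then the "1x1 conv" runs on the smaller map.
    stride = 2 ** (block_level - feature_level)
    return x[::stride, ::stride, :]

c5 = np.zeros((8, 8, 48))     # a level-5 map for a 256-px input
print(sdo_transform(c5, feature_level=5, block_level=3).shape)  # (32, 32, 48)
c3 = np.zeros((32, 32, 48))   # a level-3 map for the same input
print(sdo_transform(c3, feature_level=3, block_level=6).shape)  # (4, 4, 48)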
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MobileNet V2[1] feature extractor for CenterNet[2] meta architecture. [1]: https://arxiv.org/abs/1801.04381 [2]: https://arxiv.org/abs/1904.07850 """ import tensorflow.compat.v1 as tf from object_detection.meta_architectures import center_net_meta_arch from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2 class CenterNetMobileNetV2FeatureExtractor( center_net_meta_arch.CenterNetFeatureExtractor): """The MobileNet V2 feature extractor for CenterNet.""" def __init__(self, mobilenet_v2_net, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Initializes the feature extractor. Args: mobilenet_v2_net: The underlying mobilenet_v2 network to use. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, green, red] order. """ super(CenterNetMobileNetV2FeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) self._network = mobilenet_v2_net output = self._network(self._network.input) # MobileNet by itself transforms a 224x224x3 volume into a 7x7x1280 volume, # which leads to a stride of 32. We perform upsampling to get it to a target # stride of 4. for num_filters in [256, 128, 64]: # 1. We use a simple convolution instead of a deformable convolution. conv = tf.keras.layers.Conv2D( filters=num_filters, kernel_size=1, strides=1, padding='same') output = conv(output) output = tf.keras.layers.BatchNormalization()(output) output = tf.keras.layers.ReLU()(output) # 2. We use the default initialization for the convolution layers # instead of initializing it to do bilinear upsampling. 
conv_transpose = tf.keras.layers.Conv2DTranspose( filters=num_filters, kernel_size=3, strides=2, padding='same') output = conv_transpose(output) output = tf.keras.layers.BatchNormalization()(output) output = tf.keras.layers.ReLU()(output) self._network = tf.keras.models.Model( inputs=self._network.input, outputs=output) def preprocess(self, resized_inputs): resized_inputs = super(CenterNetMobileNetV2FeatureExtractor, self).preprocess(resized_inputs) return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs) def load_feature_extractor_weights(self, path): self._network.load_weights(path) def call(self, inputs): return [self._network(inputs)] @property def out_stride(self): """The stride in the output image of the network.""" return 4 @property def num_feature_outputs(self): """The number of feature outputs returned by the feature extractor.""" return 1 @property def classification_backbone(self): return self._network def mobilenet_v2(channel_means, channel_stds, bgr_ordering, depth_multiplier=1.0, **kwargs): """The MobileNetV2 backbone for CenterNet.""" del kwargs # We set 'is_training' to True for now. network = mobilenetv2.mobilenet_v2( batchnorm_training=True, alpha=depth_multiplier, include_top=False, weights='imagenet' if depth_multiplier == 1.0 else None) return CenterNetMobileNetV2FeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_mobilenet_v2_feature_extractor.py
center_net_mobilenet_v2_feature_extractor.py
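The comment inside the extractor above notes that MobileNetV2 turns a 224x224x3 input into a 7x7x1280 volume (stride 32) and that three conv plus transposed-conv blocks bring it back to stride 4. That arithmetic can be checked directly; the snippet is purely illustrative:

backbone_stride = 32          # 224 / 7, the native MobileNetV2 output stride
num_upsampling_blocks = 3     # the [256, 128, 64] loop in the constructor above
out_stride = backbone_stride // (2 ** num_upsampling_blocks)
assert out_stride == 4        # matches the extractor's out_stride property
print(224 // out_stride)      # 56, i.e. a (batch, 56, 56, 64) feature map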
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for MobileNetV3 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v3 class SSDMobileNetV3FeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor): """Base class of SSD feature extractor using MobilenetV3 features.""" def __init__(self, conv_defs, from_layer, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobilenetV3'): """MobileNetV3 Feature Extractor for SSD Models. MobileNet v3. Details found in: https://arxiv.org/abs/1905.02244 Args: conv_defs: MobileNetV3 conv defs for backbone. from_layer: A cell of two layer names (string) to connect to the 1st and 2nd inputs of the SSD head. is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. scope_name: scope name (string) of network variables. """ super(SSDMobileNetV3FeatureExtractorBase, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams ) self._conv_defs = conv_defs self._from_layer = from_layer self._scope_name = scope_name def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
""" return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] Raises: ValueError if conv_defs is not provided or from_layer does not meet the size requirement. """ if not self._conv_defs: raise ValueError('Must provide backbone conv defs.') if len(self._from_layer) != 2: raise ValueError('SSD input feature names are not provided.') preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': [ self._from_layer[0], self._from_layer[1], '', '', '', '' ], 'layer_depth': [-1, -1, 512, 256, 256, 128], 'use_depthwise': self._use_depthwise, 'use_explicit_padding': self._use_explicit_padding, } with tf.variable_scope( self._scope_name, reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v3.training_scope(is_training=None, bn_decay=0.9997)), \ slim.arg_scope( [mobilenet.depth_multiplier], min_depth=self._min_depth): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v3.mobilenet_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), conv_defs=self._conv_defs, final_endpoint=self._from_layer[1], depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values()) class SSDMobileNetV3LargeFeatureExtractor(SSDMobileNetV3FeatureExtractorBase): """Mobilenet V3-Large feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobilenetV3'): super(SSDMobileNetV3LargeFeatureExtractor, self).__init__( conv_defs=mobilenet_v3.V3_LARGE_DETECTION, from_layer=['layer_14/expansion_output', 'layer_17'], is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name ) class SSDMobileNetV3SmallFeatureExtractor(SSDMobileNetV3FeatureExtractorBase): """Mobilenet V3-Small feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobilenetV3'): super(SSDMobileNetV3SmallFeatureExtractor, self).__init__( conv_defs=mobilenet_v3.V3_SMALL_DETECTION, from_layer=['layer_10/expansion_output', 'layer_13'], is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, 
use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name )
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v3_feature_extractor.py
ssd_mobilenet_v3_feature_extractor.py
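Not part of the package: a minimal usage sketch for the extractor above. The `conv_hyperparams_fn` below is a hypothetical stand-in built directly with tf_slim (a real pipeline would supply one through hyperparams_builder), and the input size is arbitrary as long as it is at least 33 pixels per side.

import tensorflow.compat.v1 as tf
import tf_slim as slim

from object_detection.models import ssd_mobilenet_v3_feature_extractor


def conv_hyperparams_fn():
  # Hypothetical stand-in for the arg_scope normally built by hyperparams_builder.
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      normalizer_fn=slim.batch_norm) as scope:
    return scope


feature_extractor = (
    ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3LargeFeatureExtractor(
        is_training=False, depth_multiplier=1.0, min_depth=32,
        pad_to_multiple=1, conv_hyperparams_fn=conv_hyperparams_fn))
images = tf.placeholder(tf.float32, shape=[1, 320, 320, 3])
feature_maps = feature_extractor.extract_features(
    feature_extractor.preprocess(images))  # six multi-resolution feature maps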
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Testing mobilenet_v2 feature extractor for CenterNet.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import center_net_mobilenet_v2_feature_extractor from object_detection.models.keras_models import mobilenet_v2 from object_detection.utils import test_case from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CenterNetMobileNetV2FeatureExtractorTest(test_case.TestCase): def test_center_net_mobilenet_v2_feature_extractor(self): net = mobilenet_v2.mobilenet_v2(True, include_top=False) model = center_net_mobilenet_v2_feature_extractor.CenterNetMobileNetV2FeatureExtractor( net) def graph_fn(): img = np.zeros((8, 224, 224, 3), dtype=np.float32) processed_img = model.preprocess(img) return model(processed_img) outputs = self.execute(graph_fn, []) self.assertEqual(outputs.shape, (8, 56, 56, 64)) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_mobilenet_v2_feature_extractor_tf2_test.py
center_net_mobilenet_v2_feature_extractor_tf2_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd resnet v1 feature extractors.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import ssd_resnet_v1_ppn_feature_extractor from object_detection.models import ssd_resnet_v1_ppn_feature_extractor_testbase from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SSDResnet50V1PpnFeatureExtractorTest( ssd_resnet_v1_ppn_feature_extractor_testbase. SSDResnetPpnFeatureExtractorTestBase): """SSDResnet50v1 feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False): min_depth = 32 is_training = True return ssd_resnet_v1_ppn_feature_extractor.SSDResnet50V1PpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding) def _scope_name(self): return 'resnet_v1_50' @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SSDResnet101V1PpnFeatureExtractorTest( ssd_resnet_v1_ppn_feature_extractor_testbase. SSDResnetPpnFeatureExtractorTestBase): """SSDResnet101v1 feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False): min_depth = 32 is_training = True return ( ssd_resnet_v1_ppn_feature_extractor.SSDResnet101V1PpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) def _scope_name(self): return 'resnet_v1_101' @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SSDResnet152V1PpnFeatureExtractorTest( ssd_resnet_v1_ppn_feature_extractor_testbase. SSDResnetPpnFeatureExtractorTestBase): """SSDResnet152v1 feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False): min_depth = 32 is_training = True return ( ssd_resnet_v1_ppn_feature_extractor.SSDResnet152V1PpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) def _scope_name(self): return 'resnet_v1_152' if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_tf1_test.py
ssd_resnet_v1_ppn_feature_extractor_tf1_test.py
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for MobilenetV2 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 class SSDMobileNetV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV2 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """MobileNetV2 Feature Extractor for SSD Models. Mobilenet v2 (experimental), designed by sandler@. More details can be found in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDMobileNetV2FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', '' ][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_depthwise': self._use_depthwise, 'use_explicit_padding': self._use_explicit_padding, } with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \ slim.arg_scope( [mobilenet.depth_multiplier], min_depth=self._min_depth): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v2.mobilenet_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='layer_19', depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_feature_extractor.py
ssd_mobilenet_v2_feature_extractor.py
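A tiny stand-alone check (not from the package) of the preprocessing used by the extractor above: pixel values in [0, 255] are mapped linearly onto [-1, 1].

import numpy as np

pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)
print((2.0 / 255.0) * pixels - 1.0)  # -> [-1.  0.  1.]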
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for Keras MobilenetV1 features.""" import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import mobilenet_v1 from object_detection.utils import ops from object_detection.utils import shape_utils class SSDMobileNetV1KerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using Keras MobilenetV1 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False, name=None): """Keras MobileNetV1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDMobileNetV1KerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) self._feature_map_layout = { 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } self.classification_backbone = None self._feature_map_generator = None def build(self, input_shape): full_mobilenet_v1 = mobilenet_v1.mobilenet_v1( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), weights=None, use_explicit_padding=self._use_explicit_padding, alpha=self._depth_multiplier, min_depth=self._min_depth, include_top=False) conv2d_11_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_11_relu').output conv2d_13_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_13_relu').output self.classification_backbone = tf.keras.Model( inputs=full_mobilenet_v1.inputs, outputs=[conv2d_11_pointwise, conv2d_13_pointwise]) self._feature_map_generator = ( feature_map_generators.KerasMultiResolutionFeatureMaps( feature_map_layout=self._feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_maps = self._feature_map_generator({ 'Conv2d_11_pointwise': image_features[0], 'Conv2d_13_pointwise': image_features[1]}) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py
ssd_mobilenet_v1_keras_feature_extractor.py
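A small stand-alone illustration (not from the package) of how the `num_layers` argument of the extractor above truncates its feature map layout: only the leading entries of `from_layer` and `layer_depth` are kept.

num_layers = 4
from_layer = ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''][:num_layers]
layer_depth = [-1, -1, 512, 256, 256, 128][:num_layers]
print(from_layer)   # ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '']
print(layer_depth)  # [-1, -1, 512, 256]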
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd resnet v1 FPN feature extractors.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import ssd_resnet_v1_fpn_feature_extractor from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SSDResnet50V1FeatureExtractorTest( ssd_resnet_v1_fpn_feature_extractor_testbase. SSDResnetFPNFeatureExtractorTestBase): """SSDResnet50v1Fpn feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, min_depth=32, use_keras=False): is_training = True return ( ssd_resnet_v1_fpn_feature_extractor.SSDResnet50V1FpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) def _resnet_scope_name(self): return 'resnet_v1_50' @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SSDResnet101V1FeatureExtractorTest( ssd_resnet_v1_fpn_feature_extractor_testbase. SSDResnetFPNFeatureExtractorTestBase): """SSDResnet101v1Fpn feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, min_depth=32, use_keras=False): is_training = True return ( ssd_resnet_v1_fpn_feature_extractor.SSDResnet101V1FpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) def _resnet_scope_name(self): return 'resnet_v1_101' @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SSDResnet152V1FeatureExtractorTest( ssd_resnet_v1_fpn_feature_extractor_testbase. SSDResnetFPNFeatureExtractorTestBase): """SSDResnet152v1Fpn feature extractor test.""" def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, min_depth=32, use_keras=False): is_training = True return ( ssd_resnet_v1_fpn_feature_extractor.SSDResnet152V1FpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) def _resnet_scope_name(self): return 'resnet_v1_152' if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py
ssd_resnet_v1_fpn_feature_extractor_tf1_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v3_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import ssd_mobilenet_v3_feature_extractor from object_detection.models import ssd_mobilenet_v3_feature_extractor_testbase from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdMobilenetV3LargeFeatureExtractorTest( ssd_mobilenet_v3_feature_extractor_testbase ._SsdMobilenetV3FeatureExtractorTestBase): def _get_input_sizes(self): """Return first two input feature map sizes.""" return [672, 480] def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, use_keras=False): """Constructs a new Mobilenet V3-Large feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 return ( ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3LargeFeatureExtractor( False, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdMobilenetV3SmallFeatureExtractorTest( ssd_mobilenet_v3_feature_extractor_testbase ._SsdMobilenetV3FeatureExtractorTestBase): def _get_input_sizes(self): """Return first two input feature map sizes.""" return [288, 288] def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, use_keras=False): """Constructs a new Mobilenet V3-Small feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 return ( ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3SmallFeatureExtractor( False, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v3_feature_extractor_tf1_test.py
ssd_mobilenet_v3_feature_extractor_tf1_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MobileNet V2[1] + FPN[2] feature extractor for CenterNet[3] meta architecture. [1]: https://arxiv.org/abs/1801.04381 [2]: https://arxiv.org/abs/1612.03144. [3]: https://arxiv.org/abs/1904.07850 """ import tensorflow.compat.v1 as tf from object_detection.meta_architectures import center_net_meta_arch from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2 _MOBILENET_V2_FPN_SKIP_LAYERS = [ 'block_2_add', 'block_5_add', 'block_9_add', 'out_relu' ] class CenterNetMobileNetV2FPNFeatureExtractor( center_net_meta_arch.CenterNetFeatureExtractor): """The MobileNet V2 with FPN skip layers feature extractor for CenterNet.""" def __init__(self, mobilenet_v2_net, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False, use_separable_conv=False, upsampling_interpolation='nearest'): """Intializes the feature extractor. Args: mobilenet_v2_net: The underlying mobilenet_v2 network to use. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. use_separable_conv: If set to True, all convolutional layers in the FPN network will be replaced by separable convolutions. upsampling_interpolation: A string (one of 'nearest' or 'bilinear') indicating which interpolation method to use for the upsampling ops in the FPN. """ super(CenterNetMobileNetV2FPNFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) self._base_model = mobilenet_v2_net output = self._base_model(self._base_model.input) # Add pyramid feature network on every layer that has stride 2. skip_outputs = [ self._base_model.get_layer(skip_layer_name).output for skip_layer_name in _MOBILENET_V2_FPN_SKIP_LAYERS ] self._fpn_model = tf.keras.models.Model( inputs=self._base_model.input, outputs=skip_outputs) fpn_outputs = self._fpn_model(self._base_model.input) # Construct the top-down feature maps -- we start with an output of # 7x7x1280, which we continually upsample, apply a residual on and merge. # This results in a 56x56x24 output volume. top_layer = fpn_outputs[-1] # Use normal convolutional layer since the kernel_size is 1. residual_op = tf.keras.layers.Conv2D( filters=64, kernel_size=1, strides=1, padding='same') top_down = residual_op(top_layer) num_filters_list = [64, 32, 24] for i, num_filters in enumerate(num_filters_list): level_ind = len(num_filters_list) - 1 - i # Upsample. upsample_op = tf.keras.layers.UpSampling2D( 2, interpolation=upsampling_interpolation) top_down = upsample_op(top_down) # Residual (skip-connection) from bottom-up pathway. 
# Use normal convolutional layer since the kernel_size is 1. residual_op = tf.keras.layers.Conv2D( filters=num_filters, kernel_size=1, strides=1, padding='same') residual = residual_op(fpn_outputs[level_ind]) # Merge. top_down = top_down + residual next_num_filters = num_filters_list[i + 1] if i + 1 <= 2 else 24 if use_separable_conv: conv = tf.keras.layers.SeparableConv2D( filters=next_num_filters, kernel_size=3, strides=1, padding='same') else: conv = tf.keras.layers.Conv2D( filters=next_num_filters, kernel_size=3, strides=1, padding='same') top_down = conv(top_down) top_down = tf.keras.layers.BatchNormalization()(top_down) top_down = tf.keras.layers.ReLU()(top_down) output = top_down self._feature_extractor_model = tf.keras.models.Model( inputs=self._base_model.input, outputs=output) def preprocess(self, resized_inputs): resized_inputs = super(CenterNetMobileNetV2FPNFeatureExtractor, self).preprocess(resized_inputs) return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs) def load_feature_extractor_weights(self, path): self._base_model.load_weights(path) @property def supported_sub_model_types(self): return ['classification'] def get_sub_model(self, sub_model_type): if sub_model_type == 'classification': return self._base_model else: raise ValueError('Sub model type "{}" not supported.'.format(sub_model_type)) def call(self, inputs): return [self._feature_extractor_model(inputs)] @property def out_stride(self): """The stride in the output image of the network.""" return 4 @property def num_feature_outputs(self): """The number of feature outputs returned by the feature extractor.""" return 1 def mobilenet_v2_fpn(channel_means, channel_stds, bgr_ordering, use_separable_conv=False, depth_multiplier=1.0, upsampling_interpolation='nearest', **kwargs): """The MobileNetV2+FPN backbone for CenterNet.""" del kwargs # Set batchnorm_training to True for now. network = mobilenetv2.mobilenet_v2( batchnorm_training=True, alpha=depth_multiplier, include_top=False, weights='imagenet' if depth_multiplier == 1.0 else None) return CenterNetMobileNetV2FPNFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering, use_separable_conv=use_separable_conv, upsampling_interpolation=upsampling_interpolation)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor.py
center_net_mobilenet_v2_fpn_feature_extractor.py
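A hedged usage sketch (not part of the package) of the `mobilenet_v2_fpn` helper defined above. With `depth_multiplier=1.0` the helper loads ImageNet weights, so this assumes network access; the backbone has an output stride of 4, so a 224x224 input yields a 56x56 feature map.

import tensorflow.compat.v1 as tf

from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor

extractor = center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
    channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False)
images = tf.zeros([2, 224, 224, 3], dtype=tf.float32)
features = extractor(extractor.preprocess(images))
# `features` is a single-element list; features[0] has spatial size 224 / 4 = 56.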
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD Keras-based MobilenetV1 FPN Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import mobilenet_v1 from object_detection.models.keras_models import model_utils from object_detection.utils import ops from object_detection.utils import shape_utils # A modified config of mobilenet v1 that makes it more detection friendly. def _create_modified_mobilenet_config(): conv_def_block_12 = model_utils.ConvDefs(conv_name='conv_pw_12', filters=512) conv_def_block_13 = model_utils.ConvDefs(conv_name='conv_pw_13', filters=256) return [conv_def_block_12, conv_def_block_13] class SSDMobileNetV1FpnKerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using Keras-based MobilenetV1 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False, name=None): """SSD Keras based FPN feature extractor Mobilenet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v1 layers {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, Conv2d_13_pointwise}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. 
In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDMobileNetV1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() self._use_native_resize_op = use_native_resize_op self._feature_blocks = [ 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', 'Conv2d_13_pointwise' ] self.classification_backbone = None self._fpn_features_generator = None self._coarse_feature_layers = [] def build(self, input_shape): full_mobilenet_v1 = mobilenet_v1.mobilenet_v1( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), weights=None, use_explicit_padding=self._use_explicit_padding, alpha=self._depth_multiplier, min_depth=self._min_depth, conv_defs=self._conv_defs, include_top=False) conv2d_3_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_3_relu').output conv2d_5_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_5_relu').output conv2d_11_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_11_relu').output conv2d_13_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_13_relu').output self.classification_backbone = tf.keras.Model( inputs=full_mobilenet_v1.inputs, outputs=[conv2d_3_pointwise, conv2d_5_pointwise, conv2d_11_pointwise, conv2d_13_pointwise] ) # pylint:disable=g-long-lambda self._depth_fn = lambda d: max( int(d * self._depth_multiplier), self._min_depth) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) # Construct coarse feature layers padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 stride = 2 for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1): 
coarse_feature_layers = [] if self._use_explicit_padding: def fixed_padding(features, kernel_size=kernel_size): return ops.fixed_padding(features, kernel_size) coarse_feature_layers.append(tf.keras.layers.Lambda( fixed_padding, name='fixed_padding')) layer_name = 'bottom_up_Conv2d_{}'.format( i - self._base_fpn_max_level + 13) conv_block = feature_map_generators.create_conv_block( self._use_depthwise, kernel_size, padding, stride, layer_name, self._conv_hyperparams, self._is_training, self._freeze_batchnorm, self._depth_fn(self._additional_layer_depth)) coarse_feature_layers.extend(conv_block) self._coarse_feature_layers.append(coarse_feature_layers) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append(self._feature_blocks[level - 2]) feature_start_index = len(self._feature_blocks) - self._num_levels fpn_input_image_features = [ (key, image_features[feature_start_index + index]) for index, key in enumerate(feature_block_list)] fpn_features = self._fpn_features_generator(fpn_input_image_features) feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( self._feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( self._feature_blocks[self._base_fpn_max_level - 2])] for coarse_feature_layers in self._coarse_feature_layers: for layer in coarse_feature_layers: last_feature_map = layer(last_feature_map) feature_maps.append(last_feature_map) return feature_maps def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): """Returns a map for restoring from an (object-based) checkpoint. Args: feature_extractor_scope: A scope name for the feature extractor (unused). Returns: A dict mapping keys to Keras models """ return {'feature_extractor': self.classification_backbone}
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py
ssd_mobilenet_v1_fpn_keras_feature_extractor.py
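An illustrative snippet (not from the package) of the level-to-endpoint mapping used by the FPN extractor above: level L reads feature_blocks[L - 2], and levels above 5 are synthesized by the extra stride-2 coarse layers.

feature_blocks = ['Conv2d_3_pointwise', 'Conv2d_5_pointwise',
                  'Conv2d_11_pointwise', 'Conv2d_13_pointwise']
fpn_min_level, fpn_max_level = 3, 7
base_fpn_max_level = min(fpn_max_level, 5)
print([feature_blocks[level - 2]
       for level in range(fpn_min_level, base_fpn_max_level + 1)])
# -> ['Conv2d_5_pointwise', 'Conv2d_11_pointwise', 'Conv2d_13_pointwise']
# Levels 6 and 7 are produced by the bottom_up_Conv2d_* coarse feature layers.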
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Hourglass[1] feature extractor for CenterNet[2] meta architecture. [1]: https://arxiv.org/abs/1603.06937 [2]: https://arxiv.org/abs/1904.07850 """ from object_detection.meta_architectures import center_net_meta_arch from object_detection.models.keras_models import hourglass_network class CenterNetHourglassFeatureExtractor( center_net_meta_arch.CenterNetFeatureExtractor): """The hourglass feature extractor for CenterNet. This class is a thin wrapper around the HourglassFeatureExtractor class along with some preprocessing methods inherited from the base class. """ def __init__(self, hourglass_net, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Initializes the feature extractor. Args: hourglass_net: The underlying hourglass network to use. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, green, red] order.
""" super(CenterNetHourglassFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) self._network = hourglass_net def call(self, inputs): return self._network(inputs) @property def out_stride(self): """The stride in the output image of the network.""" return 4 @property def num_feature_outputs(self): """Ther number of feature outputs returned by the feature extractor.""" return self._network.num_hourglasses def hourglass_10(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-10 backbone for CenterNet.""" del kwargs network = hourglass_network.hourglass_10(num_channels=32) return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def hourglass_20(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-20 backbone for CenterNet.""" del kwargs network = hourglass_network.hourglass_20(num_channels=48) return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def hourglass_32(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-32 backbone for CenterNet.""" del kwargs network = hourglass_network.hourglass_32(num_channels=48) return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def hourglass_52(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-52 backbone for CenterNet.""" del kwargs network = hourglass_network.hourglass_52(num_channels=64) return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def hourglass_104(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-104 backbone for CenterNet.""" del kwargs # TODO(vighneshb): update hourglass_104 signature to match with other # hourglass networks. network = hourglass_network.hourglass_104() return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_hourglass_feature_extractor.py
center_net_hourglass_feature_extractor.py
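A brief usage sketch (not part of the package) for the hourglass wrappers above; `hourglass_10` is shown, and the deeper variants follow the same pattern.

from object_detection.models import center_net_hourglass_feature_extractor

extractor = center_net_hourglass_feature_extractor.hourglass_10(
    channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False)
print(extractor.out_stride)           # 4
print(extractor.num_feature_outputs)  # number of stacked hourglasses in the backbone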
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD MobilenetV1 FPN Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import functools from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import mobilenet_v1 # A modified config of mobilenet v1 that makes it more detection friendly, def _create_modified_mobilenet_config(): conv_defs = copy.deepcopy(mobilenet_v1.MOBILENETV1_CONV_DEFS) conv_defs[-2] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=512) conv_defs[-1] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=256) return conv_defs class SSDMobileNetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV1 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD FPN feature extractor based on Mobilenet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v1 layers {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, Conv2d_13_pointwise}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. 
Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDMobileNetV1FpnFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() self._use_native_resize_op = use_native_resize_op def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=None, regularize_depthwise=True)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, conv_defs=self._conv_defs, use_explicit_padding=self._use_explicit_padding, scope=scope) depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope('fpn', reuse=self._reuse_weights): feature_blocks = [ 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', 'Conv2d_13_pointwise' ] base_fpn_max_level = min(self._fpn_max_level, 5) feature_block_list = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_block_list.append(feature_blocks[level - 2]) fpn_features = feature_map_generators.fpn_top_down_feature_maps( [(key, image_features[key]) for key in feature_block_list], depth=depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op) feature_maps = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( feature_blocks[base_fpn_max_level - 2])] # Construct coarse features padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1): if self._use_depthwise: conv_op = functools.partial( slim.separable_conv2d, 
depth_multiplier=1) else: conv_op = slim.conv2d if self._use_explicit_padding: last_feature_map = ops.fixed_padding( last_feature_map, kernel_size) last_feature_map = conv_op( last_feature_map, num_outputs=depth_fn(self._additional_layer_depth), kernel_size=[kernel_size, kernel_size], stride=2, padding=padding, scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13)) feature_maps.append(last_feature_map) return feature_maps
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py
ssd_mobilenet_v1_fpn_feature_extractor.py
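A small numeric illustration (not from the package) of the depth function used above for the additional FPN layers: channel depth scales with the depth multiplier but is clamped from below by `min_depth`.

depth_multiplier, min_depth = 0.25, 32
depth_fn = lambda d: max(int(d * depth_multiplier), min_depth)
print(depth_fn(256))  # 64
print(depth_fn(64))   # 32, clamped to min_depth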
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.models.ssd_inception_v3_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_inception_v3_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdInceptionV3FeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, num_layers=6, is_training=True): """Constructs a SsdInceptionV3FeatureExtractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. num_layers: number of SSD layers. is_training: whether the network is in training mode. Returns: an ssd_inception_v3_feature_extractor.SsdInceptionV3FeatureExtractor. 
""" min_depth = 32 return ssd_inception_v3_feature_extractor.SSDInceptionV3FeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, num_layers=num_layers, override_base_feature_extractor_hyperparams=True) def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), (2, 2, 2, 2048), (2, 1, 1, 512), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), (2, 2, 2, 2048), (2, 1, 1, 512), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 35, 35, 288), (2, 17, 17, 768), (2, 8, 8, 2048), (2, 4, 4, 512), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 35, 35, 128), (2, 17, 17, 128), (2, 8, 8, 192), (2, 4, 4, 32), (2, 2, 2, 32), (2, 1, 1, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 37, 37, 288), (2, 18, 18, 768), (2, 8, 8, 2048), (2, 4, 4, 512), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_raises_error_with_invalid_image_size(self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(4, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'InceptionV3' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name) def test_extract_features_with_fewer_layers(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), (2, 2, 2, 2048), (2, 1, 1, 512)] 
self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, num_layers=4) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_inception_v3_feature_extractor_tf1_test.py
ssd_inception_v3_feature_extractor_tf1_test.py
# Lint as: python2, python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD Keras-based ResnetV1 FPN Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import resnet_v1 from object_detection.utils import ops from object_detection.utils import shape_utils _RESNET_MODEL_OUTPUT_LAYERS = { 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out'], 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block23_out', 'conv5_block3_out'], 'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out', 'conv4_block36_out', 'conv5_block3_out'], } class SSDResNetV1FpnKerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using Keras-based ResnetV1 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, resnet_v1_base_model, resnet_v1_base_model_name, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=False, name=None): """SSD Keras based FPN feature extractor Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. resnet_v1_base_model: base resnet v1 network to use. One of the resnet_v1.resnet_v1_{50,101,152} models. resnet_v1_base_model_name: model name under which to construct resnet v1. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. 
In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: whether to use explicit padding when extracting features. Default is None, as it's an invalid option and not implemented in this feature extractor. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDResNetV1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) if self._use_explicit_padding: raise ValueError('Explicit padding is not a valid option.') if self._use_depthwise: raise ValueError('Depthwise is not a valid option.') self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._resnet_v1_base_model = resnet_v1_base_model self._resnet_v1_base_model_name = resnet_v1_base_model_name self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] self.classification_backbone = None self._fpn_features_generator = None self._coarse_feature_layers = [] def build(self, input_shape): full_resnet_v1_model = self._resnet_v1_base_model( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, classes=None, weights=None, include_top=False) output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] outputs = [full_resnet_v1_model.get_layer(output_layer_name).output for output_layer_name in output_layers] self.classification_backbone = tf.keras.Model( inputs=full_resnet_v1_model.inputs, outputs=outputs) # pylint:disable=g-long-lambda self._depth_fn = lambda d: max( int(d * self._depth_multiplier), self._min_depth) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._depth_fn(self._additional_layer_depth), is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) # Construct coarse feature layers depth = self._depth_fn(self._additional_layer_depth) for i in range(self._base_fpn_max_level, self._fpn_max_level): layers = [] layer_name = 'bottom_up_block{}'.format(i) layers.append( tf.keras.layers.Conv2D( depth, [3, 3], padding='SAME', strides=2, name=layer_name + '_conv', **self._conv_hyperparams.params())) layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name=layer_name + '_batchnorm')) layers.append( 
self._conv_hyperparams.build_activation_layer( name=layer_name)) self._coarse_feature_layers.append(layers) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 129, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) feature_block_map = dict( list(zip(self._resnet_block_names, image_features))) fpn_input_image_features = [ (feature_block, feature_block_map[feature_block]) for feature_block in feature_block_list] fpn_features = self._fpn_features_generator(fpn_input_image_features) feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) last_feature_map = fpn_features['top_down_block{}'.format( self._base_fpn_max_level - 1)] for coarse_feature_layers in self._coarse_feature_layers: for layer in coarse_feature_layers: last_feature_map = layer(last_feature_map) feature_maps.append(last_feature_map) return feature_maps class SSDResNet50V1FpnKerasFeatureExtractor( SSDResNetV1FpnKerasFeatureExtractor): """SSD Feature Extractor using Keras-based ResnetV1-50 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=False, name='ResNet50V1_FPN'): """SSD Keras based FPN feature extractor ResnetV1-50 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. 
When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: whether to use explicit padding when extracting features. Default is None, as it's an invalid option and not implemented in this feature extractor. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDResNet50V1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, resnet_v1_base_model=resnet_v1.resnet_v1_50, resnet_v1_base_model_name='resnet_v1_50', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDResNet101V1FpnKerasFeatureExtractor( SSDResNetV1FpnKerasFeatureExtractor): """SSD Feature Extractor using Keras-based ResnetV1-101 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=False, name='ResNet101V1_FPN'): """SSD Keras based FPN feature extractor ResnetV1-101 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: whether to use explicit padding when extracting features. Default is None, as it's an invalid option and not implemented in this feature extractor. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. 
name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDResNet101V1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, resnet_v1_base_model=resnet_v1.resnet_v1_101, resnet_v1_base_model_name='resnet_v1_101', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDResNet152V1FpnKerasFeatureExtractor( SSDResNetV1FpnKerasFeatureExtractor): """SSD Feature Extractor using Keras-based ResnetV1-152 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=None, override_base_feature_extractor_hyperparams=False, name='ResNet152V1_FPN'): """SSD Keras based FPN feature extractor ResnetV1-152 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: whether to use explicit padding when extracting features. Default is None, as it's an invalid option and not implemented in this feature extractor. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDResNet152V1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, resnet_v1_base_model=resnet_v1.resnet_v1_152, resnet_v1_base_model_name='resnet_v1_152', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py
ssd_resnet_v1_fpn_keras_feature_extractor.py
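# A minimal usage sketch, not taken from this package's docs: it assumes TF2 and shows how
# the SSDResNet50V1FpnKerasFeatureExtractor defined above might be constructed and applied
# to a dummy batch. The conv_hyperparams text proto mirrors the pattern used by the Keras
# tests in this package; with fpn_min_level=3 and fpn_max_level=7 a 256x256 input should
# yield five 256-channel feature maps at strides 8 through 128.
import tensorflow.compat.v2 as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_fpn
from object_detection.protos import hyperparams_pb2


def _conv_hyperparams():
  proto = hyperparams_pb2.Hyperparams()
  text_format.Merge("""
    activation: RELU_6
    regularizer { l2_regularizer { weight: 0.0004 } }
    initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } }
    batch_norm { scale: true, decay: 0.997, epsilon: 0.001 }
  """, proto)
  return hyperparams_builder.KerasLayerHyperparams(proto)


feature_extractor = ssd_resnet_fpn.SSDResNet50V1FpnKerasFeatureExtractor(
    is_training=False,
    depth_multiplier=1.0,
    min_depth=16,
    pad_to_multiple=1,
    conv_hyperparams=_conv_hyperparams(),
    freeze_batchnorm=False,
    inplace_batchnorm_update=False)

images = tf.zeros((2, 256, 256, 3), dtype=tf.float32)
feature_maps = feature_extractor(feature_extractor.preprocess(images))
for feature_map in feature_maps:
  print(feature_map.shape)  # (2, 32, 32, 256) down to (2, 2, 2, 256)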
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the ssd_efficientnet_bifpn_feature_extractor.""" import unittest from absl.testing import parameterized import numpy as np import tensorflow.compat.v2 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.models import ssd_efficientnet_bifpn_feature_extractor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version def _count_params(model, trainable_only=True): """Returns the count of all model parameters, or just trainable ones.""" if not trainable_only: return model.count_params() else: return int(np.sum([ tf.keras.backend.count_params(p) for p in model.trainable_weights])) @parameterized.parameters( {'efficientdet_version': 'efficientdet-d0', 'efficientnet_version': 'efficientnet-b0', 'bifpn_num_iterations': 3, 'bifpn_num_filters': 64, 'bifpn_combine_method': 'fast_attention'}, {'efficientdet_version': 'efficientdet-d1', 'efficientnet_version': 'efficientnet-b1', 'bifpn_num_iterations': 4, 'bifpn_num_filters': 88, 'bifpn_combine_method': 'fast_attention'}, {'efficientdet_version': 'efficientdet-d2', 'efficientnet_version': 'efficientnet-b2', 'bifpn_num_iterations': 5, 'bifpn_num_filters': 112, 'bifpn_combine_method': 'fast_attention'}, {'efficientdet_version': 'efficientdet-d3', 'efficientnet_version': 'efficientnet-b3', 'bifpn_num_iterations': 6, 'bifpn_num_filters': 160, 'bifpn_combine_method': 'fast_attention'}, {'efficientdet_version': 'efficientdet-d4', 'efficientnet_version': 'efficientnet-b4', 'bifpn_num_iterations': 7, 'bifpn_num_filters': 224, 'bifpn_combine_method': 'fast_attention'}, {'efficientdet_version': 'efficientdet-d5', 'efficientnet_version': 'efficientnet-b5', 'bifpn_num_iterations': 7, 'bifpn_num_filters': 288, 'bifpn_combine_method': 'fast_attention'}, # efficientdet-d6 and efficientdet-d7 only differ in input size. 
{'efficientdet_version': 'efficientdet-d6-d7', 'efficientnet_version': 'efficientnet-b6', 'bifpn_num_iterations': 8, 'bifpn_num_filters': 384, 'bifpn_combine_method': 'sum'}) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class SSDEfficientNetBiFPNFeatureExtractorTest( test_case.TestCase, parameterized.TestCase): def _build_conv_hyperparams(self, add_batch_norm=True): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ force_use_bias: true activation: SWISH regularizer { l2_regularizer { weight: 0.0004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } """ if add_batch_norm: batch_norm_proto = """ batch_norm { scale: true, decay: 0.99, epsilon: 0.001, } """ conv_hyperparams_text_proto += batch_norm_proto text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def _create_feature_extractor(self, efficientnet_version='efficientnet-b0', bifpn_num_iterations=3, bifpn_num_filters=64, bifpn_combine_method='fast_attention'): """Constructs a new EfficientNetBiFPN feature extractor.""" depth_multiplier = 1.0 pad_to_multiple = 1 min_depth = 16 return (ssd_efficientnet_bifpn_feature_extractor .SSDEfficientNetBiFPNKerasFeatureExtractor( is_training=True, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version=efficientnet_version)) def test_efficientdet_feature_extractor_shapes(self, efficientdet_version, efficientnet_version, bifpn_num_iterations, bifpn_num_filters, bifpn_combine_method): feature_extractor = self._create_feature_extractor( efficientnet_version=efficientnet_version, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method) outputs = feature_extractor(np.zeros((2, 256, 256, 3), dtype=np.float32)) self.assertEqual(outputs[0].shape, (2, 32, 32, bifpn_num_filters)) self.assertEqual(outputs[1].shape, (2, 16, 16, bifpn_num_filters)) self.assertEqual(outputs[2].shape, (2, 8, 8, bifpn_num_filters)) self.assertEqual(outputs[3].shape, (2, 4, 4, bifpn_num_filters)) self.assertEqual(outputs[4].shape, (2, 2, 2, bifpn_num_filters)) def test_efficientdet_feature_extractor_params(self, efficientdet_version, efficientnet_version, bifpn_num_iterations, bifpn_num_filters, bifpn_combine_method): feature_extractor = self._create_feature_extractor( efficientnet_version=efficientnet_version, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method) _ = feature_extractor(np.zeros((2, 256, 256, 3), dtype=np.float32)) expected_params = { 'efficientdet-d0': 5484829, 'efficientdet-d1': 8185156, 'efficientdet-d2': 9818153, 'efficientdet-d3': 13792706, 'efficientdet-d4': 22691445, 'efficientdet-d5': 35795677, 'efficientdet-d6-d7': 53624512, } num_params = _count_params(feature_extractor) self.assertEqual(expected_params[efficientdet_version], num_params) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_efficientnet_bifpn_feature_extractor_tf2_test.py
ssd_efficientnet_bifpn_feature_extractor_tf2_test.py
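# A small, self-contained sketch (the helper name is mine, not part of the library) of the
# shape arithmetic the BiFPN test above asserts: pyramid level l corresponds to a stride of
# 2**l, so a 256x256 input with levels 3..7 yields maps of 32, 16, 8, 4 and 2 pixels per
# side, each with bifpn_num_filters channels.
def expected_bifpn_shapes(batch, image_size, num_filters, min_level=3, max_level=7):
  return [(batch, image_size // 2 ** level, image_size // 2 ** level, num_filters)
          for level in range(min_level, max_level + 1)]


assert expected_bifpn_shapes(2, 256, 64) == [
    (2, 32, 32, 64), (2, 16, 16, 64), (2, 8, 8, 64), (2, 4, 4, 64), (2, 2, 2, 64)]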
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v1_fpn_feature_extractor. By using parameterized test decorator, this test serves for both Slim-based and Keras-based Mobilenet V1 FPN feature extractors in SSD. """ import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdMobilenetV1FpnFeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, is_training=True, use_explicit_padding=False, use_keras=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. is_training: whether the network is in training mode. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ del use_keras min_depth = 32 return (ssd_mobilenet_v1_fpn_feature_extractor. 
SSDMobileNetV1FpnFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_depthwise=True, use_explicit_padding=use_explicit_padding)) def test_extract_features_returns_correct_shapes_256(self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_returns_correct_shapes_384(self): image_height = 320 image_width = 320 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), (2, 10, 10, 256), (2, 5, 5, 256), (2, 3, 3, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_with_dynamic_image_shape(self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), (2, 10, 10, 256), (2, 5, 5, 256), (2, 3, 3, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_returns_correct_shapes_enforcing_min_depth( self): image_height = 256 image_width = 256 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32), (2, 2, 2, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False, use_keras=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True, use_keras=False) def test_extract_features_raises_error_with_invalid_image_size( self): image_height = 32 image_width = 32 depth_multiplier = 1.0 
pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=False) def test_preprocess_returns_correct_value_range(self): image_height = 256 image_width = 256 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple, use_keras=False) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'MobilenetV1' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name, use_keras=False) def test_variable_count(self): depth_multiplier = 1 pad_to_multiple = 1 variables = self.get_feature_extractor_variables( depth_multiplier, pad_to_multiple, use_keras=False) self.assertEqual(len(variables), 153) def test_fused_batchnorm(self): image_height = 256 image_width = 256 depth_multiplier = 1 pad_to_multiple = 1 image_placeholder = tf.placeholder(tf.float32, [1, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple, use_keras=False) preprocessed_image = feature_extractor.preprocess(image_placeholder) _ = feature_extractor.extract_features(preprocessed_image) self.assertTrue( any('FusedBatchNorm' in op.type for op in tf.get_default_graph().get_operations())) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py
ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py
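# Hedged sketch (helper names are hypothetical) of the pad_to_multiple arithmetic behind the
# tests above: 299 padded up to the nearest multiple of 32 becomes 320, and FPN levels 3..7
# (strides 8..128) then produce 40, 20, 10, 5 and 3 pixel maps, since the 'SAME'-padded
# stride-2 convolutions round odd sizes up.
import math


def padded_size(size, pad_to_multiple):
  return int(math.ceil(size / float(pad_to_multiple)) * pad_to_multiple)


def fpn_spatial_sizes(image_size, pad_to_multiple=1, min_level=3, max_level=7):
  size = padded_size(image_size, pad_to_multiple)
  return [int(math.ceil(size / float(2 ** level)))
          for level in range(min_level, max_level + 1)]


assert fpn_spatial_sizes(299, pad_to_multiple=32) == [40, 20, 10, 5, 3]
assert fpn_spatial_sizes(256) == [32, 16, 8, 4, 2]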
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base test class for ssd_mobilenet_edgetpu_feature_extractor.""" import abc import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test class _SsdMobilenetEdgeTPUFeatureExtractorTestBase( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): """Base class for MobilenetEdgeTPU tests.""" @abc.abstractmethod def _get_input_sizes(self): """Return feature map sizes for the two inputs to SSD head.""" pass def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 input_feature_sizes = self._get_input_sizes() expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]), (2, 4, 4, input_feature_sizes[1]), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=False) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 input_feature_sizes = self._get_input_sizes() expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]), (2, 10, 10, input_feature_sizes[1]), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=False) def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 input_feature_sizes = self._get_input_sizes() expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]), (2, 10, 10, input_feature_sizes[1]), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(4, image_height, image_width, 3) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=False) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_has_fused_batchnorm(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 image_placeholder = tf.placeholder(tf.float32, [1, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=False) preprocessed_image = feature_extractor.preprocess(image_placeholder) _ = feature_extractor.extract_features(preprocessed_image) 
self.assertTrue(any('FusedBatchNorm' in op.type for op in tf.get_default_graph().get_operations()))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py
ssd_mobilenet_edgetpu_feature_extractor_testbase.py
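# Hedged sketch, not the actual test file, of how a concrete TF1 test could plug into the
# abstract base above: it supplies the depths of the two backbone endpoints fed to the SSD
# head via _get_input_sizes() and builds the EdgeTPU extractor in _create_feature_extractor().
# The endpoint depths below are placeholders; the real values depend on the
# MobileNetEdgeTPU conv_defs.
import unittest

import tensorflow.compat.v1 as tf

from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor
from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor_testbase
from object_detection.utils import tf_version


@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetEdgeTPUFeatureExtractorTest(
    ssd_mobilenet_edgetpu_feature_extractor_testbase
    ._SsdMobilenetEdgeTPUFeatureExtractorTestBase):

  def _get_input_sizes(self):
    return 192, 1280  # placeholder endpoint depths

  def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
                                use_explicit_padding=False, use_keras=False):
    del use_keras
    return (ssd_mobilenet_edgetpu_feature_extractor
            .SSDMobileNetEdgeTPUFeatureExtractor(
                is_training=False,
                depth_multiplier=depth_multiplier,
                min_depth=32,
                pad_to_multiple=pad_to_multiple,
                conv_hyperparams_fn=self.conv_hyperparams_fn,
                use_explicit_padding=use_explicit_padding))


if __name__ == '__main__':
  tf.test.main()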
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for MobileNetEdgeTPU features.""" from object_detection.models import ssd_mobilenet_v3_feature_extractor from nets.mobilenet import mobilenet_v3 class SSDMobileNetEdgeTPUFeatureExtractor( ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3FeatureExtractorBase): """MobileNetEdgeTPU feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobilenetEdgeTPU'): super(SSDMobileNetEdgeTPUFeatureExtractor, self).__init__( conv_defs=mobilenet_v3.V3_EDGETPU, from_layer=['layer_18/expansion_output', 'layer_23'], is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name )
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py
ssd_mobilenet_edgetpu_feature_extractor.py
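# Hedged construction sketch, not from the package docs: the EdgeTPU extractor above is the
# MobileNetV3 SSD base configured with the V3_EDGETPU conv_defs, so it is built like the
# other slim-based TF1 extractors. The text proto and the hyperparams_builder.build(...)
# call mirror how conv_hyperparams_fn is normally produced; treat the exact proto values
# here as assumptions.
import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor
from object_detection.protos import hyperparams_pb2

proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  activation: RELU_6
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { stddev: 0.03 } }
  batch_norm { scale: true, decay: 0.997, epsilon: 0.001 }
""", proto)
conv_hyperparams_fn = hyperparams_builder.build(proto, is_training=False)

feature_extractor = (
    ssd_mobilenet_edgetpu_feature_extractor.SSDMobileNetEdgeTPUFeatureExtractor(
        is_training=False,
        depth_multiplier=1.0,
        min_depth=32,
        pad_to_multiple=1,
        conv_hyperparams_fn=conv_hyperparams_fn))

images = tf.placeholder(tf.float32, [1, 320, 320, 3])
feature_maps = feature_extractor.extract_features(feature_extractor.preprocess(images))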
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd resnet v1 FPN feature extractors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import numpy as np from six.moves import zip import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.utils import test_utils class SSDResnetFPNFeatureExtractorTestBase( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): """Helper test class for SSD Resnet v1 FPN feature extractors.""" @abc.abstractmethod def _resnet_scope_name(self): pass @abc.abstractmethod def _fpn_scope_name(self): return 'fpn' @abc.abstractmethod def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, min_depth=32, use_keras=False): pass def test_extract_features_returns_correct_shapes_256(self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=self.is_tf2()) def test_extract_features_returns_correct_shapes_with_dynamic_inputs( self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=self.is_tf2()) def test_extract_features_returns_correct_shapes_with_depth_multiplier( self): image_height = 256 image_width = 256 depth_multiplier = 0.5 expected_num_channels = int(256 * depth_multiplier) pad_to_multiple = 1 expected_feature_map_shape = [(2, 32, 32, expected_num_channels), (2, 16, 16, expected_num_channels), (2, 8, 8, expected_num_channels), (2, 4, 4, expected_num_channels), (2, 2, 2, expected_num_channels)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=self.is_tf2()) def test_extract_features_returns_correct_shapes_with_min_depth( self): image_height = 256 image_width = 256 depth_multiplier = 1.0 pad_to_multiple = 1 min_depth = 320 expected_feature_map_shape = [(2, 32, 32, min_depth), (2, 16, 16, min_depth), (2, 8, 8, min_depth), (2, 4, 4, min_depth), (2, 2, 2, min_depth)] with test_utils.GraphContextOrNone() as g: image_tensor = tf.random.uniform([2, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, min_depth=min_depth, use_keras=self.is_tf2()) def graph_fn(): 
if self.is_tf2(): return feature_extractor(image_tensor) return feature_extractor.extract_features(image_tensor) feature_maps = self.execute(graph_fn, [], graph=g) for feature_map, expected_shape in zip(feature_maps, expected_feature_map_shape): self.assertAllEqual(feature_map.shape, expected_shape) def test_extract_features_returns_correct_shapes_with_pad_to_multiple( self): image_height = 254 image_width = 254 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 4, 4, 256), (2, 2, 2, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_keras=self.is_tf2()) def test_extract_features_raises_error_with_invalid_image_size( self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=self.is_tf2()) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image_np = np.random.rand(4, image_height, image_width, 3) with test_utils.GraphContextOrNone() as g: test_image = tf.constant(test_image_np) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, use_keras=self.is_tf2()) def graph_fn(): preprocessed_image = feature_extractor.preprocess(test_image) return preprocessed_image preprocessed_image_out = self.execute(graph_fn, [], graph=g) self.assertAllClose(preprocessed_image_out, test_image_np - [[123.68, 116.779, 103.939]]) def test_variables_only_created_in_scope(self): if self.is_tf2(): self.skipTest('test_variables_only_created_in_scope is only tf1') depth_multiplier = 1 pad_to_multiple = 1 scope_name = self._resnet_scope_name() self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name, use_keras=self.is_tf2()) def test_variable_count(self): if self.is_tf2(): self.skipTest('test_variable_count is only tf1') depth_multiplier = 1 pad_to_multiple = 1 variables = self.get_feature_extractor_variables( depth_multiplier, pad_to_multiple, use_keras=self.is_tf2()) # The number of expected variables in resnet_v1_50, resnet_v1_101, # and resnet_v1_152 is 279, 534, and 789 respectively. expected_variables_len = 279 scope_name = self._resnet_scope_name() if scope_name in ('ResNet101V1_FPN', 'resnet_v1_101'): expected_variables_len = 534 elif scope_name in ('ResNet152V1_FPN', 'resnet_v1_152'): expected_variables_len = 789 self.assertEqual(len(variables), expected_variables_len)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py
ssd_resnet_v1_fpn_feature_extractor_testbase.py
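# Hedged sketch, not the actual TF2 test file, of a concrete subclass of the helper base
# above wired to the Keras ResNet-50 FPN extractor defined earlier in this package. The
# _build_conv_hyperparams helper is assumed to be provided by SsdFeatureExtractorTestBase,
# as the other Keras-based tests in this package suggest.
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase as testbase
from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as resnet_fpn


class SSDResnet50V1FpnKerasFeatureExtractorTest(
    testbase.SSDResnetFPNFeatureExtractorTestBase):

  def _resnet_scope_name(self):
    return 'ResNet50V1_FPN'

  def _fpn_scope_name(self):
    return 'fpn'

  def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
                                use_explicit_padding=False, min_depth=32,
                                use_keras=True):
    del use_explicit_padding, use_keras
    return resnet_fpn.SSDResNet50V1FpnKerasFeatureExtractor(
        is_training=False,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams=self._build_conv_hyperparams(),
        freeze_batchnorm=False,
        inplace_batchnorm_update=False)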
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_pnas_feature_extractor.""" import unittest import numpy as np import tensorflow.compat.v1 as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_pnasnet_feature_extractor from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class SsdPnasNetFeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, num_layers=6, is_training=True): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. num_layers: number of SSD layers. is_training: whether the network is in training mode. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 return ssd_pnasnet_feature_extractor.SSDPNASNetFeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding, num_layers=num_layers) def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 2160), (2, 10, 10, 4320), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_extract_features_with_fewer_layers(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320), (2, 2, 2, 512), (2, 1, 1, 256)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, 
depth_multiplier, pad_to_multiple, expected_feature_map_shape, num_layers=4) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_pnasnet_feature_extractor_tf1_test.py
ssd_pnasnet_feature_extractor_tf1_test.py
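# Small sketch (the helper is mine, not part of the library) of the shape pattern the
# PNASNet tests above assert: two backbone endpoints at strides 16 and 32 with depths 2160
# and 4320, followed by extra SSD layers of depth 512, 256, 256 and 128, truncated to
# num_layers entries; spatial sizes are halved (rounding up) at every step.
import math


def expected_pnasnet_shapes(batch, image_size, num_layers=6):
  depths = [2160, 4320, 512, 256, 256, 128]
  sizes = [math.ceil(image_size / 16), math.ceil(image_size / 32)]
  while len(sizes) < len(depths):
    sizes.append(math.ceil(sizes[-1] / 2))
  return [(batch, s, s, d) for s, d in zip(sizes, depths)][:num_layers]


assert expected_pnasnet_shapes(2, 128) == [
    (2, 8, 8, 2160), (2, 4, 4, 4320), (2, 2, 2, 512),
    (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)]
assert expected_pnasnet_shapes(2, 128, num_layers=4) == [
    (2, 8, 8, 2160), (2, 4, 4, 4320), (2, 2, 2, 512), (2, 1, 1, 256)]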
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD MobilenetV2 FPN Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import functools from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 # A modified config of mobilenet v2 that makes it more detection friendly. def _create_modified_mobilenet_config(): conv_defs = copy.deepcopy(mobilenet_v2.V2_DEF) conv_defs['spec'][-1] = mobilenet.op( slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=256) return conv_defs class SSDMobileNetV2FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV2 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD FPN feature extractor based on Mobilenet v2 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v2 layers {layer_4, layer_7, layer_14, layer_19}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. 
override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDMobileNetV2FpnFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() self._use_native_resize_op = use_native_resize_op def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \ slim.arg_scope( [mobilenet.depth_multiplier], min_depth=self._min_depth): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v2.mobilenet_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='layer_19', depth_multiplier=self._depth_multiplier, conv_defs=self._conv_defs, use_explicit_padding=self._use_explicit_padding, scope=scope) depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope('fpn', reuse=self._reuse_weights): feature_blocks = [ 'layer_4', 'layer_7', 'layer_14', 'layer_19' ] base_fpn_max_level = min(self._fpn_max_level, 5) feature_block_list = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_block_list.append(feature_blocks[level - 2]) fpn_features = feature_map_generators.fpn_top_down_feature_maps( [(key, image_features[key]) for key in feature_block_list], depth=depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op) feature_maps = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( feature_blocks[base_fpn_max_level - 2])] # Construct coarse features padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1): if self._use_depthwise: conv_op = functools.partial( slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d if 
self._use_explicit_padding: last_feature_map = ops.fixed_padding( last_feature_map, kernel_size) last_feature_map = conv_op( last_feature_map, num_outputs=depth_fn(self._additional_layer_depth), kernel_size=[kernel_size, kernel_size], stride=2, padding=padding, scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 19)) feature_maps.append(last_feature_map) return feature_maps
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py
ssd_mobilenet_v2_fpn_feature_extractor.py
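# Pure-Python sketch (function names are mine) of two pieces of bookkeeping used by the
# extractor above: the depth function that scales additional_layer_depth by
# depth_multiplier but never drops below min_depth, and the mapping from FPN levels 2..5
# to the MobileNet v2 endpoints layer_4, layer_7, layer_14 and layer_19.
FEATURE_BLOCKS = ['layer_4', 'layer_7', 'layer_14', 'layer_19']  # FPN levels 2..5


def depth_fn(depth, depth_multiplier, min_depth):
  return max(int(depth * depth_multiplier), min_depth)


def fpn_input_blocks(fpn_min_level, fpn_max_level):
  base_fpn_max_level = min(fpn_max_level, 5)
  return [FEATURE_BLOCKS[level - 2]
          for level in range(fpn_min_level, base_fpn_max_level + 1)]


assert depth_fn(256, 0.5, 16) == 128
assert depth_fn(256, 0.5 ** 12, 32) == 32
assert fpn_input_blocks(3, 7) == ['layer_7', 'layer_14', 'layer_19']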
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for models.faster_rcnn_pnas_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class FasterRcnnPNASFeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride): return frcnn_pnas.FasterRCNNPNASFeatureExtractor( is_training=False, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 299, 299, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 19, 19, 4320]) def test_extract_proposal_features_input_size_224(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 14, 14, 4320]) def test_extract_proposal_features_input_size_112(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 7, 7, 4320]) def test_extract_proposal_features_dies_on_invalid_stride(self): with self.assertRaises(ValueError): self._build_feature_extractor(first_stage_features_stride=99) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(ValueError): feature_extractor.extract_proposal_features( 
preprocessed_inputs, scope='TestScope') def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [2, 17, 17, 1088], maxval=255, dtype=tf.float32) proposal_classifier_features = ( feature_extractor.extract_box_classifier_features( proposal_feature_maps, scope='TestScope')) features_shape = tf.shape(proposal_classifier_features) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [2, 9, 9, 4320]) def test_filter_scaling_computation(self): expected_filter_scaling = { ((4, 8), 2): 1.0, ((4, 8), 7): 2.0, ((4, 8), 8): 2.0, ((4, 8), 9): 4.0 } for args, filter_scaling in expected_filter_scaling.items(): reduction_indices, start_cell_num = args self.assertAlmostEqual( frcnn_pnas._filter_scaling(reduction_indices, start_cell_num), filter_scaling) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py
faster_rcnn_pnas_feature_extractor_tf1_test.py
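# Hedged reading (mine, not the library's implementation) of the filter-scaling rule that
# the last test above exercises: the scaling starts at 1.0 and doubles once for every
# reduction cell whose index falls below start_cell_num, which reproduces the expected
# values in test_filter_scaling_computation.
def filter_scaling(reduction_indices, start_cell_num):
  scaling = 1.0
  for index in reduction_indices:
    if index < start_cell_num:
      scaling *= 2.0
  return scaling


assert filter_scaling((4, 8), 2) == 1.0
assert filter_scaling((4, 8), 7) == 2.0
assert filter_scaling((4, 8), 8) == 2.0
assert filter_scaling((4, 8), 9) == 4.0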
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Resnetv2 based feature extractors for CenterNet[1] meta architecture. [1]: https://arxiv.org/abs/1904.07850 """ import tensorflow.compat.v1 as tf from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor class CenterNetResnetFeatureExtractor(CenterNetFeatureExtractor): """Resnet v2 base feature extractor for the CenterNet model.""" def __init__(self, resnet_type, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Initializes the feature extractor with a specific ResNet architecture. Args: resnet_type: A string specifying which kind of ResNet to use. Currently only `resnet_v2_50` and `resnet_v2_101` are supported. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. """ super(CenterNetResnetFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) if resnet_type == 'resnet_v2_101': self._base_model = tf.keras.applications.ResNet101V2(weights=None, include_top=False) output_layer = 'conv5_block3_out' elif resnet_type == 'resnet_v2_50': self._base_model = tf.keras.applications.ResNet50V2(weights=None, include_top=False) output_layer = 'conv5_block3_out' else: raise ValueError('Unknown Resnet Model {}'.format(resnet_type)) output_layer = self._base_model.get_layer(output_layer) self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input, outputs=output_layer.output) resnet_output = self._resnet_model(self._base_model.input) for num_filters in [256, 128, 64]: # TODO(vighneshb) This section has a few differences from the paper # Figure out how much of a performance impact they have. # 1. We use a simple convolution instead of a deformable convolution conv = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=3, strides=1, padding='same') resnet_output = conv(resnet_output) resnet_output = tf.keras.layers.BatchNormalization()(resnet_output) resnet_output = tf.keras.layers.ReLU()(resnet_output) # 2. We use the default initialization for the convolution layers # instead of initializing it to do bilinear upsampling. conv_transpose = tf.keras.layers.Conv2DTranspose(filters=num_filters, kernel_size=3, strides=2, padding='same') resnet_output = conv_transpose(resnet_output) resnet_output = tf.keras.layers.BatchNormalization()(resnet_output) resnet_output = tf.keras.layers.ReLU()(resnet_output) self._feature_extractor_model = tf.keras.models.Model( inputs=self._base_model.input, outputs=resnet_output) def preprocess(self, resized_inputs): """Preprocess input images for the ResNet model. 
This scales images in the range [0, 255] to the range [-1, 1] Args: resized_inputs: a [batch, height, width, channels] float32 tensor. Returns: outputs: a [batch, height, width, channels] float32 tensor. """ resized_inputs = super(CenterNetResnetFeatureExtractor, self).preprocess( resized_inputs) return tf.keras.applications.resnet_v2.preprocess_input(resized_inputs) def load_feature_extractor_weights(self, path): self._base_model.load_weights(path) def call(self, inputs): """Returns image features extracted by the backbone. Args: inputs: An image tensor of shape [batch_size, input_height, input_width, 3] Returns: features_list: A list of length 1 containing a tensor of shape [batch_size, input_height // 4, input_width // 4, 64] containing the features extracted by the ResNet. """ return [self._feature_extractor_model(inputs)] @property def num_feature_outputs(self): return 1 @property def out_stride(self): return 4 @property def classification_backbone(self): return self._base_model def resnet_v2_101(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v2 101 feature extractor.""" del kwargs return CenterNetResnetFeatureExtractor( resnet_type='resnet_v2_101', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering ) def resnet_v2_50(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v2 50 feature extractor.""" del kwargs return CenterNetResnetFeatureExtractor( resnet_type='resnet_v2_50', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_resnet_feature_extractor.py
center_net_resnet_feature_extractor.py
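A minimal usage sketch for the extractor above, assuming the object_detection package and TF2/Keras are importable; the 512x512 input size is only an illustration. Per the docstrings, inputs are expected in [0, 255], out_stride is 4 and the single output map has 64 channels.

# Sketch only: exercises CenterNetResnetFeatureExtractor through the
# resnet_v2_50 factory defined above (not an official example).
import tensorflow as tf

from object_detection.models.center_net_resnet_feature_extractor import (
    resnet_v2_50)

extractor = resnet_v2_50(channel_means=(0., 0., 0.),
                         channel_stds=(1., 1., 1.),
                         bgr_ordering=False)

# preprocess() applies the channel means/stds and then rescales the
# [0, 255] inputs into [-1, 1] via the Keras resnet_v2 preprocessing.
images = tf.random.uniform([1, 512, 512, 3], maxval=255.0, dtype=tf.float32)
features = extractor.call(extractor.preprocess(images))

# One stride-4 feature map with 64 channels: [1, 512 // 4, 512 // 4, 64].
assert len(features) == 1
assert features[0].shape.as_list() == [1, 128, 128, 64]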
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for models.faster_rcnn_resnet_keras_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class FasterRcnnResnetKerasFeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, architecture='resnet_v1_50'): return frcnn_res.FasterRCNNResnet50KerasFeatureExtractor( is_training=False, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( [1, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shape = tf.shape(rpn_feature_map) self.assertAllEqual(features_shape.numpy(), [1, 14, 14, 1024]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shape = tf.shape(rpn_feature_map) self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1024]) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(tf.errors.InvalidArgumentError): feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor() proposal_feature_maps = tf.random_uniform( [3, 7, 7, 1024], maxval=255, dtype=tf.float32) model = feature_extractor.get_box_classifier_feature_extractor_model( name='TestScope') proposal_classifier_features = ( model(proposal_feature_maps)) features_shape = tf.shape(proposal_classifier_features) # Note: due to a slight mismatch in slim and keras resnet definitions # the output shape of the box classifier is slightly different compared to # that of the slim implementation. The keras version is more `canonical` # in that it more accurately reflects the original authors' implementation. # TODO(jonathanhuang): make the output shape match that of the slim # implementation by using atrous convolutions. self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048]) if __name__ == '__main__': tf.enable_v2_behavior() tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py
faster_rcnn_resnet_keras_feature_extractor_tf2_test.py
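The expected shapes in the test above follow from the feature extractor's stride: the first stage runs ResNet-50 up to its 1024-channel conv4 block at stride 16, so a square input of size N produces an N/16 x N/16 RPN map. A tiny sketch of that arithmetic for the two test inputs (a restatement of the assertions above, not new behaviour):

# Spatial size of the RPN feature map for a square input at stride 16.
def rpn_map_size(input_size, first_stage_features_stride=16):
    return input_size // first_stage_features_stride

assert rpn_map_size(224) == 14  # matches [1, 14, 14, 1024] above
assert rpn_map_size(112) == 7   # matches [1, 7, 7, 1024] above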
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD Keras-based EfficientNet + BiFPN (EfficientDet) Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import logging from six.moves import range from six.moves import zip import tensorflow.compat.v2 as tf from tensorflow.python.keras import backend as keras_backend from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators from object_detection.utils import ops from object_detection.utils import shape_utils from object_detection.utils import tf_version # pylint: disable=g-import-not-at-top if tf_version.is_tf2(): from official.vision.image_classification.efficientnet import efficientnet_model _EFFICIENTNET_LEVEL_ENDPOINTS = { 1: 'stack_0/block_0/project_bn', 2: 'stack_1/block_1/add', 3: 'stack_2/block_1/add', 4: 'stack_4/block_2/add', 5: 'stack_6/block_0/project_bn', } class SSDEfficientNetBiFPNKerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Keras-based EfficientNetBiFPN (EfficientDet) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level, bifpn_max_level, bifpn_num_iterations, bifpn_num_filters, bifpn_combine_method, efficientnet_version, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name=None): """SSD Keras-based EfficientNetBiFPN (EfficientDet) feature extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. 
In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. efficientnet_version: the EfficientNet version to use for this feature extractor's backbone. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: Whether to override the efficientnet backbone's default weight decay with the weight decay defined by `conv_hyperparams`. Note, only overriding of weight decay is currently supported. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetBiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) if depth_multiplier != 1.0: raise ValueError('EfficientNetBiFPN does not support a non-default ' 'depth_multiplier.') if use_explicit_padding: raise ValueError('EfficientNetBiFPN does not support explicit padding.') if use_depthwise: raise ValueError('EfficientNetBiFPN does not support use_depthwise.') self._bifpn_min_level = bifpn_min_level self._bifpn_max_level = bifpn_max_level self._bifpn_num_iterations = bifpn_num_iterations self._bifpn_num_filters = max(bifpn_num_filters, min_depth) self._bifpn_node_params = {'combine_method': bifpn_combine_method} self._efficientnet_version = efficientnet_version logging.info('EfficientDet EfficientNet backbone version: %s', self._efficientnet_version) logging.info('EfficientDet BiFPN num filters: %d', self._bifpn_num_filters) logging.info('EfficientDet BiFPN num iterations: %d', self._bifpn_num_iterations) self._backbone_max_level = min( max(_EFFICIENTNET_LEVEL_ENDPOINTS.keys()), bifpn_max_level) self._output_layer_names = [ _EFFICIENTNET_LEVEL_ENDPOINTS[i] for i in range(bifpn_min_level, self._backbone_max_level + 1)] self._output_layer_alias = [ 'level_{}'.format(i) for i in range(bifpn_min_level, self._backbone_max_level + 1)] # Initialize the EfficientNet backbone. # Note, this is currently done in the init method rather than in the build # method, since doing so introduces an error which is not well understood. 
efficientnet_overrides = {'rescale_input': False} if override_base_feature_extractor_hyperparams: efficientnet_overrides[ 'weight_decay'] = conv_hyperparams.get_regularizer_weight() if (conv_hyperparams.use_sync_batch_norm() and keras_backend.is_tpu_strategy(tf.distribute.get_strategy())): efficientnet_overrides['batch_norm'] = 'tpu' efficientnet_base = efficientnet_model.EfficientNet.from_name( model_name=self._efficientnet_version, overrides=efficientnet_overrides) outputs = [efficientnet_base.get_layer(output_layer_name).output for output_layer_name in self._output_layer_names] self._efficientnet = tf.keras.Model( inputs=efficientnet_base.inputs, outputs=outputs) self.classification_backbone = efficientnet_base self._bifpn_stage = None def build(self, input_shape): self._bifpn_stage = bifpn_generators.KerasBiFpnFeatureMaps( bifpn_num_iterations=self._bifpn_num_iterations, bifpn_num_filters=self._bifpn_num_filters, fpn_min_level=self._bifpn_min_level, fpn_max_level=self._bifpn_max_level, input_max_level=self._backbone_max_level, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, bifpn_node_params=self._bifpn_node_params, name='bifpn') self.built = True def preprocess(self, inputs): """SSD preprocessing. Channel-wise mean subtraction and scaling. Args: inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ if inputs.shape.as_list()[3] == 3: # Input images are expected to be in the range [0, 255]. channel_offset = [0.485, 0.456, 0.406] channel_scale = [0.229, 0.224, 0.225] return ((inputs / 255.0) - [[channel_offset]]) / [[channel_scale]] else: return inputs def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 129, preprocessed_inputs) base_feature_maps = self._efficientnet( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) output_feature_map_dict = self._bifpn_stage( list(zip(self._output_layer_alias, base_feature_maps))) return list(output_feature_map_dict.values()) class SSDEfficientNetB0BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=3, bifpn_num_filters=64, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D0'): """SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. 
freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB0BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b0', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB1BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=4, bifpn_num_filters=88, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D1'): """SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. 
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB1BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b1', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB2BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=5, bifpn_num_filters=112, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D2'): """SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. 
pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB2BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b2', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB3BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=6, bifpn_num_filters=160, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D3'): """SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. 
float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB3BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b3', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB4BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=7, bifpn_num_filters=224, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D4'): """SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor. 
Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDEfficientNetB4BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b4', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB5BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=7, bifpn_num_filters=288, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D5'): """SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. 
name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB5BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b5', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB6BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=8, bifpn_num_filters=384, bifpn_combine_method='sum', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D6-D7'): """SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor. SSD Keras EfficientNet-b6 BiFPN Feature Extractor, a.k.a. EfficientDet-d6 and EfficientDet-d7. The EfficientDet-d[6,7] models use the same backbone EfficientNet-b6 and the same BiFPN architecture, and therefore have the same number of parameters. They only differ in their input resolutions. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. 
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB6BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b6', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB7BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b7 BiFPN Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=8, bifpn_num_filters=384, bifpn_combine_method='sum', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientNet-B7_BiFPN'): """SSD Keras EfficientNet-b7 BiFPN Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. 
Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB7BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b7', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_efficientnet_bifpn_feature_extractor.py
ssd_efficientnet_bifpn_feature_extractor.py
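The subclasses above differ only in the backbone version and the BiFPN defaults baked into their signatures; seeing them side by side makes the compound-scaling pattern clearer. The values below are copied from the constructor defaults above (the dict name is just for illustration; note the base class also clamps the filter count to max(bifpn_num_filters, min_depth)).

# EfficientDet variant -> (EfficientNet backbone, BiFPN iterations,
# BiFPN filters, combine method), as hard-coded in the constructors above.
EFFICIENTDET_BIFPN_DEFAULTS = {
    'EfficientDet-D0': ('efficientnet-b0', 3, 64, 'fast_attention'),
    'EfficientDet-D1': ('efficientnet-b1', 4, 88, 'fast_attention'),
    'EfficientDet-D2': ('efficientnet-b2', 5, 112, 'fast_attention'),
    'EfficientDet-D3': ('efficientnet-b3', 6, 160, 'fast_attention'),
    'EfficientDet-D4': ('efficientnet-b4', 7, 224, 'fast_attention'),
    'EfficientDet-D5': ('efficientnet-b5', 7, 288, 'fast_attention'),
    'EfficientDet-D6-D7': ('efficientnet-b6', 8, 384, 'sum'),
    'EfficientNet-B7_BiFPN': ('efficientnet-b7', 8, 384, 'sum'),
}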
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """PNASNet Faster R-CNN implementation. Based on PNASNet model: https://arxiv.org/abs/1712.00559 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import variables_helper from nets.nasnet import nasnet_utils try: from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top except: # pylint: disable=bare-except pass arg_scope = slim.arg_scope def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): """Defines the default arg scope for the PNASNet Large for object detection. This provides a small edit to switch batch norm training on and off. Args: is_batch_norm_training: Boolean indicating whether to train with batch norm. Returns: An `arg_scope` to use for the PNASNet Large Model. """ imagenet_scope = pnasnet.pnasnet_large_arg_scope() with arg_scope(imagenet_scope): with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: return sc def _filter_scaling(reduction_indices, start_cell_num): """Compute the expected filter scaling at given PNASNet cell start_cell_num. In the pnasnet.py code, filter_scaling starts at 1.0. We instead adapt filter scaling to depend on the starting cell. In the first cells, before any reduction cell, filter_scaling is 1.0. Each reduction cell passed before start_cell_num multiplies filter_scaling by 2. Args: reduction_indices: list of int indices. start_cell_num: int. Returns: filter_scaling: float. """ filter_scaling = 1.0 for ind in reduction_indices: if ind < start_cell_num: filter_scaling *= 2.0 return filter_scaling # Note: This is largely a copy of _build_pnasnet_base inside pnasnet.py but # with special edits to remove instantiation of the stem and the special # ability to receive as input a pair of hidden states. It constructs only # a sub-network from the original PNASNet model, starting from the # start_cell_num cell and with a modified final layer. 
def _build_pnasnet_base( hidden_previous, hidden, normal_cell, hparams, true_cell_num, start_cell_num): """Constructs a PNASNet image model for proposal classifier features.""" # Find where to place the reduction cells or stride normal cells reduction_indices = nasnet_utils.calc_reduction_layers( hparams.num_cells, hparams.num_reduction_layers) filter_scaling = _filter_scaling(reduction_indices, start_cell_num) # Note: The None is prepended to match the behavior of _imagenet_stem() cell_outputs = [None, hidden_previous, hidden] net = hidden # Run the cells for cell_num in range(start_cell_num, hparams.num_cells): is_reduction = cell_num in reduction_indices stride = 2 if is_reduction else 1 if is_reduction: filter_scaling *= hparams.filter_scaling_rate prev_layer = cell_outputs[-2] net = normal_cell( net, scope='cell_{}'.format(cell_num), filter_scaling=filter_scaling, stride=stride, prev_layer=prev_layer, cell_num=true_cell_num) true_cell_num += 1 cell_outputs.append(net) # Final nonlinearity. # Note that we have dropped the final pooling, dropout and softmax layers # from the default pnasnet version. with tf.variable_scope('final_layer'): net = tf.nn.relu(net) return net # TODO(shlens): Only fixed_shape_resizer is currently supported for PNASNet # featurization. The reason for this is that pnasnet.py only supports # inputs with fully known shapes. We need to update pnasnet.py to handle # shapes not known at compile time. class FasterRCNNPNASFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with PNASNet feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 16. """ if first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 16.') super(FasterRCNNPNASFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with PNAS preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the PNASNet network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] end_points: A dictionary mapping feature extractor tensor names to tensors Raises: ValueError: If the created network is missing the required activation. 
""" del scope if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(pnasnet_large_arg_scope_for_detection( is_batch_norm_training=self._train_batch_norm)): with arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], reuse=self._reuse_weights): _, end_points = pnasnet.build_pnasnet_large( preprocessed_inputs, num_classes=None, is_training=self._is_training, final_endpoint='Cell_7') # Note that both 'Cell_6' and 'Cell_7' have equal depth = 2160. # Cell_7 is the last cell before second reduction. rpn_feature_map = tf.concat([end_points['Cell_6'], end_points['Cell_7']], 3) # pnasnet.py does not maintain the batch size in the first dimension. # This work around permits us retaining the batch for below. batch = preprocessed_inputs.get_shape().as_list()[0] shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] rpn_feature_map_shape = [batch] + shape_without_batch rpn_feature_map.set_shape(rpn_feature_map_shape) return rpn_feature_map, end_points def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the PNASNet network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ del scope # Number of used stem cells. num_stem_cells = 2 # Note that we always feed into 2 layers of equal depth # where the first N channels corresponds to previous hidden layer # and the second N channels correspond to the final hidden layer. hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) # Note that what follows is largely a copy of build_pnasnet_large() within # pnasnet.py. We are copying to minimize code pollution in slim. # TODO(shlens,skornblith): Determine the appropriate drop path schedule. # For now the schedule is the default (1.0->0.7 over 250,000 train steps). hparams = pnasnet.large_imagenet_config() if not self._is_training: hparams.set_hparam('drop_path_keep_prob', 1.0) # Calculate the total number of cells in the network total_num_cells = hparams.num_cells + num_stem_cells normal_cell = pnasnet.PNasNetNormalCell( hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps) with arg_scope([slim.dropout, nasnet_utils.drop_path], is_training=self._is_training): with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format): # This corresponds to the cell number just past 'Cell_7' used by # _extract_proposal_features(). 
start_cell_num = 8 true_cell_num = start_cell_num + num_stem_cells with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): net = _build_pnasnet_base( hidden_previous, hidden, normal_cell=normal_cell, hparams=hparams, true_cell_num=true_cell_num, start_cell_num=start_cell_num) proposal_classifier_features = net return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for PNASNet checkpoints. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable return variables_to_restore
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_pnas_feature_extractor.py
faster_rcnn_pnas_feature_extractor.py
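The `_filter_scaling` helper above depends only on how many reduction cells precede `start_cell_num`; each such cell doubles the scaling. A small worked sketch of that rule with hypothetical reduction indices (the real indices come from nasnet_utils.calc_reduction_layers for the chosen hparams):

# Same doubling rule as _filter_scaling above, applied to illustrative inputs.
def filter_scaling(reduction_indices, start_cell_num):
    scaling = 1.0
    for ind in reduction_indices:
        if ind < start_cell_num:
            scaling *= 2.0
    return scaling

# Hypothetical reduction cells at indices 4 and 8.
assert filter_scaling([4, 8], start_cell_num=8) == 2.0  # one reduction passed
assert filter_scaling([4, 8], start_cell_num=9) == 4.0  # both reductions passed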
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Resnetv1 FPN [1] based feature extractors for CenterNet[2] meta architecture. [1]: https://arxiv.org/abs/1612.03144. [2]: https://arxiv.org/abs/1904.07850. """ import tensorflow.compat.v1 as tf from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor from object_detection.models.keras_models import resnet_v1 _RESNET_MODEL_OUTPUT_LAYERS = { 'resnet_v1_18': ['conv2_block2_out', 'conv3_block2_out', 'conv4_block2_out', 'conv5_block2_out'], 'resnet_v1_34': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out'], 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out'], 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block23_out', 'conv5_block3_out'], } class CenterNetResnetV1FpnFeatureExtractor(CenterNetFeatureExtractor): """Resnet v1 FPN base feature extractor for the CenterNet model. This feature extractor uses residual skip connections and nearest neighbor upsampling to produce an output feature map of stride 4, which has precise localization information along with strong semantic information from the top of the net. This design does not exactly follow the original FPN design, specifically: - Since only one output map is necessary for heatmap prediction (stride 4 output), the top-down feature maps can have different numbers of channels. Specifically, the top down feature maps have the following sizes: [h/4, w/4, 64], [h/8, w/8, 128], [h/16, w/16, 256], [h/32, w/32, 256]. - No additional coarse features are used after conv5_x. """ def __init__(self, resnet_type, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Initializes the feature extractor with a specific ResNet architecture. Args: resnet_type: A string specifying which kind of ResNet to use. Currently `resnet_v1_18`, `resnet_v1_34`, `resnet_v1_50` and `resnet_v1_101` are supported. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, green, red] order. 
""" super(CenterNetResnetV1FpnFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) if resnet_type == 'resnet_v1_50': self._base_model = tf.keras.applications.ResNet50(weights=None, include_top=False) elif resnet_type == 'resnet_v1_101': self._base_model = tf.keras.applications.ResNet101(weights=None, include_top=False) elif resnet_type == 'resnet_v1_18': self._base_model = resnet_v1.resnet_v1_18(weights=None, include_top=False) elif resnet_type == 'resnet_v1_34': self._base_model = resnet_v1.resnet_v1_34(weights=None, include_top=False) else: raise ValueError('Unknown Resnet Model {}'.format(resnet_type)) output_layers = _RESNET_MODEL_OUTPUT_LAYERS[resnet_type] outputs = [self._base_model.get_layer(output_layer_name).output for output_layer_name in output_layers] self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input, outputs=outputs) resnet_outputs = self._resnet_model(self._base_model.input) # Construct the top-down feature maps. top_layer = resnet_outputs[-1] residual_op = tf.keras.layers.Conv2D(filters=256, kernel_size=1, strides=1, padding='same') top_down = residual_op(top_layer) num_filters_list = [256, 128, 64] for i, num_filters in enumerate(num_filters_list): level_ind = 2 - i # Upsample. upsample_op = tf.keras.layers.UpSampling2D(2, interpolation='nearest') top_down = upsample_op(top_down) # Residual (skip-connection) from bottom-up pathway. residual_op = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=1, strides=1, padding='same') residual = residual_op(resnet_outputs[level_ind]) # Merge. top_down = top_down + residual next_num_filters = num_filters_list[i+1] if i + 1 <= 2 else 64 conv = tf.keras.layers.Conv2D(filters=next_num_filters, kernel_size=3, strides=1, padding='same') top_down = conv(top_down) top_down = tf.keras.layers.BatchNormalization()(top_down) top_down = tf.keras.layers.ReLU()(top_down) self._feature_extractor_model = tf.keras.models.Model( inputs=self._base_model.input, outputs=top_down) def preprocess(self, resized_inputs): """Preprocess input images for the ResNet model. This scales images in the range [0, 255] to the range [-1, 1] Args: resized_inputs: a [batch, height, width, channels] float32 tensor. Returns: outputs: a [batch, height, width, channels] float32 tensor. """ resized_inputs = super( CenterNetResnetV1FpnFeatureExtractor, self).preprocess(resized_inputs) return tf.keras.applications.resnet.preprocess_input(resized_inputs) def load_feature_extractor_weights(self, path): self._base_model.load_weights(path) def call(self, inputs): """Returns image features extracted by the backbone. Args: inputs: An image tensor of shape [batch_size, input_height, input_width, 3] Returns: features_list: A list of length 1 containing a tensor of shape [batch_size, input_height // 4, input_width // 4, 64] containing the features extracted by the ResNet. 
""" return [self._feature_extractor_model(inputs)] @property def num_feature_outputs(self): return 1 @property def out_stride(self): return 4 @property def classification_backbone(self): return self._base_model def resnet_v1_101_fpn(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v1 101 FPN feature extractor.""" del kwargs return CenterNetResnetV1FpnFeatureExtractor( resnet_type='resnet_v1_101', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering ) def resnet_v1_50_fpn(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v1 50 FPN feature extractor.""" del kwargs return CenterNetResnetV1FpnFeatureExtractor( resnet_type='resnet_v1_50', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def resnet_v1_34_fpn(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v1 34 FPN feature extractor.""" del kwargs return CenterNetResnetV1FpnFeatureExtractor( resnet_type='resnet_v1_34', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering ) def resnet_v1_18_fpn(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v1 18 FPN feature extractor.""" del kwargs return CenterNetResnetV1FpnFeatureExtractor( resnet_type='resnet_v1_18', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py
center_net_resnet_v1_fpn_feature_extractor.py
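A minimal TF2 usage sketch of the FPN extractor defined above, assuming the object_detection package is importable; the batch and input size are illustrative:

import tensorflow as tf
from object_detection.models import center_net_resnet_v1_fpn_feature_extractor as fpn

extractor = fpn.resnet_v1_50_fpn(
    channel_means=(0., 0., 0.),
    channel_stds=(1., 1., 1.),
    bgr_ordering=False)
images = tf.zeros([2, 512, 512, 3])
features = extractor(extractor.preprocess(images))
# `features` is a list of length 1; per the class docstring its single entry is
# the stride-4, 64-channel map (here expected to be [2, 128, 128, 64]).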
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for InceptionV3 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import ops from object_detection.utils import shape_utils from nets import inception_v3 class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using InceptionV3 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """InceptionV3 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. Raises: ValueError: If `override_base_feature_extractor_hyperparams` is False. """ super(SSDInceptionV3FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) if not self._override_base_feature_extractor_hyperparams: raise ValueError('SSD Inception V3 feature extractor always uses' 'scope returned by `conv_hyperparams_fn` for both the ' 'base feature extractor and the additional layers ' 'added since there is no arg_scope defined for the base ' 'feature extractor.') def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
""" return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', '' ][:self._num_layers], 'layer_depth': [-1, -1, -1, 512, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope('InceptionV3', reuse=self._reuse_weights) as scope: _, image_features = inception_v3.inception_v3_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Mixed_7c', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope) feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_inception_v3_feature_extractor.py
ssd_inception_v3_feature_extractor.py
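Construction requires override_base_feature_extractor_hyperparams=True (otherwise the constructor raises ValueError), and preprocess simply rescales pixels with (2.0 / 255.0) * x - 1.0. A minimal TF1/slim construction sketch; the no-op arg_scope below mirrors the helper in the feature-extractor test base and is only a stand-in for a real hyperparameter config:

import tf_slim as slim
from object_detection.models import ssd_inception_v3_feature_extractor

def conv_hyperparams_fn():
  # Stand-in scope; a real config would come from hyperparams_builder.
  with slim.arg_scope([]) as sc:
    return sc

extractor = ssd_inception_v3_feature_extractor.SSDInceptionV3FeatureExtractor(
    is_training=False,
    depth_multiplier=1.0,
    min_depth=16,
    pad_to_multiple=1,
    conv_hyperparams_fn=conv_hyperparams_fn,
    override_base_feature_extractor_hyperparams=True)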
# Lint as: python2, python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base test class SSDFeatureExtractors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import abstractmethod import numpy as np from six.moves import zip import tensorflow.compat.v1 as tf import tf_slim as slim from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import test_utils class SsdFeatureExtractorTestBase(test_case.TestCase): def _build_conv_hyperparams(self, add_batch_norm=True): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ if add_batch_norm: batch_norm_proto = """ batch_norm { scale: false } """ conv_hyperparams_text_proto += batch_norm_proto text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def conv_hyperparams_fn(self): with slim.arg_scope([]) as sc: return sc @abstractmethod def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, num_layers=6, use_keras=False, use_depthwise=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. use_explicit_padding: use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. num_layers: number of SSD layers. use_keras: if True builds a keras-based feature extractor, if False builds a slim-based one. use_depthwise: Whether to use depthwise convolutions. Returns: an ssd_meta_arch.SSDFeatureExtractor or an ssd_meta_arch.SSDKerasFeatureExtractor object. 
""" pass def _create_features(self, depth_multiplier, pad_to_multiple, use_explicit_padding=False, use_depthwise=False, num_layers=6, use_keras=False): kwargs = {} if use_explicit_padding: kwargs.update({'use_explicit_padding': use_explicit_padding}) if use_depthwise: kwargs.update({'use_depthwise': use_depthwise}) if num_layers != 6: kwargs.update({'num_layers': num_layers}) if use_keras: kwargs.update({'use_keras': use_keras}) feature_extractor = self._create_feature_extractor( depth_multiplier, pad_to_multiple, **kwargs) return feature_extractor def _extract_features(self, image_tensor, feature_extractor, use_keras=False): if use_keras: feature_maps = feature_extractor(image_tensor) else: feature_maps = feature_extractor.extract_features(image_tensor) return feature_maps def check_extract_features_returns_correct_shape(self, batch_size, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shapes, use_explicit_padding=False, num_layers=6, use_keras=False, use_depthwise=False): with test_utils.GraphContextOrNone() as g: feature_extractor = self._create_features( depth_multiplier, pad_to_multiple, use_explicit_padding=use_explicit_padding, num_layers=num_layers, use_keras=use_keras, use_depthwise=use_depthwise) def graph_fn(image_tensor): return self._extract_features( image_tensor, feature_extractor, use_keras=use_keras) image_tensor = np.random.rand(batch_size, image_height, image_width, 3).astype(np.float32) feature_maps = self.execute(graph_fn, [image_tensor], graph=g) for feature_map, expected_shape in zip( feature_maps, expected_feature_map_shapes): self.assertAllEqual(feature_map.shape, expected_shape) def check_extract_features_returns_correct_shapes_with_dynamic_inputs( self, batch_size, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shapes, use_explicit_padding=False, num_layers=6, use_keras=False, use_depthwise=False): with test_utils.GraphContextOrNone() as g: feature_extractor = self._create_features( depth_multiplier, pad_to_multiple, use_explicit_padding=use_explicit_padding, num_layers=num_layers, use_keras=use_keras, use_depthwise=use_depthwise) def graph_fn(image_height, image_width): image_tensor = tf.random_uniform([batch_size, image_height, image_width, 3], dtype=tf.float32) return self._extract_features( image_tensor, feature_extractor, use_keras=use_keras) feature_maps = self.execute_cpu(graph_fn, [ np.array(image_height, dtype=np.int32), np.array(image_width, dtype=np.int32) ], graph=g) for feature_map, expected_shape in zip( feature_maps, expected_feature_map_shapes): self.assertAllEqual(feature_map.shape, expected_shape) def check_extract_features_raises_error_with_invalid_image_size( self, image_height, image_width, depth_multiplier, pad_to_multiple, use_keras=False, use_depthwise=False): with test_utils.GraphContextOrNone() as g: batch = 4 width = tf.random.uniform([], minval=image_width, maxval=image_width+1, dtype=tf.int32) height = tf.random.uniform([], minval=image_height, maxval=image_height+1, dtype=tf.int32) shape = tf.stack([batch, height, width, 3]) preprocessed_inputs = tf.random.uniform(shape) feature_extractor = self._create_features( depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) def graph_fn(): feature_maps = self._extract_features( preprocessed_inputs, feature_extractor, use_keras=use_keras) return feature_maps if self.is_tf2(): with self.assertRaises(ValueError): self.execute_cpu(graph_fn, [], graph=g) else: with 
self.assertRaises(tf.errors.InvalidArgumentError): self.execute_cpu(graph_fn, [], graph=g) def check_feature_extractor_variables_under_scope(self, depth_multiplier, pad_to_multiple, scope_name, use_keras=False, use_depthwise=False): variables = self.get_feature_extractor_variables( depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) for variable in variables: self.assertTrue(variable.name.startswith(scope_name)) def get_feature_extractor_variables(self, depth_multiplier, pad_to_multiple, use_keras=False, use_depthwise=False): g = tf.Graph() with g.as_default(): feature_extractor = self._create_features( depth_multiplier, pad_to_multiple, use_keras=use_keras, use_depthwise=use_depthwise) preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) self._extract_features( preprocessed_inputs, feature_extractor, use_keras=use_keras) return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_feature_extractor_test.py
ssd_feature_extractor_test.py
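Concrete feature-extractor tests subclass this base and implement only _create_feature_extractor; the shared check_* helpers then exercise static shapes, dynamic inputs, and error cases. A sketch of such a subclass, shown for the SSD Inception V3 extractor above (the min_depth value is illustrative and expected shapes are left to the individual test methods):

import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_inception_v3_feature_extractor

class SsdInceptionV3FeatureExtractorTest(
    ssd_feature_extractor_test.SsdFeatureExtractorTestBase):

  def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
                                use_explicit_padding=False, num_layers=6,
                                use_keras=False, use_depthwise=False):
    # use_keras/use_depthwise are accepted only for interface compatibility.
    return ssd_inception_v3_feature_extractor.SSDInceptionV3FeatureExtractor(
        is_training=False,
        depth_multiplier=depth_multiplier,
        min_depth=32,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=self.conv_hyperparams_fn,
        use_explicit_padding=use_explicit_padding,
        num_layers=num_layers,
        override_base_feature_extractor_hyperparams=True)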
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for models.faster_rcnn_nas_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class FasterRcnnNASFeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride): return frcnn_nas.FasterRCNNNASFeatureExtractor( is_training=False, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 299, 299, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 19, 19, 4032]) def test_extract_proposal_features_input_size_224(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 14, 14, 4032]) def test_extract_proposal_features_input_size_112(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 7, 7, 4032]) def test_extract_proposal_features_dies_on_invalid_stride(self): with self.assertRaises(ValueError): self._build_feature_extractor(first_stage_features_stride=99) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(ValueError): feature_extractor.extract_proposal_features( preprocessed_inputs, 
scope='TestScope') def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [2, 17, 17, 1088], maxval=255, dtype=tf.float32) proposal_classifier_features = ( feature_extractor.extract_box_classifier_features( proposal_feature_maps, scope='TestScope')) features_shape = tf.shape(proposal_classifier_features) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [2, 9, 9, 4032]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py
faster_rcnn_nas_feature_extractor_tf1_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Resnet V1 Faster R-CNN implementation. See "Deep Residual Learning for Image Recognition" by He et al., 2015. https://arxiv.org/abs/1512.03385 Note: this implementation assumes that the classification checkpoint used to finetune this model is trained using the same configuration as that of the MSRA provided checkpoints (see https://github.com/KaimingHe/deep-residual-networks), e.g., with same preprocessing, batch norm scaling, etc. """ import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from nets import resnet_utils from nets import resnet_v1 class FasterRCNNResnetV1FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN Resnet V1 feature extractor implementation.""" def __init__(self, architecture, resnet_model, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, activation_fn=tf.nn.relu): """Constructor. Args: architecture: Architecture name of the Resnet V1 model. resnet_model: Definition of the Resnet V1 model. is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. activation_fn: Activaton functon to use in Resnet V1 model. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') self._architecture = architecture self._resnet_model = resnet_model self._activation_fn = activation_fn super(FasterRCNNResnetV1FeatureExtractor, self).__init__(is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN Resnet V1 preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. 
Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] activations: A dictionary mapping feature extractor tensor names to tensors Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. """ if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) shape_assert = tf.Assert( tf.logical_and( tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.']) with tf.control_dependencies([shape_assert]): # Disables batchnorm for fine-tuning with smaller batch sizes. # TODO(chensun): Figure out if it is needed when image # batch size is bigger. with slim.arg_scope( resnet_utils.resnet_arg_scope( batch_norm_epsilon=1e-5, batch_norm_scale=True, activation_fn=self._activation_fn, weight_decay=self._weight_decay)): with tf.variable_scope( self._architecture, reuse=self._reuse_weights) as var_scope: _, activations = self._resnet_model( preprocessed_inputs, num_classes=None, is_training=self._train_batch_norm, global_pool=False, output_stride=self._first_stage_features_stride, spatial_squeeze=False, scope=var_scope) handle = scope + '/%s/block3' % self._architecture return activations[handle], activations def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ with tf.variable_scope(self._architecture, reuse=self._reuse_weights): with slim.arg_scope( resnet_utils.resnet_arg_scope( batch_norm_epsilon=1e-5, batch_norm_scale=True, activation_fn=self._activation_fn, weight_decay=self._weight_decay)): with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): blocks = [ resnet_utils.Block('block4', resnet_v1.bottleneck, [{ 'depth': 2048, 'depth_bottleneck': 512, 'stride': 1 }] * 3) ] proposal_classifier_features = resnet_utils.stack_blocks_dense( proposal_feature_maps, blocks) return proposal_classifier_features class FasterRCNNResnet50FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): """Faster R-CNN Resnet 50 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, activation_fn=tf.nn.relu): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. activation_fn: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported. 
""" super(FasterRCNNResnet50FeatureExtractor, self).__init__('resnet_v1_50', resnet_v1.resnet_v1_50, is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay, activation_fn) class FasterRCNNResnet101FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): """Faster R-CNN Resnet 101 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, activation_fn=tf.nn.relu): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. activation_fn: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported. """ super(FasterRCNNResnet101FeatureExtractor, self).__init__('resnet_v1_101', resnet_v1.resnet_v1_101, is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay, activation_fn) class FasterRCNNResnet152FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): """Faster R-CNN Resnet 152 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, activation_fn=tf.nn.relu): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. activation_fn: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported. """ super(FasterRCNNResnet152FeatureExtractor, self).__init__('resnet_v1_152', resnet_v1.resnet_v1_152, is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay, activation_fn)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py
faster_rcnn_resnet_v1_feature_extractor.py
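A minimal TF1 usage sketch of the Resnet-101 variant; extract_proposal_features is the public wrapper from the FasterRCNNFeatureExtractor base class (the same call pattern as in the NAS test above), and the input size is illustrative:

import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet

extractor = frcnn_resnet.FasterRCNNResnet101FeatureExtractor(
    is_training=False, first_stage_features_stride=16)
images = tf.random_uniform([1, 299, 299, 3], maxval=255, dtype=tf.float32)
preprocessed = extractor.preprocess(images)  # VGG-style channel mean subtraction
rpn_feature_map, _ = extractor.extract_proposal_features(
    preprocessed, scope='TestScope')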
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor.""" import unittest import tensorflow.compat.v1 as tf from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride): return frcnn_inc_res.FasterRCNNInceptionResnetV2KerasFeatureExtractor( is_training=False, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=False, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 299, 299, 3], maxval=255, dtype=tf.float32) rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shape = tf.shape(rpn_feature_map) self.assertAllEqual(features_shape.numpy(), [1, 19, 19, 1088]) def test_extract_proposal_features_stride_eight(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=8) preprocessed_inputs = tf.random_uniform( [1, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shape = tf.shape(rpn_feature_map) self.assertAllEqual(features_shape.numpy(), [1, 28, 28, 1088]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shape = tf.shape(rpn_feature_map) self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1088]) def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [2, 17, 17, 1088], maxval=255, dtype=tf.float32) model = feature_extractor.get_box_classifier_feature_extractor_model( name='TestScope') proposal_classifier_features = ( model(proposal_feature_maps)) features_shape = tf.shape(proposal_classifier_features) self.assertAllEqual(features_shape.numpy(), [2, 9, 9, 1536]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py
faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A wrapper around the Keras InceptionResnetV2 models for object detection.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from object_detection.core import freezable_batch_norm class _LayersOverride(object): """Alternative Keras layers interface for the Keras InceptionResNetV2.""" def __init__(self, batchnorm_training, output_stride=16, align_feature_maps=False, batchnorm_scale=False, default_batchnorm_momentum=0.999, default_batchnorm_epsilon=1e-3, weight_decay=0.00004): """Alternative tf.keras.layers interface, for use by InceptionResNetV2. It is used by the Keras applications kwargs injection API to modify the Inception Resnet V2 Keras application with changes required by the Object Detection API. These injected interfaces make the following changes to the network: - Supports freezing batch norm layers - Adds support for feature map alignment (like in the Slim model) - Adds support for changing the output stride (like in the Slim model) - Adds support for overriding various batch norm hyperparameters Because the Keras inception resnet v2 application does not assign explicit names to most individual layers, the injection of output stride support works by identifying convolution layers according to their filter counts and pre-feature-map-alignment padding arguments. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. output_stride: A scalar that specifies the requested ratio of input to output spatial resolution. Only supports 8 and 16. align_feature_maps: When true, changes all the VALID paddings in the network to SAME padding so that the feature maps are aligned. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. Batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: small float added to variance to avoid dividing by zero. weight_decay: the l2 regularization weight decay for weights variables. (gets multiplied by 0.5 to map from slim l2 regularization weight to Keras l2 regularization weight). """ self._use_atrous = output_stride == 8 self._align_feature_maps = align_feature_maps self._batchnorm_training = batchnorm_training self._batchnorm_scale = batchnorm_scale self._default_batchnorm_momentum = default_batchnorm_momentum self._default_batchnorm_epsilon = default_batchnorm_epsilon self.regularizer = tf.keras.regularizers.l2(weight_decay * 0.5) def Conv2D(self, filters, kernel_size, **kwargs): """Builds a Conv2D layer according to the current Object Detection config. 
Overrides the Keras InceptionResnetV2 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. If feature map alignment is enabled, the padding will be forced to 'same'. If output_stride is 8, some conv2d layers will be matched according to their name or filter counts or pre-alignment padding parameters, and will have the correct 'dilation rate' or 'strides' set. Args: filters: The number of filters to use for the convolution. kernel_size: The kernel size to specify the height and width of the 2D convolution window. **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A Keras Conv2D layer specified by the Object Detection hyperparameter configurations. """ kwargs['kernel_regularizer'] = self.regularizer kwargs['bias_regularizer'] = self.regularizer # Because the Keras application does not set explicit names for most layers, # (instead allowing names to auto-increment), we must match individual # layers in the model according to their filter count, name, or # pre-alignment mapping. This means we can only align the feature maps # after we have applied our updates in cases where output_stride=8. if self._use_atrous and (filters == 384): kwargs['strides'] = 1 name = kwargs.get('name') if self._use_atrous and ( (name and 'block17' in name) or (filters == 128 or filters == 160 or (filters == 192 and kwargs.get('padding', '').lower() != 'valid'))): kwargs['dilation_rate'] = 2 if self._align_feature_maps: kwargs['padding'] = 'same' return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) def MaxPooling2D(self, pool_size, strides, **kwargs): """Builds a pooling layer according to the current Object Detection config. Overrides the Keras InceptionResnetV2 application's MaxPooling2D layers with ones that follow the spec specified by the Object Detection hyperparameters. If feature map alignment is enabled, the padding will be forced to 'same'. If output_stride is 8, some pooling layers will be matched according to their pre-alignment padding parameters, and will have their 'strides' argument overridden. Args: pool_size: The pool size specified by the Keras application. strides: The strides specified by the unwrapped Keras application. **kwargs: Keyword args specified by the Keras application for constructing the max pooling layer. Returns: A MaxPool2D layer specified by the Object Detection hyperparameter configurations. """ if self._use_atrous and kwargs.get('padding', '').lower() == 'valid': strides = 1 if self._align_feature_maps: kwargs['padding'] = 'same' return tf.keras.layers.MaxPool2D(pool_size, strides=strides, **kwargs) # We alias MaxPool2D because Keras has that alias MaxPool2D = MaxPooling2D # pylint: disable=invalid-name def BatchNormalization(self, **kwargs): """Builds a normalization layer. Overrides the Keras application batch norm with the norm specified by the Object Detection configuration. Args: **kwargs: Keyword arguments from the `layers.BatchNormalization` calls in the Keras application. Returns: A normalization layer specified by the Object Detection hyperparameter configurations. 
""" kwargs['scale'] = self._batchnorm_scale return freezable_batch_norm.FreezableBatchNorm( training=self._batchnorm_training, epsilon=self._default_batchnorm_epsilon, momentum=self._default_batchnorm_momentum, **kwargs) # Forward all non-overridden methods to the keras layers def __getattr__(self, item): return getattr(tf.keras.layers, item) # pylint: disable=invalid-name def inception_resnet_v2( batchnorm_training, output_stride=16, align_feature_maps=False, batchnorm_scale=False, weight_decay=0.00004, default_batchnorm_momentum=0.9997, default_batchnorm_epsilon=0.001, **kwargs): """Instantiates the InceptionResnetV2 architecture. (Modified for object detection) This wraps the InceptionResnetV2 tensorflow Keras application, but uses the Keras application's kwargs-based monkey-patching API to override the Keras architecture with the following changes: - Supports freezing batch norm layers with FreezableBatchNorms - Adds support for feature map alignment (like in the Slim model) - Adds support for changing the output stride (like in the Slim model) - Changes the default batchnorm momentum to 0.9997 - Adds support for overriding various batchnorm hyperparameters Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. output_stride: A scalar that specifies the requested ratio of input to output spatial resolution. Only supports 8 and 16. align_feature_maps: When true, changes all the VALID paddings in the network to SAME padding so that the feature maps are aligned. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. weight_decay: the l2 regularization weight decay for weights variables. (gets multiplied by 0.5 to map from slim l2 regularization weight to Keras l2 regularization weight). default_batchnorm_momentum: Float. Batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: small float added to variance to avoid dividing by zero. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.InceptionResNetV2` method that constructs the Keras model. Returns: A Keras model instance. """ if output_stride != 8 and output_stride != 16: raise ValueError('output_stride must be 8 or 16.') layers_override = _LayersOverride( batchnorm_training, output_stride, align_feature_maps=align_feature_maps, batchnorm_scale=batchnorm_scale, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, weight_decay=weight_decay) return tf.keras.applications.InceptionResNetV2( layers=layers_override, **kwargs) # pylint: enable=invalid-name
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/inception_resnet_v2.py
inception_resnet_v2.py
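A sketch of building the detection-flavored backbone with a reduced output stride; weights and include_top are among the kwargs forwarded straight to tf.keras.applications.InceptionResNetV2, matching the call pattern used in the unit test that follows:

from object_detection.models.keras_models import inception_resnet_v2

backbone = inception_resnet_v2.inception_resnet_v2(
    batchnorm_training=False,
    output_stride=8,
    align_feature_maps=True,
    weights=None,
    include_top=False)
# `backbone` is a regular Keras model whose batch norm layers are
# FreezableBatchNorm instances and whose later blocks use dilation instead of
# striding when output_stride=8.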
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Testing the Hourglass network.""" import unittest from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from object_detection.models.keras_models import hourglass_network as hourglass from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase): def test_identity_layer(self): layer = hourglass.IdentityLayer() output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) self.assertEqual(output.shape, (2, 32, 32, 3)) def test_skip_conv_layer_stride_1(self): layer = hourglass.SkipConvolution(out_channels=8, stride=1) output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) self.assertEqual(output.shape, (2, 32, 32, 8)) def test_skip_conv_layer_stride_2(self): layer = hourglass.SkipConvolution(out_channels=8, stride=2) output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) self.assertEqual(output.shape, (2, 16, 16, 8)) @parameterized.parameters([{'kernel_size': 1}, {'kernel_size': 3}, {'kernel_size': 7}]) def test_conv_block(self, kernel_size): layer = hourglass.ConvolutionalBlock( out_channels=8, kernel_size=kernel_size, stride=1) output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) self.assertEqual(output.shape, (2, 32, 32, 8)) layer = hourglass.ConvolutionalBlock( out_channels=8, kernel_size=kernel_size, stride=2) output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) self.assertEqual(output.shape, (2, 16, 16, 8)) def test_residual_block_stride_1(self): layer = hourglass.ResidualBlock(out_channels=8, stride=1) output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) self.assertEqual(output.shape, (2, 32, 32, 8)) def test_residual_block_stride_2(self): layer = hourglass.ResidualBlock(out_channels=8, stride=2, skip_conv=True) output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) self.assertEqual(output.shape, (2, 16, 16, 8)) def test_input_downsample_block(self): layer = hourglass.InputDownsampleBlock( out_channels_initial_conv=4, out_channels_residual_block=8) output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) self.assertEqual(output.shape, (2, 8, 8, 8)) def test_input_conv_block(self): layer = hourglass.InputConvBlock( out_channels_initial_conv=4, out_channels_residual_block=8) output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) self.assertEqual(output.shape, (2, 32, 32, 8)) def test_encoder_decoder_block(self): layer = hourglass.EncoderDecoderBlock( num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], channel_dims=[4, 6, 8, 10, 12]) output = layer(np.zeros((2, 64, 64, 4), dtype=np.float32)) self.assertEqual(output.shape, (2, 64, 64, 4)) def test_hourglass_feature_extractor(self): model = hourglass.HourglassNetwork( num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], input_channel_dims=4, channel_dims_per_stage=[6, 8, 10, 
12, 14], num_hourglasses=2) outputs = model(np.zeros((2, 64, 64, 3), dtype=np.float32)) self.assertEqual(outputs[0].shape, (2, 16, 16, 6)) self.assertEqual(outputs[1].shape, (2, 16, 16, 6)) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class HourglassDepthTest(tf.test.TestCase): def test_hourglass_104(self): net = hourglass.hourglass_104() self.assertEqual(hourglass.hourglass_depth(net), 104) def test_hourglass_10(self): net = hourglass.hourglass_10(2, initial_downsample=False) self.assertEqual(hourglass.hourglass_depth(net), 10) outputs = net(tf.zeros((2, 32, 32, 3))) self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) def test_hourglass_20(self): net = hourglass.hourglass_20(2, initial_downsample=False) self.assertEqual(hourglass.hourglass_depth(net), 20) outputs = net(tf.zeros((2, 32, 32, 3))) self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) def test_hourglass_32(self): net = hourglass.hourglass_32(2, initial_downsample=False) self.assertEqual(hourglass.hourglass_depth(net), 32) outputs = net(tf.zeros((2, 32, 32, 3))) self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) def test_hourglass_52(self): net = hourglass.hourglass_52(2, initial_downsample=False) self.assertEqual(hourglass.hourglass_depth(net), 52) outputs = net(tf.zeros((2, 32, 32, 3))) self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) def test_hourglass_20_uniform_size(self): net = hourglass.hourglass_20_uniform_size(2) self.assertEqual(hourglass.hourglass_depth(net), 20) outputs = net(tf.zeros((2, 32, 32, 3))) self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) def test_hourglass_100(self): net = hourglass.hourglass_100(2, initial_downsample=False) self.assertEqual(hourglass.hourglass_depth(net), 100) outputs = net(tf.zeros((2, 32, 32, 3))) self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/hourglass_network_tf2_test.py
hourglass_network_tf2_test.py
# Lint as: python2, python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for inception_resnet_v2.py. This test mainly focuses on comparing slim inception resnet v2 and Keras inception resnet v2 for object detection. To verify the consistency of the two models, we compare: 1. Output shape of each layer given different inputs 2. Number of global variables We also visualize the model structure via Tensorboard, and compare the model layout and the parameters of each Op to make sure the two implementations are consistent. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import numpy as np from six.moves import zip import tensorflow.compat.v1 as tf from object_detection.models.keras_models import inception_resnet_v2 from object_detection.utils import test_case from object_detection.utils import tf_version _KERAS_TO_SLIM_ENDPOINT_NAMES = { 'activation': 'Conv2d_1a_3x3', 'activation_1': 'Conv2d_2a_3x3', 'activation_2': 'Conv2d_2b_3x3', 'activation_3': 'Conv2d_3b_1x1', 'activation_4': 'Conv2d_4a_3x3', 'max_pooling2d': 'MaxPool_3a_3x3', 'max_pooling2d_1': 'MaxPool_5a_3x3', 'mixed_5b': 'Mixed_5b', 'mixed_6a': 'Mixed_6a', 'block17_20_ac': 'PreAuxLogits', 'mixed_7a': 'Mixed_7a', 'conv_7b_ac': 'Conv2d_7b_1x1', } _SLIM_ENDPOINT_SHAPES_128 = { 'Conv2d_1a_3x3': (2, 64, 64, 32), 'Conv2d_2a_3x3': (2, 64, 64, 32), 'Conv2d_2b_3x3': (2, 64, 64, 64), 'Conv2d_3b_1x1': (2, 32, 32, 80), 'Conv2d_4a_3x3': (2, 32, 32, 192), 'Conv2d_7b_1x1': (2, 4, 4, 1536), 'MaxPool_3a_3x3': (2, 32, 32, 64), 'MaxPool_5a_3x3': (2, 16, 16, 192), 'Mixed_5b': (2, 16, 16, 320), 'Mixed_6a': (2, 8, 8, 1088), 'Mixed_7a': (2, 4, 4, 2080), 'PreAuxLogits': (2, 8, 8, 1088)} _SLIM_ENDPOINT_SHAPES_128_STRIDE_8 = { 'Conv2d_1a_3x3': (2, 64, 64, 32), 'Conv2d_2a_3x3': (2, 64, 64, 32), 'Conv2d_2b_3x3': (2, 64, 64, 64), 'Conv2d_3b_1x1': (2, 32, 32, 80), 'Conv2d_4a_3x3': (2, 32, 32, 192), 'MaxPool_3a_3x3': (2, 32, 32, 64), 'MaxPool_5a_3x3': (2, 16, 16, 192), 'Mixed_5b': (2, 16, 16, 320), 'Mixed_6a': (2, 16, 16, 1088), 'PreAuxLogits': (2, 16, 16, 1088)} _SLIM_ENDPOINT_SHAPES_128_ALIGN_FEATURE_MAPS_FALSE = { 'Conv2d_1a_3x3': (2, 63, 63, 32), 'Conv2d_2a_3x3': (2, 61, 61, 32), 'Conv2d_2b_3x3': (2, 61, 61, 64), 'Conv2d_3b_1x1': (2, 30, 30, 80), 'Conv2d_4a_3x3': (2, 28, 28, 192), 'Conv2d_7b_1x1': (2, 2, 2, 1536), 'MaxPool_3a_3x3': (2, 30, 30, 64), 'MaxPool_5a_3x3': (2, 13, 13, 192), 'Mixed_5b': (2, 13, 13, 320), 'Mixed_6a': (2, 6, 6, 1088), 'Mixed_7a': (2, 2, 2, 2080), 'PreAuxLogits': (2, 6, 6, 1088)} _SLIM_ENDPOINT_SHAPES_299 = {} _SLIM_ENDPOINT_SHAPES_299_STRIDE_8 = {} _SLIM_ENDPOINT_SHAPES_299_ALIGN_FEATURE_MAPS_FALSE = {} _KERAS_LAYERS_TO_CHECK = list(_KERAS_TO_SLIM_ENDPOINT_NAMES.keys()) _NUM_CHANNELS = 3 _BATCH_SIZE = 2 @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class InceptionResnetV2Test(test_case.TestCase): def 
_create_application_with_layer_outputs( self, layer_names, batchnorm_training, output_stride=16, align_feature_maps=False, batchnorm_scale=False, weight_decay=0.00004, default_batchnorm_momentum=0.9997, default_batchnorm_epsilon=0.001,): """Constructs Keras inception_resnet_v2 that extracts layer outputs.""" # Have to clear the Keras backend to ensure isolation in layer naming tf.keras.backend.clear_session() if not layer_names: layer_names = _KERAS_LAYERS_TO_CHECK full_model = inception_resnet_v2.inception_resnet_v2( batchnorm_training=batchnorm_training, output_stride=output_stride, align_feature_maps=align_feature_maps, weights=None, batchnorm_scale=batchnorm_scale, weight_decay=weight_decay, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, include_top=False) layer_outputs = [full_model.get_layer(name=layer).output for layer in layer_names] return tf.keras.Model( inputs=full_model.inputs, outputs=layer_outputs) def _check_returns_correct_shape( self, image_height, image_width, expected_feature_map_shape, layer_names=None, batchnorm_training=True, output_stride=16, align_feature_maps=False, batchnorm_scale=False, weight_decay=0.00004, default_batchnorm_momentum=0.9997, default_batchnorm_epsilon=0.001,): if not layer_names: layer_names = _KERAS_LAYERS_TO_CHECK model = self._create_application_with_layer_outputs( layer_names=layer_names, batchnorm_training=batchnorm_training, output_stride=output_stride, align_feature_maps=align_feature_maps, batchnorm_scale=batchnorm_scale, weight_decay=weight_decay, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon) image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, _NUM_CHANNELS).astype(np.float32) feature_maps = model(image_tensor) for feature_map, layer_name in zip(feature_maps, layer_names): endpoint_name = _KERAS_TO_SLIM_ENDPOINT_NAMES[layer_name] expected_shape = expected_feature_map_shape[endpoint_name] self.assertAllEqual(feature_map.shape, expected_shape) def _get_variables(self, layer_names=None): tf.keras.backend.clear_session() model = self._create_application_with_layer_outputs( layer_names=layer_names, batchnorm_training=False) preprocessed_inputs = tf.random.uniform([4, 40, 40, _NUM_CHANNELS]) model(preprocessed_inputs) return model.variables def test_returns_correct_shapes_128(self): image_height = 128 image_width = 128 expected_feature_map_shape = ( _SLIM_ENDPOINT_SHAPES_128) self._check_returns_correct_shape( image_height, image_width, expected_feature_map_shape, align_feature_maps=True) def test_returns_correct_shapes_128_output_stride_8(self): image_height = 128 image_width = 128 expected_feature_map_shape = ( _SLIM_ENDPOINT_SHAPES_128_STRIDE_8) # Output stride of 8 not defined beyond 'block17_20_ac', which is # PreAuxLogits in slim. So, we exclude those layers in our Keras vs Slim # comparison. 
excluded_layers = {'mixed_7a', 'conv_7b_ac'} layer_names = [l for l in _KERAS_LAYERS_TO_CHECK if l not in excluded_layers] self._check_returns_correct_shape( image_height, image_width, expected_feature_map_shape, layer_names=layer_names, output_stride=8, align_feature_maps=True) def test_returns_correct_shapes_128_align_feature_maps_false( self): image_height = 128 image_width = 128 expected_feature_map_shape = ( _SLIM_ENDPOINT_SHAPES_128_ALIGN_FEATURE_MAPS_FALSE) self._check_returns_correct_shape( image_height, image_width, expected_feature_map_shape, align_feature_maps=False) def test_hyperparam_override(self): model = inception_resnet_v2.inception_resnet_v2( batchnorm_training=True, default_batchnorm_momentum=0.2, default_batchnorm_epsilon=0.1, weights=None, include_top=False) bn_layer = model.get_layer(name='freezable_batch_norm') self.assertAllClose(bn_layer.momentum, 0.2) self.assertAllClose(bn_layer.epsilon, 0.1) def test_variable_count(self): variables = self._get_variables() # 896 is the number of variables from slim inception resnet v2 model. self.assertEqual(len(variables), 896) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py
inception_resnet_v2_tf2_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Write keras weights into a tensorflow checkpoint. The imagenet weights in `keras.applications` are downloaded from github. This script converts them into the tensorflow checkpoint format and stores them on disk where they can be easily accessible during training. """ from __future__ import print_function import os from absl import app import numpy as np import tensorflow.compat.v1 as tf FLAGS = tf.flags.FLAGS tf.flags.DEFINE_string('model', 'resnet_v2_101', 'The model to load. The following are supported: ' '"resnet_v1_50", "resnet_v1_101", "resnet_v2_50", ' '"resnet_v2_101"') tf.flags.DEFINE_string('output_path', None, 'The directory to output weights in.') tf.flags.DEFINE_boolean('verify_weights', True, ('Verify the weights are loaded correctly by making ' 'sure the predictions are the same before and after ' 'saving.')) def init_model(name): """Creates a Keras Model with the specific ResNet version.""" if name == 'resnet_v1_50': model = tf.keras.applications.ResNet50(weights='imagenet') elif name == 'resnet_v1_101': model = tf.keras.applications.ResNet101(weights='imagenet') elif name == 'resnet_v2_50': model = tf.keras.applications.ResNet50V2(weights='imagenet') elif name == 'resnet_v2_101': model = tf.keras.applications.ResNet101V2(weights='imagenet') else: raise ValueError('Model {} not supported'.format(FLAGS.model)) return model def main(_): model = init_model(FLAGS.model) path = os.path.join(FLAGS.output_path, FLAGS.model) tf.gfile.MakeDirs(path) weights_path = os.path.join(path, 'weights') ckpt = tf.train.Checkpoint(feature_extractor=model) saved_path = ckpt.save(weights_path) if FLAGS.verify_weights: imgs = np.random.randn(1, 224, 224, 3).astype(np.float32) keras_preds = model(imgs) model = init_model(FLAGS.model) ckpt.restore(saved_path) loaded_weights_pred = model(imgs).numpy() if not np.all(np.isclose(keras_preds, loaded_weights_pred)): raise RuntimeError('The model was not saved correctly.') if __name__ == '__main__': tf.enable_v2_behavior() app.run(main)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/convert_keras_models.py
convert_keras_models.py
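A minimal usage sketch for the conversion script above. The invocation path and the output directory are illustrative assumptions, and the restore snippet simply mirrors how the script nests the Keras model under `feature_extractor` when saving:

# Hypothetical invocation (assuming the object_detection package is on PYTHONPATH):
#   python -m object_detection.models.keras_models.convert_keras_models \
#       --model resnet_v1_50 --output_path /tmp/keras_weights
#
# Restoring the resulting checkpoint into a freshly built Keras model:
import tensorflow.compat.v2 as tf

model = tf.keras.applications.ResNet50(weights=None)  # no imagenet download needed here
ckpt = tf.train.Checkpoint(feature_extractor=model)   # same nesting as in the script
latest = tf.train.latest_checkpoint('/tmp/keras_weights/resnet_v1_50')
ckpt.restore(latest).expect_partial()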
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Hourglass[1] network. [1]: https://arxiv.org/abs/1603.06937 """ import tensorflow.compat.v2 as tf BATCH_NORM_EPSILON = 1e-5 BATCH_NORM_MOMENTUM = 0.1 BATCH_NORM_FUSED = True class IdentityLayer(tf.keras.layers.Layer): """A layer which passes through the input as it is.""" def call(self, inputs): return inputs def _get_padding_for_kernel_size(kernel_size): if kernel_size == 7: return (3, 3) elif kernel_size == 3: return (1, 1) else: raise ValueError('Padding for kernel size {} not known.'.format( kernel_size)) def batchnorm(): try: return tf.keras.layers.experimental.SyncBatchNormalization( name='batchnorm', epsilon=1e-5, momentum=0.1) except AttributeError: return tf.keras.layers.BatchNormalization( name='batchnorm', epsilon=1e-5, momentum=0.1, fused=BATCH_NORM_FUSED) class ConvolutionalBlock(tf.keras.layers.Layer): """Block that aggregates Convolution + Norm layer + ReLU.""" def __init__(self, kernel_size, out_channels, stride=1, relu=True, padding='same'): """Initializes the Convolutional block. Args: kernel_size: int, convolution kernel size. out_channels: int, the desired number of output channels. stride: Integer, stride used in the convolution. relu: bool, whether to use relu at the end of the layer. padding: str, the padding scheme to use when kernel_size <= 1 """ super(ConvolutionalBlock, self).__init__() if kernel_size > 1: padding = 'valid' padding_size = _get_padding_for_kernel_size(kernel_size) # TODO(vighneshb) Explore if removing and using padding option in conv # layer works. self.pad = tf.keras.layers.ZeroPadding2D(padding_size) else: self.pad = IdentityLayer() self.conv = tf.keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, use_bias=False, strides=stride, padding=padding) self.norm = batchnorm() if relu: self.relu = tf.keras.layers.ReLU() else: self.relu = IdentityLayer() def call(self, inputs): net = self.pad(inputs) net = self.conv(net) net = self.norm(net) return self.relu(net) class SkipConvolution(ConvolutionalBlock): """The skip connection layer for a ResNet.""" def __init__(self, out_channels, stride): """Initializes the skip convolution layer. Args: out_channels: int, the desired number of output channels. stride: int, the stride for the layer. """ super(SkipConvolution, self).__init__( out_channels=out_channels, kernel_size=1, stride=stride, relu=False) class ResidualBlock(tf.keras.layers.Layer): """A Residual block.""" def __init__(self, out_channels, skip_conv=False, kernel_size=3, stride=1, padding='same'): """Initializes the Residual block. Args: out_channels: int, the desired number of output channels. skip_conv: bool, whether to use a conv layer for skip connections. kernel_size: int, convolution kernel size. stride: Integer, stride used in the convolution. padding: str, the type of padding to use. 
""" super(ResidualBlock, self).__init__() self.conv_block = ConvolutionalBlock( kernel_size=kernel_size, out_channels=out_channels, stride=stride) self.conv = tf.keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, use_bias=False, strides=1, padding=padding) self.norm = batchnorm() if skip_conv: self.skip = SkipConvolution(out_channels=out_channels, stride=stride) else: self.skip = IdentityLayer() self.relu = tf.keras.layers.ReLU() def call(self, inputs): net = self.conv_block(inputs) net = self.conv(net) net = self.norm(net) net_skip = self.skip(inputs) return self.relu(net + net_skip) class InputDownsampleBlock(tf.keras.layers.Layer): """Block for the initial feature downsampling.""" def __init__(self, out_channels_initial_conv, out_channels_residual_block): """Initializes the downsample block. Args: out_channels_initial_conv: int, the desired number of output channels in the initial conv layer. out_channels_residual_block: int, the desired number of output channels in the underlying residual block. """ super(InputDownsampleBlock, self).__init__() self.conv_block = ConvolutionalBlock( kernel_size=7, out_channels=out_channels_initial_conv, stride=2, padding='valid') self.residual_block = ResidualBlock( out_channels=out_channels_residual_block, stride=2, skip_conv=True) def call(self, inputs): return self.residual_block(self.conv_block(inputs)) class InputConvBlock(tf.keras.layers.Layer): """Block for the initial feature convolution. This block is used in the hourglass network when we don't want to downsample the input. """ def __init__(self, out_channels_initial_conv, out_channels_residual_block): """Initializes the downsample block. Args: out_channels_initial_conv: int, the desired number of output channels in the initial conv layer. out_channels_residual_block: int, the desired number of output channels in the underlying residual block. """ super(InputConvBlock, self).__init__() self.conv_block = ConvolutionalBlock( kernel_size=3, out_channels=out_channels_initial_conv, stride=1, padding='valid') self.residual_block = ResidualBlock( out_channels=out_channels_residual_block, stride=1, skip_conv=True) def call(self, inputs): return self.residual_block(self.conv_block(inputs)) def _make_repeated_residual_blocks(out_channels, num_blocks, initial_stride=1, residual_channels=None, initial_skip_conv=False): """Stack Residual blocks one after the other. Args: out_channels: int, the desired number of output channels. num_blocks: int, the number of residual blocks to be stacked. initial_stride: int, the stride of the initial residual block. residual_channels: int, the desired number of output channels in the intermediate residual blocks. If not specifed, we use out_channels. initial_skip_conv: bool, if set, the first residual block uses a skip convolution. This is useful when the number of channels in the input are not the same as residual_channels. Returns: blocks: A list of residual blocks to be applied in sequence. """ blocks = [] if residual_channels is None: residual_channels = out_channels for i in range(num_blocks - 1): # Only use the stride at the first block so we don't repeatedly downsample # the input stride = initial_stride if i == 0 else 1 # If the stide is more than 1, we cannot use an identity layer for the # skip connection and are forced to use a conv for the skip connection. 
skip_conv = stride > 1 if i == 0 and initial_skip_conv: skip_conv = True blocks.append( ResidualBlock(out_channels=residual_channels, stride=stride, skip_conv=skip_conv) ) if num_blocks == 1: # If there is only 1 block, the for loop above is not run, # therefore we honor the requested stride in the last residual block stride = initial_stride # We are forced to use a conv in the skip connection if stride > 1 skip_conv = stride > 1 else: stride = 1 skip_conv = residual_channels != out_channels blocks.append(ResidualBlock(out_channels=out_channels, skip_conv=skip_conv, stride=stride)) return blocks def _apply_blocks(inputs, blocks): net = inputs for block in blocks: net = block(net) return net class EncoderDecoderBlock(tf.keras.layers.Layer): """An encoder-decoder block which recursively defines the hourglass network.""" def __init__(self, num_stages, channel_dims, blocks_per_stage, stagewise_downsample=True, encoder_decoder_shortcut=True): """Initializes the encoder-decoder block. Args: num_stages: int, Number of stages in the network. At each stage we have 2 encoder and 1 decoder blocks. The second encoder block downsamples the input. channel_dims: int list, the output channels dimensions of stages in the network. `channel_dims[0]` is used to define the number of channels in the first encoder block and `channel_dims[1]` is used to define the number of channels in the second encoder block. The channels in the recursive inner layers are defined using `channel_dims[1:]` blocks_per_stage: int list, number of residual blocks to use at each stage. `blocks_per_stage[0]` defines the number of blocks at the current stage and `blocks_per_stage[1:]` is used at further stages. stagewise_downsample: bool, whether or not to downsample before passing inputs to the next stage. encoder_decoder_shortcut: bool, whether or not to use shortcut connections between encoder and decoder. 
""" super(EncoderDecoderBlock, self).__init__() out_channels = channel_dims[0] out_channels_downsampled = channel_dims[1] self.encoder_decoder_shortcut = encoder_decoder_shortcut if encoder_decoder_shortcut: self.merge_features = tf.keras.layers.Add() self.encoder_block1 = _make_repeated_residual_blocks( out_channels=out_channels, num_blocks=blocks_per_stage[0], initial_stride=1) initial_stride = 2 if stagewise_downsample else 1 self.encoder_block2 = _make_repeated_residual_blocks( out_channels=out_channels_downsampled, num_blocks=blocks_per_stage[0], initial_stride=initial_stride, initial_skip_conv=out_channels != out_channels_downsampled) if num_stages > 1: self.inner_block = [ EncoderDecoderBlock(num_stages - 1, channel_dims[1:], blocks_per_stage[1:], stagewise_downsample=stagewise_downsample, encoder_decoder_shortcut=encoder_decoder_shortcut) ] else: self.inner_block = _make_repeated_residual_blocks( out_channels=out_channels_downsampled, num_blocks=blocks_per_stage[1]) self.decoder_block = _make_repeated_residual_blocks( residual_channels=out_channels_downsampled, out_channels=out_channels, num_blocks=blocks_per_stage[0]) self.upsample = tf.keras.layers.UpSampling2D(initial_stride) def call(self, inputs): if self.encoder_decoder_shortcut: encoded_outputs = _apply_blocks(inputs, self.encoder_block1) encoded_downsampled_outputs = _apply_blocks(inputs, self.encoder_block2) inner_block_outputs = _apply_blocks( encoded_downsampled_outputs, self.inner_block) decoded_outputs = _apply_blocks(inner_block_outputs, self.decoder_block) upsampled_outputs = self.upsample(decoded_outputs) if self.encoder_decoder_shortcut: return self.merge_features([encoded_outputs, upsampled_outputs]) else: return upsampled_outputs class HourglassNetwork(tf.keras.Model): """The hourglass network.""" def __init__(self, num_stages, input_channel_dims, channel_dims_per_stage, blocks_per_stage, num_hourglasses, initial_downsample=True, stagewise_downsample=True, encoder_decoder_shortcut=True): """Intializes the feature extractor. Args: num_stages: int, Number of stages in the network. At each stage we have 2 encoder and 1 decoder blocks. The second encoder block downsamples the input. input_channel_dims: int, the number of channels in the input conv blocks. channel_dims_per_stage: int list, the output channel dimensions of each stage in the hourglass network. blocks_per_stage: int list, number of residual blocks to use at each stage in the hourglass network num_hourglasses: int, number of hourglas networks to stack sequentially. initial_downsample: bool, if set, downsamples the input by a factor of 4 before applying the rest of the network. Downsampling is done with a 7x7 convolution kernel, otherwise a 3x3 kernel is used. stagewise_downsample: bool, whether or not to downsample before passing inputs to the next stage. encoder_decoder_shortcut: bool, whether or not to use shortcut connections between encoder and decoder. 
""" super(HourglassNetwork, self).__init__() self.num_hourglasses = num_hourglasses self.initial_downsample = initial_downsample if initial_downsample: self.downsample_input = InputDownsampleBlock( out_channels_initial_conv=input_channel_dims, out_channels_residual_block=channel_dims_per_stage[0] ) else: self.conv_input = InputConvBlock( out_channels_initial_conv=input_channel_dims, out_channels_residual_block=channel_dims_per_stage[0] ) self.hourglass_network = [] self.output_conv = [] for _ in range(self.num_hourglasses): self.hourglass_network.append( EncoderDecoderBlock( num_stages=num_stages, channel_dims=channel_dims_per_stage, blocks_per_stage=blocks_per_stage, stagewise_downsample=stagewise_downsample, encoder_decoder_shortcut=encoder_decoder_shortcut) ) self.output_conv.append( ConvolutionalBlock(kernel_size=3, out_channels=channel_dims_per_stage[0]) ) self.intermediate_conv1 = [] self.intermediate_conv2 = [] self.intermediate_residual = [] for _ in range(self.num_hourglasses - 1): self.intermediate_conv1.append( ConvolutionalBlock( kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False) ) self.intermediate_conv2.append( ConvolutionalBlock( kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False) ) self.intermediate_residual.append( ResidualBlock(out_channels=channel_dims_per_stage[0]) ) self.intermediate_relu = tf.keras.layers.ReLU() def call(self, inputs): if self.initial_downsample: inputs = self.downsample_input(inputs) else: inputs = self.conv_input(inputs) outputs = [] for i in range(self.num_hourglasses): hourglass_output = self.hourglass_network[i](inputs) output = self.output_conv[i](hourglass_output) outputs.append(output) if i < self.num_hourglasses - 1: secondary_output = (self.intermediate_conv1[i](inputs) + self.intermediate_conv2[i](output)) secondary_output = self.intermediate_relu(secondary_output) inputs = self.intermediate_residual[i](secondary_output) return outputs @property def out_stride(self): """The stride in the output image of the network.""" return 4 @property def num_feature_outputs(self): """Ther number of feature outputs returned by the feature extractor.""" return self.num_hourglasses def _layer_depth(layer): """Compute depth of Conv/Residual blocks or lists of them.""" if isinstance(layer, list): return sum([_layer_depth(l) for l in layer]) elif isinstance(layer, ConvolutionalBlock): return 1 elif isinstance(layer, ResidualBlock): return 2 else: raise ValueError('Unknown layer - {}'.format(layer)) def _encoder_decoder_depth(network): """Helper function to compute depth of encoder-decoder blocks.""" encoder_block2_layers = _layer_depth(network.encoder_block2) decoder_block_layers = _layer_depth(network.decoder_block) if isinstance(network.inner_block[0], EncoderDecoderBlock): assert len(network.inner_block) == 1, 'Inner block is expected as length 1.' inner_block_layers = _encoder_decoder_depth(network.inner_block[0]) return inner_block_layers + encoder_block2_layers + decoder_block_layers elif isinstance(network.inner_block[0], ResidualBlock): return (encoder_block2_layers + decoder_block_layers + _layer_depth(network.inner_block)) else: raise ValueError('Unknown inner block type.') def hourglass_depth(network): """Helper function to verify depth of hourglass backbone.""" input_conv_layers = 3 # 1 ResidualBlock and 1 ConvBlock # Only intermediate_conv2 and intermediate_residual are applied before # sending inputs to the later stages. 
intermediate_layers = ( _layer_depth(network.intermediate_conv2) + _layer_depth(network.intermediate_residual) ) # network.output_conv is applied before sending input to the later stages output_layers = _layer_depth(network.output_conv) encoder_decoder_layers = sum(_encoder_decoder_depth(net) for net in network.hourglass_network) return (input_conv_layers + encoder_decoder_layers + intermediate_layers + output_layers) def hourglass_104(): """The Hourglass-104 backbone. The architecture parameters are taken from [1]. Returns: network: An HourglassNetwork object implementing the Hourglass-104 backbone. [1]: https://arxiv.org/abs/1904.07850 """ return HourglassNetwork( input_channel_dims=128, channel_dims_per_stage=[256, 256, 384, 384, 384, 512], num_hourglasses=2, num_stages=5, blocks_per_stage=[2, 2, 2, 2, 2, 4], ) def single_stage_hourglass(input_channel_dims, channel_dims_per_stage, blocks_per_stage, initial_downsample=True, stagewise_downsample=True, encoder_decoder_shortcut=True): assert len(channel_dims_per_stage) == len(blocks_per_stage) return HourglassNetwork( input_channel_dims=input_channel_dims, channel_dims_per_stage=channel_dims_per_stage, num_hourglasses=1, num_stages=len(channel_dims_per_stage) - 1, blocks_per_stage=blocks_per_stage, initial_downsample=initial_downsample, stagewise_downsample=stagewise_downsample, encoder_decoder_shortcut=encoder_decoder_shortcut ) def hourglass_10(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[1, 1], channel_dims_per_stage=[nc * 2, nc * 2]) def hourglass_20(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[1, 2, 2], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3]) def hourglass_32(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[2, 2, 2, 2], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3]) def hourglass_52(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[2, 2, 2, 2, 2, 4], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4]) def hourglass_100(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[4, 4, 4, 4, 4, 8], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4]) def hourglass_20_uniform_size(num_channels): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, blocks_per_stage=[1, 2, 2], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3], initial_downsample=False, stagewise_downsample=False) def hourglass_20_no_shortcut(num_channels): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, blocks_per_stage=[1, 2, 2], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3], initial_downsample=False, encoder_decoder_shortcut=False)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/hourglass_network.py
hourglass_network.py
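A short sketch exercising the hourglass builders above; the tiny hourglass_10 configuration and the 64x64 input are arbitrary illustration choices, and the depth check simply re-derives the "104" in hourglass_104 via hourglass_depth:

import numpy as np

from object_detection.models.keras_models import hourglass_network

# hourglass_depth counts the input conv + residual block (3 layers), the
# encoder/decoder residual blocks, and the intermediate/output convs.
assert hourglass_network.hourglass_depth(
    hourglass_network.hourglass_104()) == 104

# A single-hourglass toy variant: with initial_downsample=True the output
# stride is 4, so a 64x64 input gives 16x16 maps with nc * 2 = 16 channels.
net = hourglass_network.hourglass_10(num_channels=8)
outputs = net(np.zeros((1, 64, 64, 3), dtype=np.float32))
assert len(outputs) == net.num_feature_outputs  # one stacked hourglass
assert outputs[0].shape == (1, 16, 16, 16)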
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for mobilenet_v1.py. This test mainly focuses on comparing slim MobilenetV1 and Keras MobilenetV1 for object detection. To verify the consistency of the two models, we compare: 1. Output shape of each layer given different inputs 2. Number of global variables We also visualize the model structure via Tensorboard, and compare the model layout and the parameters of each Op to make sure the two implementations are consistent. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import numpy as np from six.moves import zip import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.models.keras_models import mobilenet_v1 from object_detection.models.keras_models import model_utils from object_detection.models.keras_models import test_utils from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version _KERAS_LAYERS_TO_CHECK = [ 'conv1_relu', 'conv_dw_1_relu', 'conv_pw_1_relu', 'conv_dw_2_relu', 'conv_pw_2_relu', 'conv_dw_3_relu', 'conv_pw_3_relu', 'conv_dw_4_relu', 'conv_pw_4_relu', 'conv_dw_5_relu', 'conv_pw_5_relu', 'conv_dw_6_relu', 'conv_pw_6_relu', 'conv_dw_7_relu', 'conv_pw_7_relu', 'conv_dw_8_relu', 'conv_pw_8_relu', 'conv_dw_9_relu', 'conv_pw_9_relu', 'conv_dw_10_relu', 'conv_pw_10_relu', 'conv_dw_11_relu', 'conv_pw_11_relu', 'conv_dw_12_relu', 'conv_pw_12_relu', 'conv_dw_13_relu', 'conv_pw_13_relu', ] _NUM_CHANNELS = 3 _BATCH_SIZE = 2 @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class MobilenetV1Test(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } batch_norm { train: true, scale: false, center: true, decay: 0.2, epsilon: 0.1, } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def _create_application_with_layer_outputs( self, layer_names, batchnorm_training, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None): """Constructs Keras MobilenetV1 that extracts intermediate layer outputs.""" if not layer_names: layer_names = _KERAS_LAYERS_TO_CHECK full_model = mobilenet_v1.mobilenet_v1( batchnorm_training=batchnorm_training, conv_hyperparams=conv_hyperparams, weights=None, use_explicit_padding=use_explicit_padding, alpha=alpha, min_depth=min_depth, conv_defs=conv_defs, include_top=False) layer_outputs = [full_model.get_layer(name=layer).output for layer in layer_names] return tf.keras.Model( 
inputs=full_model.inputs, outputs=layer_outputs) def _check_returns_correct_shape( self, image_height, image_width, depth_multiplier, expected_feature_map_shape, use_explicit_padding=False, min_depth=8, layer_names=None, conv_defs=None): model = self._create_application_with_layer_outputs( layer_names=layer_names, batchnorm_training=False, use_explicit_padding=use_explicit_padding, min_depth=min_depth, alpha=depth_multiplier, conv_defs=conv_defs) image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, _NUM_CHANNELS).astype(np.float32) feature_maps = model(image_tensor) for feature_map, expected_shape in zip(feature_maps, expected_feature_map_shape): self.assertAllEqual(feature_map.shape, expected_shape) def _check_returns_correct_shapes_with_dynamic_inputs( self, image_height, image_width, depth_multiplier, expected_feature_map_shape, use_explicit_padding=False, min_depth=8, layer_names=None): image_tensor = tf.random_uniform([_BATCH_SIZE, image_height, image_width, _NUM_CHANNELS], dtype=tf.float32) model = self._create_application_with_layer_outputs( layer_names=layer_names, batchnorm_training=False, use_explicit_padding=use_explicit_padding, alpha=depth_multiplier) feature_maps = model(image_tensor) for feature_map, expected_shape in zip(feature_maps, expected_feature_map_shape): self.assertAllEqual(feature_map.shape, expected_shape) def _get_variables(self, depth_multiplier, layer_names=None): tf.keras.backend.clear_session() model = self._create_application_with_layer_outputs( layer_names=layer_names, batchnorm_training=False, use_explicit_padding=False, alpha=depth_multiplier) preprocessed_inputs = tf.random.uniform([2, 40, 40, 3]) model(preprocessed_inputs) return model.variables def test_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 expected_feature_map_shape = ( test_utils.moblenet_v1_expected_feature_map_shape_128) self._check_returns_correct_shape( image_height, image_width, depth_multiplier, expected_feature_map_shape) def test_returns_correct_shapes_128_explicit_padding( self): image_height = 128 image_width = 128 depth_multiplier = 1.0 expected_feature_map_shape = ( test_utils.moblenet_v1_expected_feature_map_shape_128_explicit_padding) self._check_returns_correct_shape( image_height, image_width, depth_multiplier, expected_feature_map_shape, use_explicit_padding=True) def test_returns_correct_shapes_with_dynamic_inputs( self): image_height = 128 image_width = 128 depth_multiplier = 1.0 expected_feature_map_shape = ( test_utils.mobilenet_v1_expected_feature_map_shape_with_dynamic_inputs) self._check_returns_correct_shapes_with_dynamic_inputs( image_height, image_width, depth_multiplier, expected_feature_map_shape) def test_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 expected_feature_map_shape = ( test_utils.moblenet_v1_expected_feature_map_shape_299) self._check_returns_correct_shape( image_height, image_width, depth_multiplier, expected_feature_map_shape) def test_returns_correct_shapes_enforcing_min_depth( self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 expected_feature_map_shape = ( test_utils.moblenet_v1_expected_feature_map_shape_enforcing_min_depth) self._check_returns_correct_shape( image_height, image_width, depth_multiplier, expected_feature_map_shape) def test_returns_correct_shapes_with_conv_defs( self): image_height = 299 image_width = 299 depth_multiplier = 1.0 conv_def_block_12 = model_utils.ConvDefs( conv_name='conv_pw_12', 
filters=512) conv_def_block_13 = model_utils.ConvDefs( conv_name='conv_pw_13', filters=256) conv_defs = [conv_def_block_12, conv_def_block_13] expected_feature_map_shape = ( test_utils.moblenet_v1_expected_feature_map_shape_with_conv_defs) self._check_returns_correct_shape( image_height, image_width, depth_multiplier, expected_feature_map_shape, conv_defs=conv_defs) def test_hyperparam_override(self): hyperparams = self._build_conv_hyperparams() model = mobilenet_v1.mobilenet_v1( batchnorm_training=True, conv_hyperparams=hyperparams, weights=None, use_explicit_padding=False, alpha=1.0, min_depth=32, include_top=False) hyperparams.params() bn_layer = model.get_layer(name='conv_pw_5_bn') self.assertAllClose(bn_layer.momentum, 0.2) self.assertAllClose(bn_layer.epsilon, 0.1) def test_variable_count(self): depth_multiplier = 1 variables = self._get_variables(depth_multiplier) # 135 is the number of variables from slim MobilenetV1 model. self.assertEqual(len(variables), 135) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/mobilenet_v1_tf2_test.py
mobilenet_v1_tf2_test.py
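The MobilenetV1 test above builds a multi-output Keras model from named intermediate activations; a condensed sketch of that pattern (layer names come from _KERAS_LAYERS_TO_CHECK, and the batch-2, 128x128 input matches the shape constants in test_utils):

import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import mobilenet_v1

full_model = mobilenet_v1.mobilenet_v1(
    batchnorm_training=False, conv_hyperparams=None, weights=None,
    use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None,
    include_top=False)
# Expose two of the activations the test checks.
outputs = [full_model.get_layer(name=n).output
           for n in ('conv_pw_1_relu', 'conv_pw_13_relu')]
feature_model = tf.keras.Model(inputs=full_model.inputs, outputs=outputs)

feature_maps = feature_model(np.zeros((2, 128, 128, 3), dtype=np.float32))
# Per moblenet_v1_expected_feature_map_shape_128:
# conv_pw_1_relu -> (2, 64, 64, 64), conv_pw_13_relu -> (2, 4, 4, 1024).
assert feature_maps[0].shape == (2, 64, 64, 64)
assert feature_maps[1].shape == (2, 4, 4, 1024)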
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for resnet_v1.py. This test mainly focuses on comparing slim resnet v1 and Keras resnet v1 for object detection. To verify the consistency of the two models, we compare: 1. Output shape of each layer given different inputs. 2. Number of global variables. """ import unittest from absl.testing import parameterized import numpy as np from six.moves import zip import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.models.keras_models import resnet_v1 from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version _EXPECTED_SHAPES_224_RESNET50 = { 'conv2_block3_out': (4, 56, 56, 256), 'conv3_block4_out': (4, 28, 28, 512), 'conv4_block6_out': (4, 14, 14, 1024), 'conv5_block3_out': (4, 7, 7, 2048), } _EXPECTED_SHAPES_224_RESNET101 = { 'conv2_block3_out': (4, 56, 56, 256), 'conv3_block4_out': (4, 28, 28, 512), 'conv4_block23_out': (4, 14, 14, 1024), 'conv5_block3_out': (4, 7, 7, 2048), } _EXPECTED_SHAPES_224_RESNET152 = { 'conv2_block3_out': (4, 56, 56, 256), 'conv3_block8_out': (4, 28, 28, 512), 'conv4_block36_out': (4, 14, 14, 1024), 'conv5_block3_out': (4, 7, 7, 2048), } _RESNET_NAMES = ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152'] _RESNET_MODELS = [ resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101, resnet_v1.resnet_v1_152 ] _RESNET_SHAPES = [ _EXPECTED_SHAPES_224_RESNET50, _EXPECTED_SHAPES_224_RESNET101, _EXPECTED_SHAPES_224_RESNET152 ] _NUM_CHANNELS = 3 _BATCH_SIZE = 4 @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ResnetV1Test(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6, regularizer { l2_regularizer { weight: 0.0004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { scale: true, decay: 0.997, epsilon: 0.001, } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def _create_application_with_layer_outputs(self, model_index, batchnorm_training, batchnorm_scale=True, weight_decay=0.0001, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5): """Constructs Keras resnet_v1 that extracts layer outputs.""" # Have to clear the Keras backend to ensure isolation in layer naming tf.keras.backend.clear_session() layer_names = _RESNET_SHAPES[model_index].keys() full_model = _RESNET_MODELS[model_index]( batchnorm_training=batchnorm_training, weights=None, batchnorm_scale=batchnorm_scale, weight_decay=weight_decay, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, include_top=False) layer_outputs = [ 
full_model.get_layer(name=layer).output for layer in layer_names ] return tf.keras.Model(inputs=full_model.inputs, outputs=layer_outputs) def _check_returns_correct_shape(self, image_height, image_width, model_index, expected_feature_map_shape, batchnorm_training=True, batchnorm_scale=True, weight_decay=0.0001, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5): model = self._create_application_with_layer_outputs( model_index=model_index, batchnorm_training=batchnorm_training, batchnorm_scale=batchnorm_scale, weight_decay=weight_decay, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon) image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, _NUM_CHANNELS).astype(np.float32) feature_maps = model(image_tensor) layer_names = _RESNET_SHAPES[model_index].keys() for feature_map, layer_name in zip(feature_maps, layer_names): expected_shape = _RESNET_SHAPES[model_index][layer_name] self.assertAllEqual(feature_map.shape, expected_shape) def _get_variables(self, model_index): tf.keras.backend.clear_session() model = self._create_application_with_layer_outputs( model_index, batchnorm_training=False) preprocessed_inputs = tf.random.uniform([2, 40, 40, _NUM_CHANNELS]) model(preprocessed_inputs) return model.variables def test_returns_correct_shapes_224(self): image_height = 224 image_width = 224 for model_index, _ in enumerate(_RESNET_NAMES): expected_feature_map_shape = _RESNET_SHAPES[model_index] self._check_returns_correct_shape(image_height, image_width, model_index, expected_feature_map_shape) def test_hyperparam_override(self): for model_name in _RESNET_MODELS: model = model_name( batchnorm_training=True, default_batchnorm_momentum=0.2, default_batchnorm_epsilon=0.1, weights=None, include_top=False) bn_layer = model.get_layer(name='conv1_bn') self.assertAllClose(bn_layer.momentum, 0.2) self.assertAllClose(bn_layer.epsilon, 0.1) def test_variable_count(self): # The number of variables from slim resnetv1-* model. variable_nums = [265, 520, 775] for model_index, var_num in enumerate(variable_nums): variables = self._get_variables(model_index) self.assertEqual(len(variables), var_num) class ResnetShapeTest(test_case.TestCase, parameterized.TestCase): @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') @parameterized.parameters( { 'resnet_type': 'resnet_v1_34', 'output_layer_names': [ 'conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out' ] }, { 'resnet_type': 'resnet_v1_18', 'output_layer_names': [ 'conv2_block2_out', 'conv3_block2_out', 'conv4_block2_out', 'conv5_block2_out' ] }) def test_output_shapes(self, resnet_type, output_layer_names): if resnet_type == 'resnet_v1_34': model = resnet_v1.resnet_v1_34(input_shape=(64, 64, 3), weights=None) else: model = resnet_v1.resnet_v1_18(input_shape=(64, 64, 3), weights=None) outputs = [ model.get_layer(output_layer_name).output for output_layer_name in output_layer_names ] resnet_model = tf.keras.models.Model(inputs=model.input, outputs=outputs) outputs = resnet_model(np.zeros((2, 64, 64, 3), dtype=np.float32)) # Check the shape of 'conv2_block3_out': self.assertEqual(outputs[0].shape, [2, 16, 16, 64]) # Check the shape of 'conv3_block4_out': self.assertEqual(outputs[1].shape, [2, 8, 8, 128]) # Check the shape of 'conv4_block6_out': self.assertEqual(outputs[2].shape, [2, 4, 4, 256]) # Check the shape of 'conv5_block3_out': self.assertEqual(outputs[3].shape, [2, 2, 2, 512]) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/resnet_v1_tf2_test.py
resnet_v1_tf2_test.py
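A condensed sketch of what the ResnetV1Test helpers above do for a single model; the layer names and the batch-4, 224x224 input mirror _EXPECTED_SHAPES_224_RESNET50:

import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import resnet_v1

full_model = resnet_v1.resnet_v1_50(
    batchnorm_training=False, weights=None, include_top=False)
layer_names = ['conv2_block3_out', 'conv3_block4_out',
               'conv4_block6_out', 'conv5_block3_out']
outputs = [full_model.get_layer(name=n).output for n in layer_names]
model = tf.keras.Model(inputs=full_model.inputs, outputs=outputs)

feature_maps = model(np.zeros((4, 224, 224, 3), dtype=np.float32))
# Strides 4, 8, 16 and 32 relative to the input, i.e. 56x56x256, 28x28x512,
# 14x14x1024 and 7x7x2048 feature maps.
assert feature_maps[0].shape == (4, 56, 56, 256)
assert feature_maps[-1].shape == (4, 7, 7, 2048)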
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A wrapper around the Keras Resnet V1 models for object detection.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from tensorflow.python.keras.applications import resnet from object_detection.core import freezable_batch_norm from object_detection.models.keras_models import model_utils def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name """Pads the input along the spatial dimensions independently of input size. Pads the input such that if it was used in a convolution with 'VALID' padding, the output would have the same dimensions as if the unpadded input was used in a convolution with 'SAME' padding. Args: inputs: A tensor of size [batch, height_in, width_in, channels]. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. rate: An integer, rate for atrous convolution. Returns: output: A tensor of size [batch, height_out, width_out, channels] with the input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = tf.pad( inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return padded_inputs class _LayersOverride(object): """Alternative Keras layers interface for the Keras Resnet V1.""" def __init__(self, batchnorm_training, batchnorm_scale=True, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5, weight_decay=0.0001, conv_hyperparams=None, min_depth=8, depth_multiplier=1): """Alternative tf.keras.layers interface, for use by the Keras Resnet V1. The class is used by the Keras applications kwargs injection API to modify the Resnet V1 Keras application with changes required by the Object Detection API. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the epsilon. weight_decay: The weight decay to use for regularizing the model. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default resnet_v1 layer builders. min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. 
""" self._batchnorm_training = batchnorm_training self._batchnorm_scale = batchnorm_scale self._default_batchnorm_momentum = default_batchnorm_momentum self._default_batchnorm_epsilon = default_batchnorm_epsilon self._conv_hyperparams = conv_hyperparams self._min_depth = min_depth self._depth_multiplier = depth_multiplier self.regularizer = tf.keras.regularizers.l2(weight_decay) self.initializer = tf.variance_scaling_initializer() def _FixedPaddingLayer(self, kernel_size, rate=1): # pylint: disable=invalid-name return tf.keras.layers.Lambda( lambda x: _fixed_padding(x, kernel_size, rate)) def Conv2D(self, filters, kernel_size, **kwargs): # pylint: disable=invalid-name """Builds a Conv2D layer according to the current Object Detection config. Overrides the Keras Resnet application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: filters: The number of filters to use for the convolution. kernel_size: The kernel size to specify the height and width of the 2D convolution window. **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras Conv2D layer to the input argument, or that will first pad the input then apply a Conv2D layer. """ # Apply the minimum depth to the convolution layers. filters = max(int(filters * self._depth_multiplier), self._min_depth) if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) else: kwargs['kernel_regularizer'] = self.regularizer kwargs['kernel_initializer'] = self.initializer # Set use_bias as false to keep it consistent with Slim Resnet model. kwargs['use_bias'] = False kwargs['padding'] = 'same' stride = kwargs.get('strides') if stride and kernel_size and stride > 1 and kernel_size > 1: kwargs['padding'] = 'valid' def padded_conv(features): # pylint: disable=invalid-name padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.Conv2D( filters, kernel_size, **kwargs)(padded_features) return padded_conv else: return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) def Activation(self, *args, **kwargs): # pylint: disable=unused-argument,invalid-name """Builds an activation layer. Overrides the Keras application Activation layer specified by the Object Detection configuration. Args: *args: Ignored, required to match the `tf.keras.layers.Activation` interface. **kwargs: Only the name is used, required to match `tf.keras.layers.Activation` interface. Returns: An activation layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_activation_layer(name=name) else: return tf.keras.layers.Lambda(tf.nn.relu, name=name) def BatchNormalization(self, **kwargs): # pylint: disable=invalid-name """Builds a normalization layer. Overrides the Keras application batch norm with the norm specified by the Object Detection configuration. Args: **kwargs: Only the name is used, all other params ignored. Required for matching `layers.BatchNormalization` calls in the Keras application. Returns: A normalization layer specified by the Object Detection hyperparameter configurations. 
""" name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_batch_norm( training=self._batchnorm_training, name=name) else: kwargs['scale'] = self._batchnorm_scale kwargs['epsilon'] = self._default_batchnorm_epsilon return freezable_batch_norm.FreezableBatchNorm( training=self._batchnorm_training, momentum=self._default_batchnorm_momentum, **kwargs) def Input(self, shape): # pylint: disable=invalid-name """Builds an Input layer. Overrides the Keras application Input layer with one that uses a tf.placeholder_with_default instead of a tf.placeholder. This is necessary to ensure the application works when run on a TPU. Args: shape: A tuple of integers representing the shape of the input, which includes both spatial share and channels, but not the batch size. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known. Returns: An input layer for the specified shape that internally uses a placeholder_with_default. """ default_size = 224 default_batch_size = 1 shape = list(shape) default_shape = [default_size if dim is None else dim for dim in shape] input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) placeholder_with_default = tf.placeholder_with_default( input=input_tensor, shape=[None] + shape) return model_utils.input_layer(shape, placeholder_with_default) def MaxPooling2D(self, pool_size, **kwargs): # pylint: disable=invalid-name """Builds a MaxPooling2D layer with default padding as 'SAME'. This is specified by the default resnet arg_scope in slim. Args: pool_size: The pool size specified by the Keras application. **kwargs: Ignored, required to match the Keras applications usage. Returns: A MaxPooling2D layer with default padding as 'SAME'. """ kwargs['padding'] = 'same' return tf.keras.layers.MaxPooling2D(pool_size, **kwargs) # Add alias as Keras also has it. MaxPool2D = MaxPooling2D # pylint: disable=invalid-name def ZeroPadding2D(self, padding, **kwargs): # pylint: disable=unused-argument,invalid-name """Replaces explicit padding in the Keras application with a no-op. Args: padding: The padding values for image height and width. **kwargs: Ignored, required to match the Keras applications usage. Returns: A no-op identity lambda. """ return lambda x: x # Forward all non-overridden methods to the keras layers def __getattr__(self, item): return getattr(tf.keras.layers, item) # pylint: disable=invalid-name def resnet_v1_50(batchnorm_training, batchnorm_scale=True, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5, weight_decay=0.0001, conv_hyperparams=None, min_depth=8, depth_multiplier=1, **kwargs): """Instantiates the Resnet50 architecture, modified for object detection. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the epsilon. weight_decay: The weight decay to use for regularizing the model. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default resnet_v1 layer builders. 
min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.resnet.ResNet50` method that constructs the Keras model. Returns: A Keras ResnetV1-50 model instance. """ layers_override = _LayersOverride( batchnorm_training, batchnorm_scale=batchnorm_scale, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, conv_hyperparams=conv_hyperparams, weight_decay=weight_decay, min_depth=min_depth, depth_multiplier=depth_multiplier) return tf.keras.applications.resnet.ResNet50( layers=layers_override, **kwargs) def resnet_v1_101(batchnorm_training, batchnorm_scale=True, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5, weight_decay=0.0001, conv_hyperparams=None, min_depth=8, depth_multiplier=1, **kwargs): """Instantiates the Resnet101 architecture, modified for object detection. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the epsilon. weight_decay: The weight decay to use for regularizing the model. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default resnet_v1 layer builders. min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.resnet.ResNet101` method that constructs the Keras model. Returns: A Keras ResnetV1-101 model instance. """ layers_override = _LayersOverride( batchnorm_training, batchnorm_scale=batchnorm_scale, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, conv_hyperparams=conv_hyperparams, weight_decay=weight_decay, min_depth=min_depth, depth_multiplier=depth_multiplier) return tf.keras.applications.resnet.ResNet101( layers=layers_override, **kwargs) def resnet_v1_152(batchnorm_training, batchnorm_scale=True, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5, weight_decay=0.0001, conv_hyperparams=None, min_depth=8, depth_multiplier=1, **kwargs): """Instantiates the Resnet152 architecture, modified for object detection. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the epsilon. weight_decay: The weight decay to use for regularizing the model. 
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default resnet_v1 layer builders. min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.resnet.ResNet152` method that constructs the Keras model. Returns: A Keras ResnetV1-152 model instance. """ layers_override = _LayersOverride( batchnorm_training, batchnorm_scale=batchnorm_scale, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, conv_hyperparams=conv_hyperparams, weight_decay=weight_decay, min_depth=min_depth, depth_multiplier=depth_multiplier) return tf.keras.applications.resnet.ResNet152( layers=layers_override, **kwargs) # pylint: enable=invalid-name # The following code is based on the existing keras ResNet model pattern: # google3/third_party/tensorflow/python/keras/applications/resnet.py def block_basic(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None): """A residual block for ResNet18/34. Args: x: input tensor. filters: integer, filters of the bottleneck layer. kernel_size: default 3, kernel size of the bottleneck layer. stride: default 1, stride of the first layer. conv_shortcut: default False, use convolution shortcut if True, otherwise identity shortcut. name: string, block label. Returns: Output tensor for the residual block. """ layers = tf.keras.layers bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1 preact = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_preact_bn')( x) preact = layers.Activation('relu', name=name + '_preact_relu')(preact) if conv_shortcut: shortcut = layers.Conv2D( filters, 1, strides=1, name=name + '_0_conv')( preact) else: shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x x = layers.ZeroPadding2D( padding=((1, 1), (1, 1)), name=name + '_1_pad')( preact) x = layers.Conv2D( filters, kernel_size, strides=1, use_bias=False, name=name + '_1_conv')( x) x = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')( x) x = layers.Activation('relu', name=name + '_1_relu')(x) x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x) x = layers.Conv2D( filters, kernel_size, strides=stride, use_bias=False, name=name + '_2_conv')( x) x = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')( x) x = layers.Activation('relu', name=name + '_2_relu')(x) x = layers.Add(name=name + '_out')([shortcut, x]) return x def stack_basic(x, filters, blocks, stride1=2, name=None): """A set of stacked residual blocks for ResNet18/34. Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. name: string, stack label. Returns: Output tensor for the stacked blocks. 
""" x = block_basic(x, filters, conv_shortcut=True, name=name + '_block1') for i in range(2, blocks): x = block_basic(x, filters, name=name + '_block' + str(i)) x = block_basic( x, filters, stride=stride1, name=name + '_block' + str(blocks)) return x def resnet_v1_18(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax'): """Instantiates the ResNet18 architecture.""" def stack_fn(x): x = stack_basic(x, 64, 2, stride1=1, name='conv2') x = stack_basic(x, 128, 2, name='conv3') x = stack_basic(x, 256, 2, name='conv4') return stack_basic(x, 512, 2, name='conv5') return resnet.ResNet( stack_fn, True, True, 'resnet18', include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation=classifier_activation) def resnet_v1_34(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax'): """Instantiates the ResNet34 architecture.""" def stack_fn(x): x = stack_basic(x, 64, 3, stride1=1, name='conv2') x = stack_basic(x, 128, 4, name='conv3') x = stack_basic(x, 256, 6, name='conv4') return stack_basic(x, 512, 3, name='conv5') return resnet.ResNet( stack_fn, True, True, 'resnet34', include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation=classifier_activation)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/resnet_v1.py
resnet_v1.py
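The `_fixed_padding` helper in resnet_v1.py above reproduces slim's symmetric padding for stride-2 convolutions; a worked example of the arithmetic for a 7x7 kernel, following the function body directly:

# kernel_size_effective = 7 + (7 - 1) * (1 - 1) = 7
# pad_total = 7 - 1 = 6  ->  pad_beg = 3, pad_end = 3
# A [1, 224, 224, 3] input therefore becomes [1, 230, 230, 3], and a stride-2
# 'VALID' conv on the padded tensor yields the same 112x112 output that a
# 'SAME' conv would give on the unpadded input.
import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import resnet_v1

padded = resnet_v1._fixed_padding(tf.zeros([1, 224, 224, 3]), kernel_size=7)
assert padded.shape.as_list() == [1, 230, 230, 3]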
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test utils for other test files.""" # import tensorflow as tf # # from nets import mobilenet_v1 # # slim = tf.contrib.slim # # # Layer names of Slim to map Keras layer names in MobilenetV1 # _MOBLIENET_V1_SLIM_ENDPOINTS = [ # 'Conv2d_0', # 'Conv2d_1_depthwise', 'Conv2d_1_pointwise', # 'Conv2d_2_depthwise', 'Conv2d_2_pointwise', # 'Conv2d_3_depthwise', 'Conv2d_3_pointwise', # 'Conv2d_4_depthwise', 'Conv2d_4_pointwise', # 'Conv2d_5_depthwise', 'Conv2d_5_pointwise', # 'Conv2d_6_depthwise', 'Conv2d_6_pointwise', # 'Conv2d_7_depthwise', 'Conv2d_7_pointwise', # 'Conv2d_8_depthwise', 'Conv2d_8_pointwise', # 'Conv2d_9_depthwise', 'Conv2d_9_pointwise', # 'Conv2d_10_depthwise', 'Conv2d_10_pointwise', # 'Conv2d_11_depthwise', 'Conv2d_11_pointwise', # 'Conv2d_12_depthwise', 'Conv2d_12_pointwise', # 'Conv2d_13_depthwise', 'Conv2d_13_pointwise' # ] # # # # Function to get the output shape of each layer in Slim. It's used to # # generate the following constant expected_feature_map_shape for MobilenetV1. # # Similarly, this can also apply to MobilenetV2. # def _get_slim_endpoint_shapes(inputs, depth_multiplier=1.0, min_depth=8, # use_explicit_padding=False): # with slim.arg_scope([slim.conv2d, slim.separable_conv2d], # normalizer_fn=slim.batch_norm): # _, end_points = mobilenet_v1.mobilenet_v1_base( # inputs, final_endpoint='Conv2d_13_pointwise', # depth_multiplier=depth_multiplier, min_depth=min_depth, # use_explicit_padding=use_explicit_padding) # return [end_points[endpoint_name].get_shape() # for endpoint_name in _MOBLIENET_V1_SLIM_ENDPOINTS] # For Mobilenet V1 moblenet_v1_expected_feature_map_shape_128 = [ (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), (2, 16, 16, 256), (2, 16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), ] moblenet_v1_expected_feature_map_shape_128_explicit_padding = [ (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), (2, 16, 16, 256), (2, 16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 256), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), ] mobilenet_v1_expected_feature_map_shape_with_dynamic_inputs = [ (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), (2, 16, 16, 256), (2, 16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 
256), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), ] moblenet_v1_expected_feature_map_shape_299 = [ (2, 150, 150, 32), (2, 150, 150, 32), (2, 150, 150, 64), (2, 75, 75, 64), (2, 75, 75, 128), (2, 75, 75, 128), (2, 75, 75, 128), (2, 38, 38, 128), (2, 38, 38, 256), (2, 38, 38, 256), (2, 38, 38, 256), (2, 19, 19, 256), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 10, 10, 512), (2, 10, 10, 1024), (2, 10, 10, 1024), (2, 10, 10, 1024), ] moblenet_v1_expected_feature_map_shape_enforcing_min_depth = [ (2, 150, 150, 8), (2, 150, 150, 8), (2, 150, 150, 8), (2, 75, 75, 8), (2, 75, 75, 8), (2, 75, 75, 8), (2, 75, 75, 8), (2, 38, 38, 8), (2, 38, 38, 8), (2, 38, 38, 8), (2, 38, 38, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 10, 10, 8), (2, 10, 10, 8), (2, 10, 10, 8), (2, 10, 10, 8), ] moblenet_v1_expected_feature_map_shape_with_conv_defs = [ (2, 150, 150, 32), (2, 150, 150, 32), (2, 150, 150, 64), (2, 75, 75, 64), (2, 75, 75, 128), (2, 75, 75, 128), (2, 75, 75, 128), (2, 38, 38, 128), (2, 38, 38, 256), (2, 38, 38, 256), (2, 38, 38, 256), (2, 19, 19, 256), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 10, 10, 512), (2, 10, 10, 512), (2, 10, 10, 512), (2, 10, 10, 256), ] # For Mobilenet V2 moblenet_v2_expected_feature_map_shape_128 = [ (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 320), (2, 4, 4, 1280) ] moblenet_v2_expected_feature_map_shape_128_explicit_padding = [ (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 
4, 960), (2, 4, 4, 960), (2, 4, 4, 320), (2, 4, 4, 1280) ] mobilenet_v2_expected_feature_map_shape_with_dynamic_inputs = [ (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 320), (2, 4, 4, 1280) ] moblenet_v2_expected_feature_map_shape_299 = [ (2, 150, 150, 32), (2, 150, 150, 96), (2, 75, 75, 96), (2, 75, 75, 24), (2, 75, 75, 144), (2, 75, 75, 144), (2, 75, 75, 24), (2, 75, 75, 144), (2, 38, 38, 144), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 96), (2, 19, 19, 576), (2, 19, 19, 576), (2, 19, 19, 96), (2, 19, 19, 576), (2, 19, 19, 576), (2, 19, 19, 96), (2, 19, 19, 576), (2, 10, 10, 576), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 320), (2, 10, 10, 1280) ] moblenet_v2_expected_feature_map_shape_enforcing_min_depth = [ (2, 150, 150, 32), (2, 150, 150, 192), (2, 75, 75, 192), (2, 75, 75, 32), (2, 75, 75, 192), (2, 75, 75, 192), (2, 75, 75, 32), (2, 75, 75, 192), (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 10, 10, 192), (2, 10, 10, 32), (2, 10, 10, 192), (2, 10, 10, 192), (2, 10, 10, 32), (2, 10, 10, 192), (2, 10, 10, 192), (2, 10, 10, 32), (2, 10, 10, 192), (2, 10, 10, 192), (2, 10, 10, 32), (2, 10, 10, 32) ] moblenet_v2_expected_feature_map_shape_with_conv_defs = [ (2, 150, 150, 32), (2, 150, 150, 96), (2, 75, 75, 96), (2, 75, 75, 24), (2, 75, 75, 144), (2, 75, 75, 144), (2, 75, 75, 24), (2, 75, 75, 144), (2, 38, 38, 144), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 96), (2, 19, 19, 576), (2, 19, 19, 576), (2, 19, 19, 96), (2, 19, 19, 576), (2, 19, 19, 576), (2, 19, 19, 96), (2, 19, 
19, 576), (2, 10, 10, 576), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 320), (2, 10, 10, 256) ]
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/test_utils.py
test_utils.py
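The shape fixtures in test_utils.py above all follow the same spatial pattern: each of MobileNet's five stride-2 convolutions halves the feature-map side length, rounding up under 'same' padding (128 -> 64, 32, 16, 8, 4; 299 -> 150, 75, 38, 19, 10). A minimal standalone sketch (not part of the library) that reproduces those side lengths:

import math

def spatial_sizes(input_size, num_stride2_stages=5):
  """Feature-map side lengths after each stride-2 stage ('same' padding)."""
  sizes = []
  size = input_size
  for _ in range(num_stride2_stages):
    size = int(math.ceil(size / 2.0))
    sizes.append(size)
  return sizes

print(spatial_sizes(128))  # [64, 32, 16, 8, 4], as in the *_128 fixtures above
print(spatial_sizes(299))  # [150, 75, 38, 19, 10], as in the *_299 fixtures above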
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A wrapper around the Keras MobilenetV1 models for object detection.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from object_detection.core import freezable_batch_norm from object_detection.models.keras_models import model_utils def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name """Pads the input along the spatial dimensions independently of input size. Pads the input such that if it was used in a convolution with 'VALID' padding, the output would have the same dimensions as if the unpadded input was used in a convolution with 'SAME' padding. Args: inputs: A tensor of size [batch, height_in, width_in, channels]. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. rate: An integer, rate for atrous convolution. Returns: output: A tensor of size [batch, height_out, width_out, channels] with the input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1), kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)] pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] pad_beg = [pad_total[0] // 2, pad_total[1] // 2] pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]], [0, 0]]) return padded_inputs class _LayersOverride(object): """Alternative Keras layers interface for the Keras MobileNetV1.""" def __init__(self, batchnorm_training, default_batchnorm_momentum=0.999, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None): """Alternative tf.keras.layers interface, for use by the Keras MobileNetV1. It is used by the Keras applications kwargs injection API to modify the MobilenetV1 Keras application with changes required by the Object Detection API. These injected interfaces make the following changes to the network: - Applies the Object Detection hyperparameter configuration - Supports FreezableBatchNorms - Adds support for a min number of filters for each layer - Makes the `alpha` parameter affect the final convolution block even if it is less than 1.0 - Adds support for explicit padding of convolutions Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default mobilenet_v1 layer builders. 
use_explicit_padding: If True, use 'valid' padding for convolutions, but explicitly pre-pads inputs so that the output dimensions are the same as if 'same' padding were used. Off by default. alpha: The width multiplier referenced in the MobileNetV1 paper. It modifies the number of filters in each convolutional layer. It's called depth multiplier in Keras application MobilenetV1. min_depth: Minimum number of filters in the convolutional layers. conv_defs: Network layout to specify the mobilenet_v1 body. Default is `None` to use the default mobilenet_v1 network layout. """ self._alpha = alpha self._batchnorm_training = batchnorm_training self._default_batchnorm_momentum = default_batchnorm_momentum self._conv_hyperparams = conv_hyperparams self._use_explicit_padding = use_explicit_padding self._min_depth = min_depth self._conv_defs = conv_defs self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5) self.initializer = tf.truncated_normal_initializer(stddev=0.09) def _FixedPaddingLayer(self, kernel_size, rate=1): return tf.keras.layers.Lambda( lambda x: _fixed_padding(x, kernel_size, rate)) def Conv2D(self, filters, kernel_size, **kwargs): """Builds a Conv2D layer according to the current Object Detection config. Overrides the Keras MobileNetV1 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: filters: The number of filters to use for the convolution. kernel_size: The kernel size to specify the height and width of the 2D convolution window. In this function, the kernel size is expected to be pair of numbers and the numbers must be equal for this function. **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras Conv2D layer to the input argument, or that will first pad the input then apply a Conv2D layer. Raises: ValueError: if kernel size is not a pair of equal integers (representing a square kernel). """ if not isinstance(kernel_size, tuple): raise ValueError('kernel is expected to be a tuple.') if len(kernel_size) != 2: raise ValueError('kernel is expected to be length two.') if kernel_size[0] != kernel_size[1]: raise ValueError('kernel is expected to be square.') layer_name = kwargs['name'] if self._conv_defs: conv_filters = model_utils.get_conv_def(self._conv_defs, layer_name) if conv_filters: filters = conv_filters # Apply the width multiplier and the minimum depth to the convolution layers filters = int(filters * self._alpha) if self._min_depth and filters < self._min_depth: filters = self._min_depth if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) else: kwargs['kernel_regularizer'] = self.regularizer kwargs['kernel_initializer'] = self.initializer kwargs['padding'] = 'same' if self._use_explicit_padding and kernel_size[0] > 1: kwargs['padding'] = 'valid' def padded_conv(features): # pylint: disable=invalid-name padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.Conv2D( filters, kernel_size, **kwargs)(padded_features) return padded_conv else: return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) def DepthwiseConv2D(self, kernel_size, **kwargs): """Builds a DepthwiseConv2D according to the Object Detection config. Overrides the Keras MobileNetV2 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: kernel_size: The kernel size to specify the height and width of the 2D convolution window. 
**kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras DepthwiseConv2D layer to the input argument, or that will first pad the input then apply the depthwise convolution. """ if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) # Both regularizer and initializaer also applies to depthwise layer in # MobilenetV1, so we remap the kernel_* to depthwise_* here. kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] else: kwargs['depthwise_regularizer'] = self.regularizer kwargs['depthwise_initializer'] = self.initializer kwargs['padding'] = 'same' if self._use_explicit_padding: kwargs['padding'] = 'valid' def padded_depthwise_conv(features): # pylint: disable=invalid-name padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.DepthwiseConv2D( kernel_size, **kwargs)(padded_features) return padded_depthwise_conv else: return tf.keras.layers.DepthwiseConv2D(kernel_size, **kwargs) def BatchNormalization(self, **kwargs): """Builds a normalization layer. Overrides the Keras application batch norm with the norm specified by the Object Detection configuration. Args: **kwargs: Only the name is used, all other params ignored. Required for matching `layers.BatchNormalization` calls in the Keras application. Returns: A normalization layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_batch_norm( training=self._batchnorm_training, name=name) else: return freezable_batch_norm.FreezableBatchNorm( training=self._batchnorm_training, epsilon=1e-3, momentum=self._default_batchnorm_momentum, name=name) def Input(self, shape): """Builds an Input layer. Overrides the Keras application Input layer with one that uses a tf.placeholder_with_default instead of a tf.placeholder. This is necessary to ensure the application works when run on a TPU. Args: shape: The shape for the input layer to use. (Does not include a dimension for the batch size). Returns: An input layer for the specified shape that internally uses a placeholder_with_default. """ default_size = 224 default_batch_size = 1 shape = list(shape) default_shape = [default_size if dim is None else dim for dim in shape] input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) placeholder_with_default = tf.placeholder_with_default( input=input_tensor, shape=[None] + shape) return model_utils.input_layer(shape, placeholder_with_default) # pylint: disable=unused-argument def ReLU(self, *args, **kwargs): """Builds an activation layer. Overrides the Keras application ReLU with the activation specified by the Object Detection configuration. Args: *args: Ignored, required to match the `tf.keras.ReLU` interface **kwargs: Only the name is used, required to match `tf.keras.ReLU` interface Returns: An activation layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_activation_layer(name=name) else: return tf.keras.layers.Lambda(tf.nn.relu6, name=name) # pylint: enable=unused-argument # pylint: disable=unused-argument def ZeroPadding2D(self, padding, **kwargs): """Replaces explicit padding in the Keras application with a no-op. Args: padding: The padding values for image height and width. 
**kwargs: Ignored, required to match the Keras applications usage. Returns: A no-op identity lambda. """ return lambda x: x # pylint: enable=unused-argument # Forward all non-overridden methods to the keras layers def __getattr__(self, item): return getattr(tf.keras.layers, item) # pylint: disable=invalid-name def mobilenet_v1(batchnorm_training, default_batchnorm_momentum=0.9997, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None, **kwargs): """Instantiates the MobileNetV1 architecture, modified for object detection. This wraps the MobileNetV1 tensorflow Keras application, but uses the Keras application's kwargs-based monkey-patching API to override the Keras architecture with the following changes: - Changes the default batchnorm momentum to 0.9997 - Applies the Object Detection hyperparameter configuration - Supports FreezableBatchNorms - Adds support for a min number of filters for each layer - Makes the `alpha` parameter affect the final convolution block even if it is less than 1.0 - Adds support for explicit padding of convolutions - Makes the Input layer use a tf.placeholder_with_default instead of a tf.placeholder, to work on TPUs. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default mobilenet_v1 layer builders. use_explicit_padding: If True, use 'valid' padding for convolutions, but explicitly pre-pads inputs so that the output dimensions are the same as if 'same' padding were used. Off by default. alpha: The width multiplier referenced in the MobileNetV1 paper. It modifies the number of filters in each convolutional layer. min_depth: Minimum number of filters in the convolutional layers. conv_defs: Network layout to specify the mobilenet_v1 body. Default is `None` to use the default mobilenet_v1 network layout. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.Mobilenet` method that constructs the Keras model. Returns: A Keras model instance. """ layers_override = _LayersOverride( batchnorm_training, default_batchnorm_momentum=default_batchnorm_momentum, conv_hyperparams=conv_hyperparams, use_explicit_padding=use_explicit_padding, min_depth=min_depth, alpha=alpha, conv_defs=conv_defs) return tf.keras.applications.MobileNet( alpha=alpha, layers=layers_override, **kwargs) # pylint: enable=invalid-name
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/mobilenet_v1.py
mobilenet_v1.py
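A minimal usage sketch for the `mobilenet_v1` wrapper in mobilenet_v1.py above. The call pattern mirrors the MobileNetV2 test further down in this listing; `weights=None` and `include_top=False` are keyword arguments forwarded to `tf.keras.applications.MobileNet`, the `'conv_pw_12'` ConvDefs entry comes from the comment in model_utils.py, and `'conv_pw_13_relu'` is assumed to be the name of the last pointwise activation in the stock Keras MobileNet application:

import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import mobilenet_v1
from object_detection.models.keras_models import model_utils

# Pin the 'conv_pw_12' block to 512 filters and build the detection-friendly
# MobileNetV1 (no classification head, randomly initialized weights).
conv_defs = [model_utils.ConvDefs(conv_name='conv_pw_12', filters=512)]
full_model = mobilenet_v1.mobilenet_v1(
    batchnorm_training=False,
    alpha=1.0,
    min_depth=8,
    conv_defs=conv_defs,
    weights=None,
    include_top=False)

# Expose an intermediate activation as the feature map.
feature_extractor = tf.keras.Model(
    inputs=full_model.inputs,
    outputs=full_model.get_layer(name='conv_pw_13_relu').output)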
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A wrapper around the MobileNet v2 models for Keras, for object detection.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from object_detection.core import freezable_batch_norm from object_detection.models.keras_models import model_utils from object_detection.utils import ops # pylint: disable=invalid-name # This method copied from the slim mobilenet base network code (same license) def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v class _LayersOverride(object): """Alternative Keras layers interface for the Keras MobileNetV2.""" def __init__(self, batchnorm_training, default_batchnorm_momentum=0.999, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None): """Alternative tf.keras.layers interface, for use by the Keras MobileNetV2. It is used by the Keras applications kwargs injection API to modify the Mobilenet v2 Keras application with changes required by the Object Detection API. These injected interfaces make the following changes to the network: - Applies the Object Detection hyperparameter configuration - Supports FreezableBatchNorms - Adds support for a min number of filters for each layer - Makes the `alpha` parameter affect the final convolution block even if it is less than 1.0 - Adds support for explicit padding of convolutions Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default mobilenet_v2 layer builders. use_explicit_padding: If True, use 'valid' padding for convolutions, but explicitly pre-pads inputs so that the output dimensions are the same as if 'same' padding were used. Off by default. alpha: The width multiplier referenced in the MobileNetV2 paper. It modifies the number of filters in each convolutional layer. min_depth: Minimum number of filters in the convolutional layers. conv_defs: Network layout to specify the mobilenet_v2 body. Default is `None` to use the default mobilenet_v2 network layout. 
""" self._alpha = alpha self._batchnorm_training = batchnorm_training self._default_batchnorm_momentum = default_batchnorm_momentum self._conv_hyperparams = conv_hyperparams self._use_explicit_padding = use_explicit_padding self._min_depth = min_depth self._conv_defs = conv_defs self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5) self.initializer = tf.truncated_normal_initializer(stddev=0.09) def _FixedPaddingLayer(self, kernel_size): return tf.keras.layers.Lambda(lambda x: ops.fixed_padding(x, kernel_size)) def Conv2D(self, filters, **kwargs): """Builds a Conv2D layer according to the current Object Detection config. Overrides the Keras MobileNetV2 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: filters: The number of filters to use for the convolution. **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras Conv2D layer to the input argument, or that will first pad the input then apply a Conv2D layer. """ # Make sure 'alpha' is always applied to the last convolution block's size # (This overrides the Keras application's functionality) layer_name = kwargs.get('name') if layer_name == 'Conv_1': if self._conv_defs: filters = model_utils.get_conv_def(self._conv_defs, 'Conv_1') else: filters = 1280 if self._alpha < 1.0: filters = _make_divisible(filters * self._alpha, 8) # Apply the minimum depth to the convolution layers if (self._min_depth and (filters < self._min_depth) and not kwargs.get('name').endswith('expand')): filters = self._min_depth if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) else: kwargs['kernel_regularizer'] = self.regularizer kwargs['kernel_initializer'] = self.initializer kwargs['padding'] = 'same' kernel_size = kwargs.get('kernel_size') if self._use_explicit_padding and kernel_size > 1: kwargs['padding'] = 'valid' def padded_conv(features): padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.Conv2D(filters, **kwargs)(padded_features) return padded_conv else: return tf.keras.layers.Conv2D(filters, **kwargs) def DepthwiseConv2D(self, **kwargs): """Builds a DepthwiseConv2D according to the Object Detection config. Overrides the Keras MobileNetV2 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras DepthwiseConv2D layer to the input argument, or that will first pad the input then apply the depthwise convolution. """ if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) # Both the regularizer and initializer apply to the depthwise layer in # MobilenetV1, so we remap the kernel_* to depthwise_* here. 
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] else: kwargs['depthwise_regularizer'] = self.regularizer kwargs['depthwise_initializer'] = self.initializer kwargs['padding'] = 'same' kernel_size = kwargs.get('kernel_size') if self._use_explicit_padding and kernel_size > 1: kwargs['padding'] = 'valid' def padded_depthwise_conv(features): padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.DepthwiseConv2D(**kwargs)(padded_features) return padded_depthwise_conv else: return tf.keras.layers.DepthwiseConv2D(**kwargs) def BatchNormalization(self, **kwargs): """Builds a normalization layer. Overrides the Keras application batch norm with the norm specified by the Object Detection configuration. Args: **kwargs: Only the name is used, all other params ignored. Required for matching `layers.BatchNormalization` calls in the Keras application. Returns: A normalization layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_batch_norm( training=self._batchnorm_training, name=name) else: return freezable_batch_norm.FreezableBatchNorm( training=self._batchnorm_training, epsilon=1e-3, momentum=self._default_batchnorm_momentum, name=name) def Input(self, shape): """Builds an Input layer. Overrides the Keras application Input layer with one that uses a tf.placeholder_with_default instead of a tf.placeholder. This is necessary to ensure the application works when run on a TPU. Args: shape: The shape for the input layer to use. (Does not include a dimension for the batch size). Returns: An input layer for the specified shape that internally uses a placeholder_with_default. """ default_size = 224 default_batch_size = 1 shape = list(shape) default_shape = [default_size if dim is None else dim for dim in shape] input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) placeholder_with_default = tf.placeholder_with_default( input=input_tensor, shape=[None] + shape) return model_utils.input_layer(shape, placeholder_with_default) # pylint: disable=unused-argument def ReLU(self, *args, **kwargs): """Builds an activation layer. Overrides the Keras application ReLU with the activation specified by the Object Detection configuration. Args: *args: Ignored, required to match the `tf.keras.ReLU` interface **kwargs: Only the name is used, required to match `tf.keras.ReLU` interface Returns: An activation layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_activation_layer(name=name) else: return tf.keras.layers.Lambda(tf.nn.relu6, name=name) # pylint: enable=unused-argument # pylint: disable=unused-argument def ZeroPadding2D(self, **kwargs): """Replaces explicit padding in the Keras application with a no-op. Args: **kwargs: Ignored, required to match the Keras applications usage. Returns: A no-op identity lambda. """ return lambda x: x # pylint: enable=unused-argument # Forward all non-overridden methods to the keras layers def __getattr__(self, item): return getattr(tf.keras.layers, item) def mobilenet_v2(batchnorm_training, default_batchnorm_momentum=0.9997, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None, **kwargs): """Instantiates the MobileNetV2 architecture, modified for object detection. 
This wraps the MobileNetV2 tensorflow Keras application, but uses the Keras application's kwargs-based monkey-patching API to override the Keras architecture with the following changes: - Changes the default batchnorm momentum to 0.9997 - Applies the Object Detection hyperparameter configuration - Supports FreezableBatchNorms - Adds support for a min number of filters for each layer - Makes the `alpha` parameter affect the final convolution block even if it is less than 1.0 - Adds support for explicit padding of convolutions - Makes the Input layer use a tf.placeholder_with_default instead of a tf.placeholder, to work on TPUs. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default mobilenet_v2 layer builders. use_explicit_padding: If True, use 'valid' padding for convolutions, but explicitly pre-pads inputs so that the output dimensions are the same as if 'same' padding were used. Off by default. alpha: The width multiplier referenced in the MobileNetV2 paper. It modifies the number of filters in each convolutional layer. min_depth: Minimum number of filters in the convolutional layers. conv_defs: Network layout to specify the mobilenet_v2 body. Default is `None` to use the default mobilenet_v2 network layout. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.MobilenetV2` method that constructs the Keras model. Returns: A Keras model instance. """ layers_override = _LayersOverride( batchnorm_training, default_batchnorm_momentum=default_batchnorm_momentum, conv_hyperparams=conv_hyperparams, use_explicit_padding=use_explicit_padding, min_depth=min_depth, alpha=alpha, conv_defs=conv_defs) return tf.keras.applications.MobileNetV2(alpha=alpha, layers=layers_override, **kwargs) # pylint: enable=invalid-name
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/mobilenet_v2.py
mobilenet_v2.py
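The `mobilenet_v2` wrapper in mobilenet_v2.py above is exercised by mobilenet_v2_tf2_test.py in the next record; the condensed sketch below follows the same pattern, overriding the final 'Conv_1' block via ConvDefs and exposing the 'out_relu' activation (a layer name taken from the test's `_layers_to_check` list):

import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import mobilenet_v2
from object_detection.models.keras_models import model_utils

# Shrink the final 'Conv_1' block to 256 filters, as in
# test_returns_correct_shapes_with_conv_defs in the test file below.
conv_defs = [model_utils.ConvDefs(conv_name='Conv_1', filters=256)]
full_model = mobilenet_v2.mobilenet_v2(
    batchnorm_training=False,
    alpha=1.0,
    conv_defs=conv_defs,
    weights=None,          # forwarded to tf.keras.applications.MobileNetV2
    include_top=False)

backbone = tf.keras.Model(
    inputs=full_model.inputs,
    outputs=full_model.get_layer(name='out_relu').output)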
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for mobilenet_v2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import numpy as np from six.moves import zip import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.models.keras_models import mobilenet_v2 from object_detection.models.keras_models import model_utils from object_detection.models.keras_models import test_utils from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version _layers_to_check = [ 'Conv1_relu', 'block_1_expand_relu', 'block_1_depthwise_relu', 'block_1_project_BN', 'block_2_expand_relu', 'block_2_depthwise_relu', 'block_2_project_BN', 'block_3_expand_relu', 'block_3_depthwise_relu', 'block_3_project_BN', 'block_4_expand_relu', 'block_4_depthwise_relu', 'block_4_project_BN', 'block_5_expand_relu', 'block_5_depthwise_relu', 'block_5_project_BN', 'block_6_expand_relu', 'block_6_depthwise_relu', 'block_6_project_BN', 'block_7_expand_relu', 'block_7_depthwise_relu', 'block_7_project_BN', 'block_8_expand_relu', 'block_8_depthwise_relu', 'block_8_project_BN', 'block_9_expand_relu', 'block_9_depthwise_relu', 'block_9_project_BN', 'block_10_expand_relu', 'block_10_depthwise_relu', 'block_10_project_BN', 'block_11_expand_relu', 'block_11_depthwise_relu', 'block_11_project_BN', 'block_12_expand_relu', 'block_12_depthwise_relu', 'block_12_project_BN', 'block_13_expand_relu', 'block_13_depthwise_relu', 'block_13_project_BN', 'block_14_expand_relu', 'block_14_depthwise_relu', 'block_14_project_BN', 'block_15_expand_relu', 'block_15_depthwise_relu', 'block_15_project_BN', 'block_16_expand_relu', 'block_16_depthwise_relu', 'block_16_project_BN', 'out_relu'] @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class MobilenetV2Test(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } batch_norm { train: true, scale: false, center: true, decay: 0.2, epsilon: 0.1, } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def _create_application_with_layer_outputs( self, layer_names, batchnorm_training, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None): """Constructs Keras mobilenetv2 that extracts intermediate layer outputs.""" # Have to clear the Keras backend to ensure isolation in layer naming tf.keras.backend.clear_session() if not layer_names: layer_names = _layers_to_check full_model = 
mobilenet_v2.mobilenet_v2( batchnorm_training=batchnorm_training, conv_hyperparams=conv_hyperparams, weights=None, use_explicit_padding=use_explicit_padding, alpha=alpha, min_depth=min_depth, include_top=False, conv_defs=conv_defs) layer_outputs = [full_model.get_layer(name=layer).output for layer in layer_names] return tf.keras.Model( inputs=full_model.inputs, outputs=layer_outputs) def _check_returns_correct_shape( self, batch_size, image_height, image_width, depth_multiplier, expected_feature_map_shapes, use_explicit_padding=False, min_depth=None, layer_names=None, conv_defs=None): model = self._create_application_with_layer_outputs( layer_names=layer_names, batchnorm_training=False, use_explicit_padding=use_explicit_padding, min_depth=min_depth, alpha=depth_multiplier, conv_defs=conv_defs) image_tensor = np.random.rand(batch_size, image_height, image_width, 3).astype(np.float32) feature_maps = model([image_tensor]) for feature_map, expected_shape in zip(feature_maps, expected_feature_map_shapes): self.assertAllEqual(feature_map.shape, expected_shape) def _check_returns_correct_shapes_with_dynamic_inputs( self, batch_size, image_height, image_width, depth_multiplier, expected_feature_map_shapes, use_explicit_padding=False, layer_names=None): height = tf.random.uniform([], minval=image_height, maxval=image_height+1, dtype=tf.int32) width = tf.random.uniform([], minval=image_width, maxval=image_width+1, dtype=tf.int32) image_tensor = tf.random.uniform([batch_size, height, width, 3], dtype=tf.float32) model = self._create_application_with_layer_outputs( layer_names=layer_names, batchnorm_training=False, use_explicit_padding=use_explicit_padding, alpha=depth_multiplier) feature_maps = model(image_tensor) for feature_map, expected_shape in zip(feature_maps, expected_feature_map_shapes): self.assertAllEqual(feature_map.shape, expected_shape) def _get_variables(self, depth_multiplier, layer_names=None): tf.keras.backend.clear_session() model = self._create_application_with_layer_outputs( layer_names=layer_names, batchnorm_training=False, use_explicit_padding=False, alpha=depth_multiplier) preprocessed_inputs = tf.random.uniform([2, 40, 40, 3]) model(preprocessed_inputs) return model.variables def test_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 expected_feature_map_shape = ( test_utils.moblenet_v2_expected_feature_map_shape_128) self._check_returns_correct_shape( 2, image_height, image_width, depth_multiplier, expected_feature_map_shape) def test_returns_correct_shapes_128_explicit_padding( self): image_height = 128 image_width = 128 depth_multiplier = 1.0 expected_feature_map_shape = ( test_utils.moblenet_v2_expected_feature_map_shape_128_explicit_padding) self._check_returns_correct_shape( 2, image_height, image_width, depth_multiplier, expected_feature_map_shape, use_explicit_padding=True) def test_returns_correct_shapes_with_dynamic_inputs( self): image_height = 128 image_width = 128 depth_multiplier = 1.0 expected_feature_map_shape = ( test_utils.mobilenet_v2_expected_feature_map_shape_with_dynamic_inputs) self._check_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, expected_feature_map_shape) def test_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 expected_feature_map_shape = ( test_utils.moblenet_v2_expected_feature_map_shape_299) self._check_returns_correct_shape( 2, image_height, image_width, depth_multiplier, expected_feature_map_shape) def 
test_returns_correct_shapes_enforcing_min_depth( self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 expected_feature_map_shape = ( test_utils.moblenet_v2_expected_feature_map_shape_enforcing_min_depth) self._check_returns_correct_shape( 2, image_height, image_width, depth_multiplier, expected_feature_map_shape, min_depth=32) def test_returns_correct_shapes_with_conv_defs( self): image_height = 299 image_width = 299 depth_multiplier = 1.0 conv_1 = model_utils.ConvDefs( conv_name='Conv_1', filters=256) conv_defs = [conv_1] expected_feature_map_shape = ( test_utils.moblenet_v2_expected_feature_map_shape_with_conv_defs) self._check_returns_correct_shape( 2, image_height, image_width, depth_multiplier, expected_feature_map_shape, conv_defs=conv_defs) def test_hyperparam_override(self): hyperparams = self._build_conv_hyperparams() model = mobilenet_v2.mobilenet_v2( batchnorm_training=True, conv_hyperparams=hyperparams, weights=None, use_explicit_padding=False, alpha=1.0, min_depth=32, include_top=False) hyperparams.params() bn_layer = model.get_layer(name='block_5_project_BN') self.assertAllClose(bn_layer.momentum, 0.2) self.assertAllClose(bn_layer.epsilon, 0.1) def test_variable_count(self): depth_multiplier = 1 variables = self._get_variables(depth_multiplier) self.assertEqual(len(variables), 260) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/mobilenet_v2_tf2_test.py
mobilenet_v2_tf2_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils for Keras models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import tensorflow.compat.v1 as tf # This is to specify the custom config of model structures. For example, # ConvDefs(conv_name='conv_pw_12', filters=512) for Mobilenet V1 is to specify # the filters of the conv layer with name 'conv_pw_12' as 512. ConvDefs = collections.namedtuple('ConvDefs', ['conv_name', 'filters']) def get_conv_def(conv_defs, layer_name): """Gets the custom config for a given layer of the model structure. Args: conv_defs: A list of `ConvDefs` named tuples specifying the custom config of the model network. See `ConvDefs` for details. layer_name: A string, the name of the layer to be customized. Returns: The number of filters for the layer, or `None` if there is no custom config for the requested layer. """ for conv_def in conv_defs: if layer_name == conv_def.conv_name: return conv_def.filters return None def input_layer(shape, placeholder_with_default): if tf.executing_eagerly(): return tf.keras.layers.Input(shape=shape) else: return tf.keras.layers.Input(tensor=placeholder_with_default)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/model_utils.py
model_utils.py
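`get_conv_def` in model_utils.py above is a linear scan over the list of `ConvDefs` entries: it returns the filter count for the first matching layer name and `None` otherwise. A small usage sketch built on the example named in the module's own comment:

from object_detection.models.keras_models import model_utils

# Pin the MobileNetV1 layer 'conv_pw_12' to 512 filters.
conv_defs = [model_utils.ConvDefs(conv_name='conv_pw_12', filters=512)]

print(model_utils.get_conv_def(conv_defs, 'conv_pw_12'))  # 512
print(model_utils.get_conv_def(conv_defs, 'conv_pw_11'))  # None (no custom config)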
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Square box coder. Square box coder follows the coding schema described below: l = sqrt(h * w) la = sqrt(ha * wa) ty = (y - ya) / la tx = (x - xa) / la tl = log(l / la) where x, y, w, h denote the box's center coordinates, width, and height, respectively. Similarly, xa, ya, wa, ha denote the anchor's center coordinates, width and height. tx, ty, tl denote the anchor-encoded center, and length, respectively. Because the encoded box is a square, only one length is encoded. This has shown to provide performance improvements over the Faster RCNN box coder when the objects being detected tend to be square (e.g. faces) and when the input images are not distorted via resizing. """ import tensorflow.compat.v1 as tf from object_detection.core import box_coder from object_detection.core import box_list EPSILON = 1e-8 class SquareBoxCoder(box_coder.BoxCoder): """Encodes a 3-scalar representation of a square box.""" def __init__(self, scale_factors=None): """Constructor for SquareBoxCoder. Args: scale_factors: List of 3 positive scalars to scale ty, tx, and tl. If set to None, does not perform scaling. For faster RCNN, the open-source implementation recommends using [10.0, 10.0, 5.0]. Raises: ValueError: If scale_factors is not length 3 or contains values less than or equal to 0. """ if scale_factors: if len(scale_factors) != 3: raise ValueError('The argument scale_factors must be a list of length ' '3.') if any(scalar <= 0 for scalar in scale_factors): raise ValueError('The values in scale_factors must all be greater ' 'than 0.') self._scale_factors = scale_factors @property def code_size(self): return 3 def _encode(self, boxes, anchors): """Encodes a box collection with respect to an anchor collection. Args: boxes: BoxList holding N boxes to be encoded. anchors: BoxList of anchors. Returns: a tensor representing N anchor-encoded boxes of the format [ty, tx, tl]. """ # Convert anchors to the center coordinate representation. ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() la = tf.sqrt(ha * wa) ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() l = tf.sqrt(h * w) # Avoid NaN in division and log below. la += EPSILON l += EPSILON tx = (xcenter - xcenter_a) / la ty = (ycenter - ycenter_a) / la tl = tf.log(l / la) # Scales location targets for joint training. if self._scale_factors: ty *= self._scale_factors[0] tx *= self._scale_factors[1] tl *= self._scale_factors[2] return tf.transpose(tf.stack([ty, tx, tl])) def _decode(self, rel_codes, anchors): """Decodes relative codes to boxes. Args: rel_codes: a tensor representing N anchor-encoded boxes. anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes. 
""" ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() la = tf.sqrt(ha * wa) ty, tx, tl = tf.unstack(tf.transpose(rel_codes)) if self._scale_factors: ty /= self._scale_factors[0] tx /= self._scale_factors[1] tl /= self._scale_factors[2] l = tf.exp(tl) * la ycenter = ty * la + ycenter_a xcenter = tx * la + xcenter_a ymin = ycenter - l / 2. xmin = xcenter - l / 2. ymax = ycenter + l / 2. xmax = xcenter + l / 2. return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/square_box_coder.py
square_box_coder.py
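The square-box coding schema in square_box_coder.py above is easy to check by hand. The NumPy sketch below (not the BoxList-based API, and with the EPSILON guard omitted) reproduces the first expected code from square_box_coder_test.py later in this listing, approximately [-0.790569, -0.263523, -0.293893]:

import numpy as np

def encode_square(box, anchor):
  """Encodes one [ymin, xmin, ymax, xmax] box against one anchor as [ty, tx, tl]."""
  ymin, xmin, ymax, xmax = box
  ymin_a, xmin_a, ymax_a, xmax_a = anchor
  ycenter, xcenter = (ymin + ymax) / 2., (xmin + xmax) / 2.
  ycenter_a, xcenter_a = (ymin_a + ymax_a) / 2., (xmin_a + xmax_a) / 2.
  l = np.sqrt((ymax - ymin) * (xmax - xmin))            # l = sqrt(h * w)
  la = np.sqrt((ymax_a - ymin_a) * (xmax_a - xmin_a))   # la = sqrt(ha * wa)
  ty = (ycenter - ycenter_a) / la
  tx = (xcenter - xcenter_a) / la
  tl = np.log(l / la)
  return np.array([ty, tx, tl])

print(encode_square([10.0, 10.0, 20.0, 15.0], [15.0, 12.0, 30.0, 18.0]))
# -> approx. [-0.790569, -0.263523, -0.293893]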
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mean stddev box coder. This box coder use the following coding schema to encode boxes: rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev. """ from object_detection.core import box_coder from object_detection.core import box_list class MeanStddevBoxCoder(box_coder.BoxCoder): """Mean stddev box coder.""" def __init__(self, stddev=0.01): """Constructor for MeanStddevBoxCoder. Args: stddev: The standard deviation used to encode and decode boxes. """ self._stddev = stddev @property def code_size(self): return 4 def _encode(self, boxes, anchors): """Encode a box collection with respect to anchor collection. Args: boxes: BoxList holding N boxes to be encoded. anchors: BoxList of N anchors. Returns: a tensor representing N anchor-encoded boxes Raises: ValueError: if the anchors still have deprecated stddev field. """ box_corners = boxes.get() if anchors.has_field('stddev'): raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and " "should not be specified in the box list.") means = anchors.get() return (box_corners - means) / self._stddev def _decode(self, rel_codes, anchors): """Decode. Args: rel_codes: a tensor representing N anchor-encoded boxes. anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes Raises: ValueError: if the anchors still have deprecated stddev field and expects the decode method to use stddev value from that field. """ means = anchors.get() if anchors.has_field('stddev'): raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and " "should not be specified in the box list.") box_corners = rel_codes * self._stddev + means return box_list.BoxList(box_corners)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/mean_stddev_box_coder.py
mean_stddev_box_coder.py
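MeanStddevBoxCoder above is the simplest of the coders: each corner is offset by the corresponding anchor corner and divided by a fixed stddev (0.01 by default), and decoding inverts that. A NumPy sketch of the arithmetic only (the real class operates on BoxList objects):

import numpy as np

stddev = 0.01                                  # MeanStddevBoxCoder default
boxes = np.array([[0.1, 0.1, 0.2, 0.2]])       # [ymin, xmin, ymax, xmax]
anchors = np.array([[0.15, 0.12, 0.25, 0.22]])

rel_codes = (boxes - anchors) / stddev         # encode
decoded = rel_codes * stddev + anchors         # decode (round trip)

print(rel_codes)   # [[-5. -2. -5. -2.]]
print(decoded)     # [[0.1 0.1 0.2 0.2]] (up to float rounding)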
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.box_coder.faster_rcnn_box_coder.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.box_coders import faster_rcnn_box_coder from object_detection.core import box_list from object_detection.utils import test_case class FasterRcnnBoxCoderTest(test_case.TestCase): def test_get_correct_relative_codes_after_encoding(self): boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], np.float32) anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], np.float32) expected_rel_codes = [[-0.5, -0.416666, -0.405465, -0.182321], [-0.083333, -0.222222, -0.693147, -1.098612]] def graph_fn(boxes, anchors): boxes = box_list.BoxList(boxes) anchors = box_list.BoxList(anchors) coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, atol=1e-04) def test_get_correct_relative_codes_after_encoding_with_scaling(self): boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], np.float32) anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], np.float32) expected_rel_codes = [[-1., -1.25, -1.62186, -0.911608], [-0.166667, -0.666667, -2.772588, -5.493062]] def graph_fn(boxes, anchors): scale_factors = [2, 3, 4, 5] boxes = box_list.BoxList(boxes) anchors = box_list.BoxList(anchors) coder = faster_rcnn_box_coder.FasterRcnnBoxCoder( scale_factors=scale_factors) rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, atol=1e-04) def test_get_correct_boxes_after_decoding(self): anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], np.float32) rel_codes = np.array([[-0.5, -0.416666, -0.405465, -0.182321], [-0.083333, -0.222222, -0.693147, -1.098612]], np.float32) expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] def graph_fn(rel_codes, anchors): anchors = box_list.BoxList(anchors) coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() boxes = coder.decode(rel_codes, anchors) return boxes.get() boxes_out = self.execute(graph_fn, [rel_codes, anchors]) self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, atol=1e-04) def test_get_correct_boxes_after_decoding_with_scaling(self): anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], np.float32) rel_codes = np.array([[-1., -1.25, -1.62186, -0.911608], [-0.166667, -0.666667, -2.772588, -5.493062]], np.float32) expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] def graph_fn(rel_codes, anchors): scale_factors = [2, 3, 4, 5] anchors = box_list.BoxList(anchors) coder = faster_rcnn_box_coder.FasterRcnnBoxCoder( scale_factors=scale_factors) boxes = 
coder.decode(rel_codes, anchors).get() return boxes boxes_out = self.execute(graph_fn, [rel_codes, anchors]) self.assertAllClose(expected_boxes, boxes_out, rtol=1e-04, atol=1e-04) def test_very_small_Width_nan_after_encoding(self): boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32) anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32) expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826]] def graph_fn(boxes, anchors): boxes = box_list.BoxList(boxes) anchors = box_list.BoxList(anchors) coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, atol=1e-04) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/faster_rcnn_box_coder_test.py
faster_rcnn_box_coder_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.box_coder.square_box_coder.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.box_coders import square_box_coder from object_detection.core import box_list from object_detection.utils import test_case class SquareBoxCoderTest(test_case.TestCase): def test_correct_relative_codes_with_default_scale(self): boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], np.float32) anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], np.float32) expected_rel_codes = [[-0.790569, -0.263523, -0.293893], [-0.068041, -0.272166, -0.89588]] def graph_fn(boxes, anchors): scale_factors = None boxes = box_list.BoxList(boxes) anchors = box_list.BoxList(anchors) coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, atol=1e-04) def test_correct_relative_codes_with_non_default_scale(self): boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], np.float32) anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], np.float32) expected_rel_codes = [[-1.581139, -0.790569, -1.175573], [-0.136083, -0.816497, -3.583519]] def graph_fn(boxes, anchors): scale_factors = [2, 3, 4] boxes = box_list.BoxList(boxes) anchors = box_list.BoxList(anchors) coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-03, atol=1e-03) def test_correct_relative_codes_with_small_width(self): boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32) anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32) expected_rel_codes = [[-1.317616, 0., -20.670586]] def graph_fn(boxes, anchors): scale_factors = None boxes = box_list.BoxList(boxes) anchors = box_list.BoxList(anchors) coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) rel_codes = coder.encode(boxes, anchors) return rel_codes rel_codes_out = self.execute(graph_fn, [boxes, anchors]) self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, atol=1e-04) def test_correct_boxes_with_default_scale(self): anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], np.float32) rel_codes = np.array([[-0.5, -0.416666, -0.405465], [-0.083333, -0.222222, -0.693147]], np.float32) expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432], [0.155051, 0.102989, 0.522474, 0.470412]] def graph_fn(rel_codes, anchors): scale_factors = None anchors = box_list.BoxList(anchors) coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) boxes = coder.decode(rel_codes, anchors).get() return boxes 
boxes_out = self.execute(graph_fn, [rel_codes, anchors]) self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, atol=1e-04) def test_correct_boxes_with_non_default_scale(self): anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], np.float32) rel_codes = np.array( [[-1., -1.25, -1.62186], [-0.166667, -0.666667, -2.772588]], np.float32) expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432], [0.155051, 0.102989, 0.522474, 0.470412]] def graph_fn(rel_codes, anchors): scale_factors = [2, 3, 4] anchors = box_list.BoxList(anchors) coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) boxes = coder.decode(rel_codes, anchors).get() return boxes boxes_out = self.execute(graph_fn, [rel_codes, anchors]) self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, atol=1e-04) if __name__ == '__main__': tf.test.main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/square_box_coder_test.py
square_box_coder_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Faster RCNN box coder. Faster RCNN box coder follows the coding schema described below: ty = (y - ya) / ha tx = (x - xa) / wa th = log(h / ha) tw = log(w / wa) where x, y, w, h denote the box's center coordinates, width and height respectively. Similarly, xa, ya, wa, ha denote the anchor's center coordinates, width and height. tx, ty, tw and th denote the anchor-encoded center, width and height respectively. See http://arxiv.org/abs/1506.01497 for details. """ import tensorflow.compat.v1 as tf from object_detection.core import box_coder from object_detection.core import box_list EPSILON = 1e-8 class FasterRcnnBoxCoder(box_coder.BoxCoder): """Faster RCNN box coder.""" def __init__(self, scale_factors=None): """Constructor for FasterRcnnBoxCoder. Args: scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. If set to None, does not perform scaling. For Faster RCNN, the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0]. """ if scale_factors: assert len(scale_factors) == 4 for scalar in scale_factors: assert scalar > 0 self._scale_factors = scale_factors @property def code_size(self): return 4 def _encode(self, boxes, anchors): """Encode a box collection with respect to anchor collection. Args: boxes: BoxList holding N boxes to be encoded. anchors: BoxList of anchors. Returns: a tensor representing N anchor-encoded boxes of the format [ty, tx, th, tw]. """ # Convert anchors to the center coordinate representation. ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() # Avoid NaN in division and log below. ha += EPSILON wa += EPSILON h += EPSILON w += EPSILON tx = (xcenter - xcenter_a) / wa ty = (ycenter - ycenter_a) / ha tw = tf.log(w / wa) th = tf.log(h / ha) # Scales location targets as used in paper for joint training. if self._scale_factors: ty *= self._scale_factors[0] tx *= self._scale_factors[1] th *= self._scale_factors[2] tw *= self._scale_factors[3] return tf.transpose(tf.stack([ty, tx, th, tw])) def _decode(self, rel_codes, anchors): """Decode relative codes to boxes. Args: rel_codes: a tensor representing N anchor-encoded boxes. anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes. """ ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes)) if self._scale_factors: ty /= self._scale_factors[0] tx /= self._scale_factors[1] th /= self._scale_factors[2] tw /= self._scale_factors[3] w = tf.exp(tw) * wa h = tf.exp(th) * ha ycenter = ty * ha + ycenter_a xcenter = tx * wa + xcenter_a ymin = ycenter - h / 2. xmin = xcenter - w / 2. ymax = ycenter + h / 2. xmax = xcenter + w / 2. return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/faster_rcnn_box_coder.py
faster_rcnn_box_coder.py
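The Faster R-CNN schema in faster_rcnn_box_coder.py above adds separate height and width terms. As with the square coder, the arithmetic can be verified by hand; the NumPy sketch below (encode only, EPSILON guard omitted) reproduces the first expected code from faster_rcnn_box_coder_test.py earlier in this listing, approximately [-0.5, -0.416666, -0.405465, -0.182321]:

import numpy as np

def encode_faster_rcnn(box, anchor):
  """Encodes one [ymin, xmin, ymax, xmax] box against one anchor as [ty, tx, th, tw]."""
  ymin, xmin, ymax, xmax = box
  ymin_a, xmin_a, ymax_a, xmax_a = anchor
  h, w = ymax - ymin, xmax - xmin
  ha, wa = ymax_a - ymin_a, xmax_a - xmin_a
  ycenter, xcenter = ymin + h / 2., xmin + w / 2.
  ycenter_a, xcenter_a = ymin_a + ha / 2., xmin_a + wa / 2.
  ty = (ycenter - ycenter_a) / ha
  tx = (xcenter - xcenter_a) / wa
  th = np.log(h / ha)
  tw = np.log(w / wa)
  return np.array([ty, tx, th, tw])

print(encode_faster_rcnn([10.0, 10.0, 20.0, 15.0], [15.0, 12.0, 30.0, 18.0]))
# -> approx. [-0.5, -0.416666, -0.405465, -0.182321]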