r"""A Beam job to add contextual memory banks to tf.Examples. This tool groups images containing bounding boxes and embedded context features by a key, either `image/location` or `image/seq_id`, and time horizon, then uses these groups to build up a contextual memory bank from the embedded context features from each image in the group and adds that context to the output tf.Examples for each image in the group. Steps to generate a dataset with context from one with bounding boxes and embedded context features: 1. Use object/detection/export_inference_graph.py to get a `saved_model` for inference. The input node must accept a tf.Example proto. 2. Run this tool with `saved_model` from step 1 and a TFRecord of tf.Example protos containing images, bounding boxes, and embedded context features. The context features can be added to tf.Examples using generate_embedding_data.py. Example Usage: -------------- python add_context_to_examples.py \ --input_tfrecord path/to/input_tfrecords* \ --output_tfrecord path/to/output_tfrecords \ --sequence_key image/location \ --time_horizon month """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import copy import datetime import io import itertools import json import os import numpy as np import PIL.Image import six import tensorflow as tf try: import apache_beam as beam # pylint:disable=g-import-not-at-top except ModuleNotFoundError: pass class ReKeyDataFn(beam.DoFn): """Re-keys tfrecords by sequence_key. This Beam DoFn re-keys the tfrecords by a user-defined sequence_key """ def __init__(self, sequence_key, time_horizon, reduce_image_size, max_image_dimension): """Initialization function. Args: sequence_key: A feature name to use as a key for grouping sequences. Must point to a key of type bytes_list time_horizon: What length of time to use to partition the data when building the memory banks. Options: `year`, `month`, `week`, `day `, `hour`, `minute`, None reduce_image_size: Whether to reduce the sizes of the stored images. max_image_dimension: maximum dimension of reduced images """ self._sequence_key = sequence_key if time_horizon is None or time_horizon in {'year', 'month', 'week', 'day', 'hour', 'minute'}: self._time_horizon = time_horizon else: raise ValueError('Time horizon not supported.') self._reduce_image_size = reduce_image_size self._max_image_dimension = max_image_dimension self._session = None self._num_examples_processed = beam.metrics.Metrics.counter( 'data_rekey', 'num_tf_examples_processed') self._num_images_resized = beam.metrics.Metrics.counter( 'data_rekey', 'num_images_resized') self._num_images_read = beam.metrics.Metrics.counter( 'data_rekey', 'num_images_read') self._num_images_found = beam.metrics.Metrics.counter( 'data_rekey', 'num_images_read') self._num_got_shape = beam.metrics.Metrics.counter( 'data_rekey', 'num_images_got_shape') self._num_images_found_size = beam.metrics.Metrics.counter( 'data_rekey', 'num_images_found_size') self._num_examples_cleared = beam.metrics.Metrics.counter( 'data_rekey', 'num_examples_cleared') self._num_examples_updated = beam.metrics.Metrics.counter( 'data_rekey', 'num_examples_updated') def process(self, tfrecord_entry): return self._rekey_examples(tfrecord_entry) def _largest_size_at_most(self, height, width, largest_side): """Computes new shape with the largest side equal to `largest_side`. Args: height: an int indicating the current height. width: an int indicating the current width. 
largest_side: A python integer indicating the size of the largest side after resize. Returns: new_height: an int indicating the new height. new_width: an int indicating the new width. """ x_scale = float(largest_side) / float(width) y_scale = float(largest_side) / float(height) scale = min(x_scale, y_scale) new_width = int(width * scale) new_height = int(height * scale) return new_height, new_width def _resize_image(self, input_example): """Resizes the image within input_example and updates the height and width. Args: input_example: A tf.Example that we want to update to contain a resized image. Returns: input_example: Updated tf.Example. """ original_image = copy.deepcopy( input_example.features.feature['image/encoded'].bytes_list.value[0]) self._num_images_read.inc(1) height = copy.deepcopy( input_example.features.feature['image/height'].int64_list.value[0]) width = copy.deepcopy( input_example.features.feature['image/width'].int64_list.value[0]) self._num_got_shape.inc(1) new_height, new_width = self._largest_size_at_most( height, width, self._max_image_dimension) self._num_images_found_size.inc(1) encoded_jpg_io = io.BytesIO(original_image) image = PIL.Image.open(encoded_jpg_io) resized_image = image.resize((new_width, new_height)) with io.BytesIO() as output: resized_image.save(output, format='JPEG') encoded_resized_image = output.getvalue() self._num_images_resized.inc(1) del input_example.features.feature['image/encoded'].bytes_list.value[:] del input_example.features.feature['image/height'].int64_list.value[:] del input_example.features.feature['image/width'].int64_list.value[:] self._num_examples_cleared.inc(1) input_example.features.feature['image/encoded'].bytes_list.value.extend( [encoded_resized_image]) input_example.features.feature['image/height'].int64_list.value.extend( [new_height]) input_example.features.feature['image/width'].int64_list.value.extend( [new_width]) self._num_examples_updated.inc(1) return input_example def _rekey_examples(self, tfrecord_entry): serialized_example = copy.deepcopy(tfrecord_entry) input_example = tf.train.Example.FromString(serialized_example) self._num_images_found.inc(1) if self._reduce_image_size: input_example = self._resize_image(input_example) self._num_images_resized.inc(1) new_key = input_example.features.feature[ self._sequence_key].bytes_list.value[0] if self._time_horizon: date_captured = datetime.datetime.strptime( six.ensure_str(input_example.features.feature[ 'image/date_captured'].bytes_list.value[0]), '%Y-%m-%d %H:%M:%S') year = date_captured.year month = date_captured.month day = date_captured.day week = np.floor(float(day) / float(7)) hour = date_captured.hour minute = date_captured.minute if self._time_horizon == 'year': new_key = new_key + six.ensure_binary('/' + str(year)) elif self._time_horizon == 'month': new_key = new_key + six.ensure_binary( '/' + str(year) + '/' + str(month)) elif self._time_horizon == 'week': new_key = new_key + six.ensure_binary( '/' + str(year) + '/' + str(month) + '/' + str(week)) elif self._time_horizon == 'day': new_key = new_key + six.ensure_binary( '/' + str(year) + '/' + str(month) + '/' + str(day)) elif self._time_horizon == 'hour': new_key = new_key + six.ensure_binary( '/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + ( str(hour))) elif self._time_horizon == 'minute': new_key = new_key + six.ensure_binary( '/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + ( str(hour) + '/' + str(minute))) self._num_examples_processed.inc(1) return [(new_key, input_example)] class 
SortGroupedDataFn(beam.DoFn): """Sorts data within a keyed group. This Beam DoFn sorts the grouped list of image examples by frame_num """ def __init__(self, sequence_key, sorted_image_ids, max_num_elements_in_context_features): """Initialization function. Args: sequence_key: A feature name to use as a key for grouping sequences. Must point to a key of type bytes_list sorted_image_ids: Whether the image ids are sortable to use as sorting tie-breakers max_num_elements_in_context_features: The maximum number of elements allowed in the memory bank """ self._session = None self._num_examples_processed = beam.metrics.Metrics.counter( 'sort_group', 'num_groups_sorted') self._too_many_elements = beam.metrics.Metrics.counter( 'sort_group', 'too_many_elements') self._split_elements = beam.metrics.Metrics.counter( 'sort_group', 'split_elements') self._sequence_key = six.ensure_binary(sequence_key) self._sorted_image_ids = sorted_image_ids self._max_num_elements_in_context_features = ( max_num_elements_in_context_features) def process(self, grouped_entry): return self._sort_image_examples(grouped_entry) def _sort_image_examples(self, grouped_entry): key, example_collection = grouped_entry example_list = list(example_collection) def get_frame_num(example): return example.features.feature['image/seq_frame_num'].int64_list.value[0] def get_date_captured(example): return datetime.datetime.strptime( six.ensure_str( example.features.feature[ 'image/date_captured'].bytes_list.value[0]), '%Y-%m-%d %H:%M:%S') def get_image_id(example): return example.features.feature['image/source_id'].bytes_list.value[0] if self._sequence_key == six.ensure_binary('image/seq_id'): sorting_fn = get_frame_num elif self._sequence_key == six.ensure_binary('image/location'): if self._sorted_image_ids: sorting_fn = get_image_id else: sorting_fn = get_date_captured sorted_example_list = sorted(example_list, key=sorting_fn) num_embeddings = 0 for example in sorted_example_list: num_embeddings += example.features.feature[ 'image/embedding_count'].int64_list.value[0] self._num_examples_processed.inc(1) # To handle cases where there are more context embeddings within # the time horizon than the specified maximum, we split the context group # into subsets sequentially in time, with each subset having the maximum # number of context embeddings except the final one, which holds the # remainder. 
if num_embeddings > self._max_num_elements_in_context_features: leftovers = sorted_example_list output_list = [] count = 0 self._too_many_elements.inc(1) num_embeddings = 0 max_idx = 0 for idx, example in enumerate(leftovers): num_embeddings += example.features.feature[ 'image/embedding_count'].int64_list.value[0] if num_embeddings <= self._max_num_elements_in_context_features: max_idx = idx while num_embeddings > self._max_num_elements_in_context_features: self._split_elements.inc(1) new_key = key + six.ensure_binary('_' + str(count)) new_list = leftovers[:max_idx] output_list.append((new_key, new_list)) leftovers = leftovers[max_idx:] count += 1 num_embeddings = 0 max_idx = 0 for idx, example in enumerate(leftovers): num_embeddings += example.features.feature[ 'image/embedding_count'].int64_list.value[0] if num_embeddings <= self._max_num_elements_in_context_features: max_idx = idx new_key = key + six.ensure_binary('_' + str(count)) output_list.append((new_key, leftovers)) else: output_list = [(key, sorted_example_list)] return output_list def get_sliding_window(example_list, max_clip_length, stride_length): """Yields a sliding window over data from example_list. Sliding window has width max_clip_len (n) and stride stride_len (m). s -> (s0,s1,...s[n-1]), (s[m],s[m+1],...,s[m+n]), ... Args: example_list: A list of examples. max_clip_length: The maximum length of each clip. stride_length: The stride between each clip. Yields: A list of lists of examples, each with length <= max_clip_length """ # check if the list is too short to slide over if len(example_list) < max_clip_length: yield example_list else: starting_values = [i*stride_length for i in range(len(example_list)) if len(example_list) > i*stride_length] for start in starting_values: result = tuple(itertools.islice(example_list, start, min(start + max_clip_length, len(example_list)))) yield result class GenerateContextFn(beam.DoFn): """Generates context data for camera trap images. This Beam DoFn builds up contextual memory banks from groups of images and stores them in the output tf.Example or tf.Sequence_example for each image. """ def __init__(self, sequence_key, add_context_features, image_ids_to_keep, keep_context_features_image_id_list=False, subsample_context_features_rate=0, keep_only_positives=False, context_features_score_threshold=0.7, keep_only_positives_gt=False, max_num_elements_in_context_features=5000, pad_context_features=False, output_type='tf_example', max_clip_length=None, context_feature_length=2057): """Initialization function. Args: sequence_key: A feature name to use as a key for grouping sequences. add_context_features: Whether to keep and store the contextual memory bank. image_ids_to_keep: A list of image ids to save, to use to build data subsets for evaluation. keep_context_features_image_id_list: Whether to save an ordered list of the ids of the images in the contextual memory bank. subsample_context_features_rate: What rate to subsample images for the contextual memory bank. keep_only_positives: Whether to only keep high scoring (>context_features_score_threshold) features in the contextual memory bank. context_features_score_threshold: What threshold to use for keeping features. keep_only_positives_gt: Whether to only keep features from images that contain objects based on the ground truth (for training). max_num_elements_in_context_features: the maximum number of elements in the memory bank pad_context_features: Whether to pad the context features to a fixed size. 
output_type: What type of output, tf_example of tf_sequence_example max_clip_length: The maximum length of a sequence example, before splitting into multiple context_feature_length: The length of the context feature embeddings stored in the input data. """ self._session = None self._num_examples_processed = beam.metrics.Metrics.counter( 'sequence_data_generation', 'num_seq_examples_processed') self._num_keys_processed = beam.metrics.Metrics.counter( 'sequence_data_generation', 'num_keys_processed') self._sequence_key = sequence_key self._add_context_features = add_context_features self._pad_context_features = pad_context_features self._output_type = output_type self._max_clip_length = max_clip_length if six.ensure_str(image_ids_to_keep) == 'All': self._image_ids_to_keep = None else: with tf.io.gfile.GFile(image_ids_to_keep) as f: self._image_ids_to_keep = json.load(f) self._keep_context_features_image_id_list = ( keep_context_features_image_id_list) self._subsample_context_features_rate = subsample_context_features_rate self._keep_only_positives = keep_only_positives self._keep_only_positives_gt = keep_only_positives_gt self._context_features_score_threshold = context_features_score_threshold self._max_num_elements_in_context_features = ( max_num_elements_in_context_features) self._context_feature_length = context_feature_length self._images_kept = beam.metrics.Metrics.counter( 'sequence_data_generation', 'images_kept') self._images_loaded = beam.metrics.Metrics.counter( 'sequence_data_generation', 'images_loaded') def process(self, grouped_entry): return self._add_context_to_example(copy.deepcopy(grouped_entry)) def _build_context_features(self, example_list): context_features = [] context_features_image_id_list = [] count = 0 example_embedding = [] for idx, example in enumerate(example_list): if self._subsample_context_features_rate > 0: if (idx % self._subsample_context_features_rate) != 0: example.features.feature[ 'context_features_idx'].int64_list.value.append( self._max_num_elements_in_context_features + 1) continue if self._keep_only_positives: if example.features.feature[ 'image/embedding_score' ].float_list.value[0] < self._context_features_score_threshold: example.features.feature[ 'context_features_idx'].int64_list.value.append( self._max_num_elements_in_context_features + 1) continue if self._keep_only_positives_gt: if len(example.features.feature[ 'image/object/bbox/xmin' ].float_list.value) < 1: example.features.feature[ 'context_features_idx'].int64_list.value.append( self._max_num_elements_in_context_features + 1) continue example_embedding = list(example.features.feature[ 'image/embedding'].float_list.value) context_features.extend(example_embedding) num_embeddings = example.features.feature[ 'image/embedding_count'].int64_list.value[0] example_image_id = example.features.feature[ 'image/source_id'].bytes_list.value[0] for _ in range(num_embeddings): example.features.feature[ 'context_features_idx'].int64_list.value.append(count) count += 1 context_features_image_id_list.append(example_image_id) if not example_embedding: example_embedding.append(np.zeros(self._context_feature_length)) feature_length = self._context_feature_length # If the example_list is not empty and image/embedding_length is in the # featture dict, feature_length will be assigned to that. Otherwise, it will # be kept as default. 
if example_list and ( 'image/embedding_length' in example_list[0].features.feature): feature_length = example_list[0].features.feature[ 'image/embedding_length'].int64_list.value[0] if self._pad_context_features: while len(context_features_image_id_list) < ( self._max_num_elements_in_context_features): context_features_image_id_list.append('') return context_features, feature_length, context_features_image_id_list def _add_context_to_example(self, grouped_entry): key, example_collection = grouped_entry list_of_examples = [] example_list = list(example_collection) if self._add_context_features: context_features, feature_length, context_features_image_id_list = ( self._build_context_features(example_list)) if self._image_ids_to_keep is not None: new_example_list = [] for example in example_list: im_id = example.features.feature['image/source_id'].bytes_list.value[0] self._images_loaded.inc(1) if six.ensure_str(im_id) in self._image_ids_to_keep: self._images_kept.inc(1) new_example_list.append(example) if new_example_list: example_list = new_example_list else: return [] if self._output_type == 'tf_sequence_example': if self._max_clip_length is not None: # For now, no overlap clips = get_sliding_window( example_list, self._max_clip_length, self._max_clip_length) else: clips = [example_list] for clip_num, clip_list in enumerate(clips): # initialize sequence example seq_example = tf.train.SequenceExample() video_id = six.ensure_str(key)+'_'+ str(clip_num) seq_example.context.feature['clip/media_id'].bytes_list.value.append( video_id.encode('utf8')) seq_example.context.feature['clip/frames'].int64_list.value.append( len(clip_list)) seq_example.context.feature[ 'clip/start/timestamp'].int64_list.value.append(0) seq_example.context.feature[ 'clip/end/timestamp'].int64_list.value.append(len(clip_list)) seq_example.context.feature['image/format'].bytes_list.value.append( six.ensure_binary('JPG')) seq_example.context.feature['image/channels'].int64_list.value.append(3) context_example = clip_list[0] seq_example.context.feature['image/height'].int64_list.value.append( context_example.features.feature[ 'image/height'].int64_list.value[0]) seq_example.context.feature['image/width'].int64_list.value.append( context_example.features.feature['image/width'].int64_list.value[0]) seq_example.context.feature[ 'image/context_feature_length'].int64_list.value.append( feature_length) seq_example.context.feature[ 'image/context_features'].float_list.value.extend( context_features) if self._keep_context_features_image_id_list: seq_example.context.feature[ 'image/context_features_image_id_list'].bytes_list.value.extend( context_features_image_id_list) encoded_image_list = seq_example.feature_lists.feature_list[ 'image/encoded'] timestamps_list = seq_example.feature_lists.feature_list[ 'image/timestamp'] context_features_idx_list = seq_example.feature_lists.feature_list[ 'image/context_features_idx'] date_captured_list = seq_example.feature_lists.feature_list[ 'image/date_captured'] unix_time_list = seq_example.feature_lists.feature_list[ 'image/unix_time'] location_list = seq_example.feature_lists.feature_list['image/location'] image_ids_list = seq_example.feature_lists.feature_list[ 'image/source_id'] gt_xmin_list = seq_example.feature_lists.feature_list[ 'region/bbox/xmin'] gt_xmax_list = seq_example.feature_lists.feature_list[ 'region/bbox/xmax'] gt_ymin_list = seq_example.feature_lists.feature_list[ 'region/bbox/ymin'] gt_ymax_list = seq_example.feature_lists.feature_list[ 'region/bbox/ymax'] gt_type_list = 
seq_example.feature_lists.feature_list[ 'region/label/index'] gt_type_string_list = seq_example.feature_lists.feature_list[ 'region/label/string'] gt_is_annotated_list = seq_example.feature_lists.feature_list[ 'region/is_annotated'] for idx, example in enumerate(clip_list): encoded_image = encoded_image_list.feature.add() encoded_image.bytes_list.value.extend( example.features.feature['image/encoded'].bytes_list.value) image_id = image_ids_list.feature.add() image_id.bytes_list.value.append( example.features.feature['image/source_id'].bytes_list.value[0]) timestamp = timestamps_list.feature.add() # Timestamp is currently order in the list. timestamp.int64_list.value.extend([idx]) context_features_idx = context_features_idx_list.feature.add() context_features_idx.int64_list.value.extend( example.features.feature['context_features_idx'].int64_list.value) date_captured = date_captured_list.feature.add() date_captured.bytes_list.value.extend( example.features.feature['image/date_captured'].bytes_list.value) unix_time = unix_time_list.feature.add() unix_time.float_list.value.extend( example.features.feature['image/unix_time'].float_list.value) location = location_list.feature.add() location.bytes_list.value.extend( example.features.feature['image/location'].bytes_list.value) gt_xmin = gt_xmin_list.feature.add() gt_xmax = gt_xmax_list.feature.add() gt_ymin = gt_ymin_list.feature.add() gt_ymax = gt_ymax_list.feature.add() gt_type = gt_type_list.feature.add() gt_type_str = gt_type_string_list.feature.add() gt_is_annotated = gt_is_annotated_list.feature.add() gt_is_annotated.int64_list.value.append(1) gt_xmin.float_list.value.extend( example.features.feature[ 'image/object/bbox/xmin'].float_list.value) gt_xmax.float_list.value.extend( example.features.feature[ 'image/object/bbox/xmax'].float_list.value) gt_ymin.float_list.value.extend( example.features.feature[ 'image/object/bbox/ymin'].float_list.value) gt_ymax.float_list.value.extend( example.features.feature[ 'image/object/bbox/ymax'].float_list.value) gt_type.int64_list.value.extend( example.features.feature[ 'image/object/class/label'].int64_list.value) gt_type_str.bytes_list.value.extend( example.features.feature[ 'image/object/class/text'].bytes_list.value) self._num_examples_processed.inc(1) list_of_examples.append(seq_example) elif self._output_type == 'tf_example': for example in example_list: im_id = example.features.feature['image/source_id'].bytes_list.value[0] if self._add_context_features: example.features.feature[ 'image/context_features'].float_list.value.extend( context_features) example.features.feature[ 'image/context_feature_length'].int64_list.value.append( feature_length) if self._keep_context_features_image_id_list: example.features.feature[ 'image/context_features_image_id_list'].bytes_list.value.extend( context_features_image_id_list) self._num_examples_processed.inc(1) list_of_examples.append(example) return list_of_examples def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, sequence_key, time_horizon=None, subsample_context_features_rate=0, reduce_image_size=True, max_image_dimension=1024, add_context_features=True, sorted_image_ids=True, image_ids_to_keep='All', keep_context_features_image_id_list=False, keep_only_positives=False, context_features_score_threshold=0.7, keep_only_positives_gt=False, max_num_elements_in_context_features=5000, num_shards=0, output_type='tf_example', max_clip_length=None, context_feature_length=2057): """Returns a beam pipeline to run object detection inference. 
Args:
    pipeline: Initialized beam pipeline.
    input_tfrecord: A TFRecord of tf.train.Example protos containing images.
    output_tfrecord: A TFRecord of tf.train.Example (or tf.train.SequenceExample) protos containing the images from the input TFRecord with added contextual memory banks.
    sequence_key: A feature name to use as a key for grouping sequences.
    time_horizon: What length of time to use to partition the data when building the memory banks. Options: `year`, `month`, `week`, `day`, `hour`, `minute`, None.
    subsample_context_features_rate: What rate to subsample images for the contextual memory bank.
    reduce_image_size: Whether to reduce the size of the stored images.
    max_image_dimension: The maximum image dimension to use for resizing.
    add_context_features: Whether to keep and store the contextual memory bank.
    sorted_image_ids: Whether the image ids are sortable, and can be used as datetime tie-breakers when building memory banks.
    image_ids_to_keep: A list of image ids to save, to use to build data subsets for evaluation.
    keep_context_features_image_id_list: Whether to save an ordered list of the ids of the images in the contextual memory bank.
    keep_only_positives: Whether to only keep high scoring (>context_features_score_threshold) features in the contextual memory bank.
    context_features_score_threshold: What threshold to use for keeping features.
    keep_only_positives_gt: Whether to only keep features from images that contain objects based on the ground truth (for training).
    max_num_elements_in_context_features: The maximum number of elements in the memory bank.
    num_shards: The number of output shards.
    output_type: The type of output, one of `tf_example` or `tf_sequence_example`.
    max_clip_length: The maximum length of a sequence example, before splitting into multiple sequence examples.
    context_feature_length: The length of the context feature embeddings stored in the input data.
""" if output_type == 'tf_example': coder = beam.coders.ProtoCoder(tf.train.Example) elif output_type == 'tf_sequence_example': coder = beam.coders.ProtoCoder(tf.train.SequenceExample) else: raise ValueError('Unsupported output type.') input_collection = ( pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( input_tfrecord, coder=beam.coders.BytesCoder())) rekey_collection = input_collection | 'RekeyExamples' >> beam.ParDo( ReKeyDataFn(sequence_key, time_horizon, reduce_image_size, max_image_dimension)) grouped_collection = ( rekey_collection | 'GroupBySequenceKey' >> beam.GroupByKey()) grouped_collection = ( grouped_collection | 'ReshuffleGroups' >> beam.Reshuffle()) ordered_collection = ( grouped_collection | 'OrderByFrameNumber' >> beam.ParDo( SortGroupedDataFn(sequence_key, sorted_image_ids, max_num_elements_in_context_features))) ordered_collection = ( ordered_collection | 'ReshuffleSortedGroups' >> beam.Reshuffle()) output_collection = ( ordered_collection | 'AddContextToExamples' >> beam.ParDo( GenerateContextFn( sequence_key, add_context_features, image_ids_to_keep, keep_context_features_image_id_list=( keep_context_features_image_id_list), subsample_context_features_rate=subsample_context_features_rate, keep_only_positives=keep_only_positives, keep_only_positives_gt=keep_only_positives_gt, context_features_score_threshold=( context_features_score_threshold), max_num_elements_in_context_features=( max_num_elements_in_context_features), output_type=output_type, max_clip_length=max_clip_length, context_feature_length=context_feature_length))) output_collection = ( output_collection | 'ReshuffleExamples' >> beam.Reshuffle()) _ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( output_tfrecord, num_shards=num_shards, coder=coder) def parse_args(argv): """Command-line argument parser. Args: argv: command line arguments Returns: beam_args: Arguments for the beam pipeline. pipeline_args: Arguments for the pipeline options, such as runner type. """ parser = argparse.ArgumentParser() parser.add_argument( '--input_tfrecord', dest='input_tfrecord', required=True, help='TFRecord containing images in tf.Example format for object ' 'detection, with bounding boxes and contextual feature embeddings.') parser.add_argument( '--output_tfrecord', dest='output_tfrecord', required=True, help='TFRecord containing images in tf.Example format, with added ' 'contextual memory banks.') parser.add_argument( '--sequence_key', dest='sequence_key', default='image/location', help='Key to use when grouping sequences: so far supports `image/seq_id` ' 'and `image/location`.') parser.add_argument( '--context_feature_length', dest='context_feature_length', default=2057, help='The length of the context feature embeddings stored in the input ' 'data.') parser.add_argument( '--time_horizon', dest='time_horizon', default=None, help='What time horizon to use when splitting the data, if any. Options ' 'are: `year`, `month`, `week`, `day `, `hour`, `minute`, `None`.') parser.add_argument( '--subsample_context_features_rate', dest='subsample_context_features_rate', default=0, help='Whether to subsample the context_features, and if so how many to ' 'sample. If the rate is set to X, it will sample context from 1 out of ' 'every X images. 
Default is sampling from every image, which is X=0.') parser.add_argument( '--reduce_image_size', dest='reduce_image_size', default=True, help='downsamples images to have longest side max_image_dimension, ' 'maintaining aspect ratio') parser.add_argument( '--max_image_dimension', dest='max_image_dimension', default=1024, help='Sets max image dimension for resizing.') parser.add_argument( '--add_context_features', dest='add_context_features', default=True, help='Adds a memory bank of embeddings to each clip') parser.add_argument( '--sorted_image_ids', dest='sorted_image_ids', default=True, help='Whether the image source_ids are sortable to deal with ' 'date_captured tie-breaks.') parser.add_argument( '--image_ids_to_keep', dest='image_ids_to_keep', default='All', help='Path to .json list of image ids to keep, used for ground truth ' 'eval creation.') parser.add_argument( '--keep_context_features_image_id_list', dest='keep_context_features_image_id_list', default=False, help='Whether or not to keep a list of the image_ids corresponding to ' 'the memory bank.') parser.add_argument( '--keep_only_positives', dest='keep_only_positives', default=False, help='Whether or not to keep only positive boxes based on score.') parser.add_argument( '--context_features_score_threshold', dest='context_features_score_threshold', default=0.7, help='What score threshold to use for boxes in context_features, when ' '`keep_only_positives` is set to `True`.') parser.add_argument( '--keep_only_positives_gt', dest='keep_only_positives_gt', default=False, help='Whether or not to keep only positive boxes based on gt class.') parser.add_argument( '--max_num_elements_in_context_features', dest='max_num_elements_in_context_features', default=2000, help='Sets max number of context feature elements per memory bank. ' 'If the number of images in the context group is greater than ' '`max_num_elements_in_context_features`, the context group will be split.' ) parser.add_argument( '--output_type', dest='output_type', default='tf_example', help='Output type, one of `tf_example`, `tf_sequence_example`.') parser.add_argument( '--max_clip_length', dest='max_clip_length', default=None, help='Max length for sequence example outputs.') parser.add_argument( '--num_shards', dest='num_shards', default=0, help='Number of output shards.') beam_args, pipeline_args = parser.parse_known_args(argv) return beam_args, pipeline_args def main(argv=None, save_main_session=True): """Runs the Beam pipeline that performs inference. Args: argv: Command line arguments. save_main_session: Whether to save the main session. """ args, pipeline_args = parse_args(argv) pipeline_options = beam.options.pipeline_options.PipelineOptions( pipeline_args) pipeline_options.view_as( beam.options.pipeline_options.SetupOptions).save_main_session = ( save_main_session) dirname = os.path.dirname(args.output_tfrecord) tf.io.gfile.makedirs(dirname) p = beam.Pipeline(options=pipeline_options) construct_pipeline( p, args.input_tfrecord, args.output_tfrecord, args.sequence_key, args.time_horizon, args.subsample_context_features_rate, args.reduce_image_size, args.max_image_dimension, args.add_context_features, args.sorted_image_ids, args.image_ids_to_keep, args.keep_context_features_image_id_list, args.keep_only_positives, args.context_features_score_threshold, args.keep_only_positives_gt, args.max_num_elements_in_context_features, args.num_shards, args.output_type, args.max_clip_length, args.context_feature_length) p.run() if __name__ == '__main__': main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py
add_context_to_examples.py
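The grouping key emitted by `ReKeyDataFn` above is simply the raw `sequence_key` value with a date-derived suffix appended for the chosen time horizon. The following is a minimal plain-Python sketch of that key construction; `build_grouping_key` is a hypothetical helper written for illustration (it works on strings, whereas the pipeline operates on the bytes values stored in the tf.Example) and is not part of the module.

```python
import datetime

def build_grouping_key(sequence_value, date_captured_str, time_horizon=None):
    """Appends a time-horizon suffix to a sequence key (illustrative only)."""
    key = sequence_value
    if time_horizon:
        dt = datetime.datetime.strptime(date_captured_str, '%Y-%m-%d %H:%M:%S')
        parts = {
            'year': [dt.year],
            'month': [dt.year, dt.month],
            # Week index within the month, i.e. floor(day_of_month / 7).
            'week': [dt.year, dt.month, dt.day // 7],
            'day': [dt.year, dt.month, dt.day],
            'hour': [dt.year, dt.month, dt.day, dt.hour],
            'minute': [dt.year, dt.month, dt.day, dt.hour, dt.minute],
        }[time_horizon]
        key += '/' + '/'.join(str(p) for p in parts)
    return key

# Images from the same camera location taken in the same month share a key,
# so they contribute to the same contextual memory bank.
print(build_grouping_key('loc_042', '2019-06-14 08:30:00', 'month'))  # loc_042/2019/6
```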
r"""A Beam job to generate embedding data for camera trap images. This tool runs inference with an exported Object Detection model in `saved_model` format and produce raw embeddings for camera trap data. These embeddings contain an object-centric feature embedding from Faster R-CNN, the datetime that the image was taken (normalized in a specific way), and the position of the object of interest. By default, only the highest-scoring object embedding is included. Steps to generate a embedding dataset: 1. Use object_detection/export_inference_graph.py to get a Faster R-CNN `saved_model` for inference. The input node must accept a tf.Example proto. 2. Run this tool with `saved_model` from step 1 and an TFRecord of tf.Example protos containing images for inference. Example Usage: -------------- python tensorflow_models/object_detection/export_inference_graph.py \ --alsologtostderr \ --input_type tf_example \ --pipeline_config_path path/to/faster_rcnn_model.config \ --trained_checkpoint_prefix path/to/model.ckpt \ --output_directory path/to/exported_model_directory \ --additional_output_tensor_names detection_features python generate_embedding_data.py \ --alsologtostderr \ --embedding_input_tfrecord path/to/input_tfrecords* \ --embedding_output_tfrecord path/to/output_tfrecords \ --embedding_model_dir path/to/exported_model_directory/saved_model """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import datetime import os import threading import numpy as np import six import tensorflow as tf try: import apache_beam as beam # pylint:disable=g-import-not-at-top except ModuleNotFoundError: pass def add_keys(serialized_example): key = hash(serialized_example) return key, serialized_example def drop_keys(key_value_tuple): return key_value_tuple[1] def get_date_captured(example): date_captured = datetime.datetime.strptime( six.ensure_str( example.features.feature['image/date_captured'].bytes_list.value[0]), '%Y-%m-%d %H:%M:%S') return date_captured def embed_date_captured(date_captured): """Encodes the datetime of the image.""" embedded_date_captured = [] month_max = 12.0 day_max = 31.0 hour_max = 24.0 minute_max = 60.0 min_year = 1990.0 max_year = 2030.0 year = (date_captured.year - min_year) / float(max_year - min_year) embedded_date_captured.append(year) month = (date_captured.month - 1) / month_max embedded_date_captured.append(month) day = (date_captured.day - 1) / day_max embedded_date_captured.append(day) hour = date_captured.hour / hour_max embedded_date_captured.append(hour) minute = date_captured.minute / minute_max embedded_date_captured.append(minute) return np.asarray(embedded_date_captured) def embed_position_and_size(box): """Encodes the bounding box of the object of interest.""" ymin = box[0] xmin = box[1] ymax = box[2] xmax = box[3] w = xmax - xmin h = ymax - ymin x = xmin + w / 2.0 y = ymin + h / 2.0 return np.asarray([x, y, w, h]) def get_bb_embedding(detection_features, detection_boxes, detection_scores, index): embedding = detection_features[0][index] pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0) box = detection_boxes[0][index] position_embedding = embed_position_and_size(box) score = detection_scores[0][index] return np.concatenate((pooled_embedding, position_embedding)), score class GenerateEmbeddingDataFn(beam.DoFn): """Generates embedding data for camera trap images. This Beam DoFn performs inference with an object detection `saved_model` and produces contextual embedding vectors. 
""" session_lock = threading.Lock() def __init__(self, model_dir, top_k_embedding_count, bottom_k_embedding_count, embedding_type='final_box_features'): """Initialization function. Args: model_dir: A directory containing saved model. top_k_embedding_count: the number of high-confidence embeddings to store bottom_k_embedding_count: the number of low-confidence embeddings to store embedding_type: One of 'final_box_features', 'rpn_box_features' """ self._model_dir = model_dir self._session = None self._num_examples_processed = beam.metrics.Metrics.counter( 'embedding_data_generation', 'num_tf_examples_processed') self._top_k_embedding_count = top_k_embedding_count self._bottom_k_embedding_count = bottom_k_embedding_count self._embedding_type = embedding_type def setup(self): self._load_inference_model() def _load_inference_model(self): # Because initialization of the tf.Session is expensive we share # one instance across all threads in the worker. This is possible since # tf.Session.run() is thread safe. with self.session_lock: self._detect_fn = tf.saved_model.load(self._model_dir) def process(self, tfexample_key_value): return self._run_inference_and_generate_embedding(tfexample_key_value) def _run_inference_and_generate_embedding(self, tfexample_key_value): key, tfexample = tfexample_key_value input_example = tf.train.Example.FromString(tfexample) example = tf.train.Example() example.CopyFrom(input_example) try: date_captured = get_date_captured(input_example) unix_time = ((date_captured - datetime.datetime.fromtimestamp(0)).total_seconds()) example.features.feature['image/unix_time'].float_list.value.extend( [unix_time]) temporal_embedding = embed_date_captured(date_captured) except Exception: # pylint: disable=broad-except temporal_embedding = None detections = self._detect_fn.signatures['serving_default']( (tf.expand_dims(tf.convert_to_tensor(tfexample), 0))) if self._embedding_type == 'final_box_features': detection_features = detections['detection_features'] elif self._embedding_type == 'rpn_box_features': detection_features = detections['cropped_rpn_box_features'] else: raise ValueError('embedding type not supported') detection_boxes = detections['detection_boxes'] num_detections = detections['num_detections'] detection_scores = detections['detection_scores'] num_detections = int(num_detections) embed_all = [] score_all = [] detection_features = np.asarray(detection_features) embedding_count = 0 for index in range(min(num_detections, self._top_k_embedding_count)): bb_embedding, score = get_bb_embedding( detection_features, detection_boxes, detection_scores, index) embed_all.extend(bb_embedding) if temporal_embedding is not None: embed_all.extend(temporal_embedding) score_all.append(score) embedding_count += 1 for index in range( max(0, num_detections - 1), max(-1, num_detections - 1 - self._bottom_k_embedding_count), -1): bb_embedding, score = get_bb_embedding( detection_features, detection_boxes, detection_scores, index) embed_all.extend(bb_embedding) if temporal_embedding is not None: embed_all.extend(temporal_embedding) score_all.append(score) embedding_count += 1 if embedding_count == 0: bb_embedding, score = get_bb_embedding( detection_features, detection_boxes, detection_scores, 0) embed_all.extend(bb_embedding) if temporal_embedding is not None: embed_all.extend(temporal_embedding) score_all.append(score) # Takes max in case embedding_count is 0. 
embedding_length = len(embed_all) // max(1, embedding_count) embed_all = np.asarray(embed_all) example.features.feature['image/embedding'].float_list.value.extend( embed_all) example.features.feature['image/embedding_score'].float_list.value.extend( score_all) example.features.feature['image/embedding_length'].int64_list.value.append( embedding_length) example.features.feature['image/embedding_count'].int64_list.value.append( embedding_count) self._num_examples_processed.inc(1) return [(key, example)] def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir, top_k_embedding_count, bottom_k_embedding_count, num_shards, embedding_type): """Returns a beam pipeline to run object detection inference. Args: pipeline: Initialized beam pipeline. input_tfrecord: An TFRecord of tf.train.Example protos containing images. output_tfrecord: An TFRecord of tf.train.Example protos that contain images in the input TFRecord and the detections from the model. model_dir: Path to `saved_model` to use for inference. top_k_embedding_count: The number of high-confidence embeddings to store. bottom_k_embedding_count: The number of low-confidence embeddings to store. num_shards: The number of output shards. embedding_type: Which features to embed. """ input_collection = ( pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( input_tfrecord, coder=beam.coders.BytesCoder()) | 'AddKeys' >> beam.Map(add_keys)) output_collection = input_collection | 'ExtractEmbedding' >> beam.ParDo( GenerateEmbeddingDataFn(model_dir, top_k_embedding_count, bottom_k_embedding_count, embedding_type)) output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle() _ = output_collection | 'DropKeys' >> beam.Map( drop_keys) | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( output_tfrecord, num_shards=num_shards, coder=beam.coders.ProtoCoder(tf.train.Example)) def parse_args(argv): """Command-line argument parser. Args: argv: command line arguments Returns: beam_args: Arguments for the beam pipeline. pipeline_args: Arguments for the pipeline options, such as runner type. """ parser = argparse.ArgumentParser() parser.add_argument( '--embedding_input_tfrecord', dest='embedding_input_tfrecord', required=True, help='TFRecord containing images in tf.Example format for object ' 'detection.') parser.add_argument( '--embedding_output_tfrecord', dest='embedding_output_tfrecord', required=True, help='TFRecord containing embeddings in tf.Example format.') parser.add_argument( '--embedding_model_dir', dest='embedding_model_dir', required=True, help='Path to directory containing an object detection SavedModel with' 'detection_box_classifier_features in the output.') parser.add_argument( '--top_k_embedding_count', dest='top_k_embedding_count', default=1, help='The number of top k embeddings to add to the memory bank.') parser.add_argument( '--bottom_k_embedding_count', dest='bottom_k_embedding_count', default=0, help='The number of bottom k embeddings to add to the memory bank.') parser.add_argument( '--num_shards', dest='num_shards', default=0, help='Number of output shards.') parser.add_argument( '--embedding_type', dest='embedding_type', default='final_box_features', help='What features to embed, supports `final_box_features`, ' '`rpn_box_features`.') beam_args, pipeline_args = parser.parse_known_args(argv) return beam_args, pipeline_args def main(argv=None, save_main_session=True): """Runs the Beam pipeline that performs inference. Args: argv: Command line arguments. 
save_main_session: Whether to save the main session. """ args, pipeline_args = parse_args(argv) pipeline_options = beam.options.pipeline_options.PipelineOptions( pipeline_args) pipeline_options.view_as( beam.options.pipeline_options.SetupOptions).save_main_session = ( save_main_session) dirname = os.path.dirname(args.embedding_output_tfrecord) tf.io.gfile.makedirs(dirname) p = beam.Pipeline(options=pipeline_options) construct_pipeline( p, args.embedding_input_tfrecord, args.embedding_output_tfrecord, args.embedding_model_dir, args.top_k_embedding_count, args.bottom_k_embedding_count, args.num_shards, args.embedding_type) p.run() if __name__ == '__main__': main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py
generate_embedding_data.py
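Each stored embedding produced by `GenerateEmbeddingDataFn` is the mean-pooled detection feature for one box, concatenated with a position/size vector and, when a parseable `image/date_captured` is present, a normalized datetime vector. The snippet below is a condensed, standalone restatement of the two small helpers defined in this file, shown only to make the layout of that vector concrete.

```python
import datetime
import numpy as np

def embed_position_and_size(box):
    # box holds normalized [ymin, xmin, ymax, xmax] corners.
    ymin, xmin, ymax, xmax = box
    w, h = xmax - xmin, ymax - ymin
    return np.asarray([xmin + w / 2.0, ymin + h / 2.0, w, h])  # center x, center y, w, h

def embed_date_captured(dt, min_year=1990.0, max_year=2030.0):
    # Each component is scaled to roughly [0, 1], as in the tool above.
    return np.asarray([
        (dt.year - min_year) / (max_year - min_year),
        (dt.month - 1) / 12.0,
        (dt.day - 1) / 31.0,
        dt.hour / 24.0,
        dt.minute / 60.0,
    ])

box_part = embed_position_and_size([0.1, 0.2, 0.5, 0.8])
time_part = embed_date_captured(datetime.datetime(2019, 6, 14, 8, 30))
# Per detection, the flattened 'image/embedding' feature is:
#   mean-pooled detection features ++ box_part ++ time_part
print(box_part, time_part)
```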
r"""Beam pipeline to create COCO Camera Traps Object Detection TFRecords. Please note that this tool creates sharded output files. This tool assumes the input annotations are in the COCO Camera Traps json format, specified here: https://github.com/Microsoft/CameraTraps/blob/master/data_management/README.md Example usage: python create_cococameratraps_tfexample_main.py \ --alsologtostderr \ --output_tfrecord_prefix="/path/to/output/tfrecord/location/prefix" \ --image_directory="/path/to/image/folder/" \ --input_annotations_file="path/to/annotations.json" """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import hashlib import io import json import os import numpy as np import PIL.Image import tensorflow as tf from object_detection.utils import dataset_util try: import apache_beam as beam # pylint:disable=g-import-not-at-top except ModuleNotFoundError: pass class ParseImage(beam.DoFn): """A DoFn that parses a COCO-CameraTraps json and emits TFRecords.""" def __init__(self, image_directory, images, annotations, categories, keep_bboxes): """Initialization function. Args: image_directory: Path to image directory images: list of COCO Camera Traps style image dictionaries annotations: list of COCO Camera Traps style annotation dictionaries categories: list of COCO Camera Traps style category dictionaries keep_bboxes: Whether to keep any bounding boxes that exist in the annotations """ self._image_directory = image_directory self._image_dict = {im['id']: im for im in images} self._annotation_dict = {im['id']: [] for im in images} self._category_dict = {int(cat['id']): cat for cat in categories} for ann in annotations: self._annotation_dict[ann['image_id']].append(ann) self._images = images self._keep_bboxes = keep_bboxes self._num_examples_processed = beam.metrics.Metrics.counter( 'cococameratraps_data_generation', 'num_tf_examples_processed') def process(self, image_id): """Builds a tf.Example given an image id. Args: image_id: the image id of the associated image Returns: List of tf.Examples. 
""" image = self._image_dict[image_id] annotations = self._annotation_dict[image_id] image_height = image['height'] image_width = image['width'] filename = image['file_name'] image_id = image['id'] image_location_id = image['location'] image_datetime = str(image['date_captured']) image_sequence_id = str(image['seq_id']) image_sequence_num_frames = int(image['seq_num_frames']) image_sequence_frame_num = int(image['frame_num']) full_path = os.path.join(self._image_directory, filename) try: # Ensure the image exists and is not corrupted with tf.io.gfile.GFile(full_path, 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = PIL.Image.open(encoded_jpg_io) image = tf.io.decode_jpeg(encoded_jpg, channels=3) except Exception: # pylint: disable=broad-except # The image file is missing or corrupt return [] key = hashlib.sha256(encoded_jpg).hexdigest() feature_dict = { 'image/height': dataset_util.int64_feature(image_height), 'image/width': dataset_util.int64_feature(image_width), 'image/filename': dataset_util.bytes_feature(filename.encode('utf8')), 'image/source_id': dataset_util.bytes_feature(str(image_id).encode('utf8')), 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), 'image/encoded': dataset_util.bytes_feature(encoded_jpg), 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), 'image/location': dataset_util.bytes_feature(str(image_location_id).encode('utf8')), 'image/seq_num_frames': dataset_util.int64_feature(image_sequence_num_frames), 'image/seq_frame_num': dataset_util.int64_feature(image_sequence_frame_num), 'image/seq_id': dataset_util.bytes_feature(image_sequence_id.encode('utf8')), 'image/date_captured': dataset_util.bytes_feature(image_datetime.encode('utf8')) } num_annotations_skipped = 0 if annotations: xmin = [] xmax = [] ymin = [] ymax = [] category_names = [] category_ids = [] area = [] for object_annotations in annotations: if 'bbox' in object_annotations and self._keep_bboxes: (x, y, width, height) = tuple(object_annotations['bbox']) if width <= 0 or height <= 0: num_annotations_skipped += 1 continue if x + width > image_width or y + height > image_height: num_annotations_skipped += 1 continue xmin.append(float(x) / image_width) xmax.append(float(x + width) / image_width) ymin.append(float(y) / image_height) ymax.append(float(y + height) / image_height) if 'area' in object_annotations: area.append(object_annotations['area']) else: # approximate area using l*w/2 area.append(width*height/2.0) category_id = int(object_annotations['category_id']) category_ids.append(category_id) category_names.append( self._category_dict[category_id]['name'].encode('utf8')) feature_dict.update({ 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), 'image/object/class/text': dataset_util.bytes_list_feature(category_names), 'image/object/class/label': dataset_util.int64_list_feature(category_ids), 'image/object/area': dataset_util.float_list_feature(area), }) # For classification, add the first category to image/class/label and # image/class/text if not category_ids: feature_dict.update({ 'image/class/label': dataset_util.int64_list_feature([0]), 'image/class/text': dataset_util.bytes_list_feature(['empty'.encode('utf8')]), }) else: feature_dict.update({ 'image/class/label': dataset_util.int64_list_feature([category_ids[0]]), 
'image/class/text': dataset_util.bytes_list_feature([category_names[0]]), }) else: # Add empty class if there are no annotations feature_dict.update({ 'image/class/label': dataset_util.int64_list_feature([0]), 'image/class/text': dataset_util.bytes_list_feature(['empty'.encode('utf8')]), }) example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) self._num_examples_processed.inc(1) return [(example)] def load_json_data(data_file): with tf.io.gfile.GFile(data_file, 'r') as fid: data_dict = json.load(fid) return data_dict def create_pipeline(pipeline, image_directory, input_annotations_file, output_tfrecord_prefix=None, num_images_per_shard=200, keep_bboxes=True): """Creates a beam pipeline for producing a COCO-CameraTraps Image dataset. Args: pipeline: Initialized beam pipeline. image_directory: Path to image directory input_annotations_file: Path to a coco-cameratraps annotation file output_tfrecord_prefix: Absolute path for tfrecord outputs. Final files will be named {output_tfrecord_prefix}@N. num_images_per_shard: The number of images to store in each shard keep_bboxes: Whether to keep any bounding boxes that exist in the json file """ data = load_json_data(input_annotations_file) num_shards = int(np.ceil(float(len(data['images']))/num_images_per_shard)) image_examples = ( pipeline | ('CreateCollections') >> beam.Create( [im['id'] for im in data['images']]) | ('ParseImage') >> beam.ParDo(ParseImage( image_directory, data['images'], data['annotations'], data['categories'], keep_bboxes=keep_bboxes))) _ = (image_examples | ('Reshuffle') >> beam.Reshuffle() | ('WriteTfImageExample') >> beam.io.tfrecordio.WriteToTFRecord( output_tfrecord_prefix, num_shards=num_shards, coder=beam.coders.ProtoCoder(tf.train.Example))) def parse_args(argv): """Command-line argument parser. Args: argv: command line arguments Returns: beam_args: Arguments for the beam pipeline. pipeline_args: Arguments for the pipeline options, such as runner type. """ parser = argparse.ArgumentParser() parser.add_argument( '--image_directory', dest='image_directory', required=True, help='Path to the directory where the images are stored.') parser.add_argument( '--output_tfrecord_prefix', dest='output_tfrecord_prefix', required=True, help='Path and prefix to store TFRecords containing images in tf.Example' 'format.') parser.add_argument( '--input_annotations_file', dest='input_annotations_file', required=True, help='Path to Coco-CameraTraps style annotations file.') parser.add_argument( '--num_images_per_shard', dest='num_images_per_shard', default=200, help='The number of images to be stored in each outputshard.') beam_args, pipeline_args = parser.parse_known_args(argv) return beam_args, pipeline_args def main(argv=None, save_main_session=True): """Runs the Beam pipeline that performs inference. Args: argv: Command line arguments. save_main_session: Whether to save the main session. """ args, pipeline_args = parse_args(argv) pipeline_options = beam.options.pipeline_options.PipelineOptions( pipeline_args) pipeline_options.view_as( beam.options.pipeline_options.SetupOptions).save_main_session = ( save_main_session) dirname = os.path.dirname(args.output_tfrecord_prefix) tf.io.gfile.makedirs(dirname) p = beam.Pipeline(options=pipeline_options) create_pipeline( pipeline=p, image_directory=args.image_directory, input_annotations_file=args.input_annotations_file, output_tfrecord_prefix=args.output_tfrecord_prefix, num_images_per_shard=args.num_images_per_shard) p.run() if __name__ == '__main__': main()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py
create_cococameratraps_tfexample_main.py
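Within `ParseImage.process`, COCO Camera Traps boxes arrive as pixel-space `[x, y, width, height]` and are stored as normalized corner coordinates, with degenerate or out-of-bounds boxes skipped. A minimal sketch of that conversion, using a hypothetical `normalize_coco_bbox` helper written for illustration:

```python
def normalize_coco_bbox(bbox, image_width, image_height):
    """Converts a COCO-style [x, y, width, height] pixel box into the
    normalized corner features stored in the output tf.Example."""
    x, y, width, height = bbox
    if width <= 0 or height <= 0:
        return None  # degenerate box; skipped, as in the pipeline
    if x + width > image_width or y + height > image_height:
        return None  # box extends past the image; also skipped
    return {
        'image/object/bbox/xmin': float(x) / image_width,
        'image/object/bbox/xmax': float(x + width) / image_width,
        'image/object/bbox/ymin': float(y) / image_height,
        'image/object/bbox/ymax': float(y + height) / image_height,
    }

print(normalize_coco_bbox([100, 50, 200, 120], image_width=640, image_height=480))
# xmin=0.15625, xmax=0.46875, ymin=0.1041..., ymax=0.3541...
```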
r"""Training executable for detection models. This executable is used to train DetectionModels. There are two ways of configuring the training job: 1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file can be specified by --pipeline_config_path. Example usage: ./train \ --logtostderr \ --train_dir=path/to/train_dir \ --pipeline_config_path=pipeline_config.pbtxt 2) Three configuration files can be provided: a model_pb2.DetectionModel configuration file to define what type of DetectionModel is being trained, an input_reader_pb2.InputReader file to specify what training data will be used and a train_pb2.TrainConfig file to configure training parameters. Example usage: ./train \ --logtostderr \ --train_dir=path/to/train_dir \ --model_config_path=model_config.pbtxt \ --train_config_path=train_config.pbtxt \ --input_config_path=train_input_config.pbtxt """ import functools import json import os import tensorflow.compat.v1 as tf from tensorflow.python.util.deprecation import deprecated from object_detection.builders import dataset_builder from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.legacy import trainer from object_detection.utils import config_util tf.logging.set_verbosity(tf.logging.INFO) flags = tf.app.flags flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.') flags.DEFINE_integer('task', 0, 'task id') flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.') flags.DEFINE_boolean('clone_on_cpu', False, 'Force clones to be deployed on CPU. Note that even if ' 'set to False (allowing ops to run on gpu), some ops may ' 'still be run on the CPU if they have no GPU kernel.') flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer ' 'replicas.') flags.DEFINE_integer('ps_tasks', 0, 'Number of parameter server tasks. If None, does not use ' 'a parameter server.') flags.DEFINE_string('train_dir', '', 'Directory to save the checkpoints and training summaries.') flags.DEFINE_string('pipeline_config_path', '', 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' 'file. If provided, other configs are ignored') flags.DEFINE_string('train_config_path', '', 'Path to a train_pb2.TrainConfig config file.') flags.DEFINE_string('input_config_path', '', 'Path to an input_reader_pb2.InputReader config file.') flags.DEFINE_string('model_config_path', '', 'Path to a model_pb2.DetectionModel config file.') FLAGS = flags.FLAGS @deprecated(None, 'Use object_detection/model_main.py.') def main(_): assert FLAGS.train_dir, '`train_dir` is missing.' 
if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir) if FLAGS.pipeline_config_path: configs = config_util.get_configs_from_pipeline_file( FLAGS.pipeline_config_path) if FLAGS.task == 0: tf.gfile.Copy(FLAGS.pipeline_config_path, os.path.join(FLAGS.train_dir, 'pipeline.config'), overwrite=True) else: configs = config_util.get_configs_from_multiple_files( model_config_path=FLAGS.model_config_path, train_config_path=FLAGS.train_config_path, train_input_config_path=FLAGS.input_config_path) if FLAGS.task == 0: for name, config in [('model.config', FLAGS.model_config_path), ('train.config', FLAGS.train_config_path), ('input.config', FLAGS.input_config_path)]: tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name), overwrite=True) model_config = configs['model'] train_config = configs['train_config'] input_config = configs['train_input_config'] model_fn = functools.partial( model_builder.build, model_config=model_config, is_training=True) def get_next(config): return dataset_builder.make_initializable_iterator( dataset_builder.build(config)).get_next() create_input_dict_fn = functools.partial(get_next, input_config) env = json.loads(os.environ.get('TF_CONFIG', '{}')) cluster_data = env.get('cluster', None) cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None task_data = env.get('task', None) or {'type': 'master', 'index': 0} task_info = type('TaskSpec', (object,), task_data) # Parameters for a single worker. ps_tasks = 0 worker_replicas = 1 worker_job_name = 'lonely_worker' task = 0 is_chief = True master = '' if cluster_data and 'worker' in cluster_data: # Number of total worker replicas include "worker"s and the "master". worker_replicas = len(cluster_data['worker']) + 1 if cluster_data and 'ps' in cluster_data: ps_tasks = len(cluster_data['ps']) if worker_replicas > 1 and ps_tasks < 1: raise ValueError('At least 1 ps task is needed for distributed training.') if worker_replicas >= 1 and ps_tasks > 0: # Set up distributed training. server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc', job_name=task_info.type, task_index=task_info.index) if task_info.type == 'ps': server.join() return worker_job_name = '%s/task:%d' % (task_info.type, task_info.index) task = task_info.index is_chief = (task_info.type == 'master') master = server.target graph_rewriter_fn = None if 'graph_rewriter_config' in configs: graph_rewriter_fn = graph_rewriter_builder.build( configs['graph_rewriter_config'], is_training=True) trainer.train( create_input_dict_fn, model_fn, train_config, master, task, FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks, worker_job_name, is_chief, FLAGS.train_dir, graph_hook_fn=graph_rewriter_fn) if __name__ == '__main__': tf.app.run()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/train.py
train.py
import logging import tensorflow.compat.v1 as tf from object_detection import eval_util from object_detection.core import prefetcher from object_detection.core import standard_fields as fields from object_detection.metrics import coco_evaluation from object_detection.utils import object_detection_evaluation # A dictionary of metric names to classes that implement the metric. The classes # in the dictionary must implement # utils.object_detection_evaluation.DetectionEvaluator interface. EVAL_METRICS_CLASS_DICT = { 'pascal_voc_detection_metrics': object_detection_evaluation.PascalDetectionEvaluator, 'weighted_pascal_voc_detection_metrics': object_detection_evaluation.WeightedPascalDetectionEvaluator, 'pascal_voc_instance_segmentation_metrics': object_detection_evaluation.PascalInstanceSegmentationEvaluator, 'weighted_pascal_voc_instance_segmentation_metrics': object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator, 'oid_V2_detection_metrics': object_detection_evaluation.OpenImagesDetectionEvaluator, # DEPRECATED: please use oid_V2_detection_metrics instead 'open_images_V2_detection_metrics': object_detection_evaluation.OpenImagesDetectionEvaluator, 'coco_detection_metrics': coco_evaluation.CocoDetectionEvaluator, 'coco_mask_metrics': coco_evaluation.CocoMaskEvaluator, 'oid_challenge_detection_metrics': object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, # DEPRECATED: please use oid_challenge_detection_metrics instead 'oid_challenge_object_detection_metrics': object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, 'oid_challenge_segmentation_metrics': object_detection_evaluation .OpenImagesInstanceSegmentationChallengeEvaluator, } EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics' def _extract_predictions_and_losses(model, create_input_dict_fn, ignore_groundtruth=False): """Constructs tensorflow detection graph and returns output tensors. Args: model: model to perform predictions with. create_input_dict_fn: function to create input tensor dictionaries. ignore_groundtruth: whether groundtruth should be ignored. Returns: prediction_groundtruth_dict: A dictionary with postprocessed tensors (keyed by standard_fields.DetectionResultsFields) and optional groundtruth tensors (keyed by standard_fields.InputDataFields). losses_dict: A dictionary containing detection losses. This is empty when ignore_groundtruth is true. 
""" input_dict = create_input_dict_fn() prefetch_queue = prefetcher.prefetch(input_dict, capacity=500) input_dict = prefetch_queue.dequeue() original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0) preprocessed_image, true_image_shapes = model.preprocess( tf.cast(original_image, dtype=tf.float32)) prediction_dict = model.predict(preprocessed_image, true_image_shapes) detections = model.postprocess(prediction_dict, true_image_shapes) groundtruth = None losses_dict = {} if not ignore_groundtruth: groundtruth = { fields.InputDataFields.groundtruth_boxes: input_dict[fields.InputDataFields.groundtruth_boxes], fields.InputDataFields.groundtruth_classes: input_dict[fields.InputDataFields.groundtruth_classes], fields.InputDataFields.groundtruth_area: input_dict[fields.InputDataFields.groundtruth_area], fields.InputDataFields.groundtruth_is_crowd: input_dict[fields.InputDataFields.groundtruth_is_crowd], fields.InputDataFields.groundtruth_difficult: input_dict[fields.InputDataFields.groundtruth_difficult] } if fields.InputDataFields.groundtruth_group_of in input_dict: groundtruth[fields.InputDataFields.groundtruth_group_of] = ( input_dict[fields.InputDataFields.groundtruth_group_of]) groundtruth_masks_list = None if fields.DetectionResultFields.detection_masks in detections: groundtruth[fields.InputDataFields.groundtruth_instance_masks] = ( input_dict[fields.InputDataFields.groundtruth_instance_masks]) groundtruth_masks_list = [ input_dict[fields.InputDataFields.groundtruth_instance_masks]] groundtruth_keypoints_list = None if fields.DetectionResultFields.detection_keypoints in detections: groundtruth[fields.InputDataFields.groundtruth_keypoints] = ( input_dict[fields.InputDataFields.groundtruth_keypoints]) groundtruth_keypoints_list = [ input_dict[fields.InputDataFields.groundtruth_keypoints]] label_id_offset = 1 model.provide_groundtruth( [input_dict[fields.InputDataFields.groundtruth_boxes]], [tf.one_hot(input_dict[fields.InputDataFields.groundtruth_classes] - label_id_offset, depth=model.num_classes)], groundtruth_masks_list, groundtruth_keypoints_list) losses_dict.update(model.loss(prediction_dict, true_image_shapes)) result_dict = eval_util.result_dict_for_single_example( original_image, input_dict[fields.InputDataFields.source_id], detections, groundtruth, class_agnostic=( fields.DetectionResultFields.detection_classes not in detections), scale_to_absolute=True) return result_dict, losses_dict def get_evaluators(eval_config, categories): """Returns the evaluator class according to eval_config, valid for categories. Args: eval_config: evaluation configurations. categories: a list of categories to evaluate. Returns: An list of instances of DetectionEvaluator. Raises: ValueError: if metric is not in the metric class dictionary. 
""" eval_metric_fn_keys = eval_config.metrics_set if not eval_metric_fn_keys: eval_metric_fn_keys = [EVAL_DEFAULT_METRIC] evaluators_list = [] for eval_metric_fn_key in eval_metric_fn_keys: if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT: raise ValueError('Metric not found: {}'.format(eval_metric_fn_key)) if eval_metric_fn_key == 'oid_challenge_object_detection_metrics': logging.warning( 'oid_challenge_object_detection_metrics is deprecated; ' 'use oid_challenge_detection_metrics instead' ) if eval_metric_fn_key == 'oid_V2_detection_metrics': logging.warning( 'open_images_V2_detection_metrics is deprecated; ' 'use oid_V2_detection_metrics instead' ) evaluators_list.append( EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories)) return evaluators_list def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories, checkpoint_dir, eval_dir, graph_hook_fn=None, evaluator_list=None): """Evaluation function for detection models. Args: create_input_dict_fn: a function to create a tensor input dictionary. create_model_fn: a function that creates a DetectionModel. eval_config: a eval_pb2.EvalConfig protobuf. categories: a list of category dictionaries. Each dict in the list should have an integer 'id' field and string 'name' field. checkpoint_dir: directory to load the checkpoints to evaluate from. eval_dir: directory to write evaluation metrics summary to. graph_hook_fn: Optional function that is called after the training graph is completely built. This is helpful to perform additional changes to the training graph such as optimizing batchnorm. The function should modify the default graph. evaluator_list: Optional list of instances of DetectionEvaluator. If not given, this list of metrics is created according to the eval_config. Returns: metrics: A dictionary containing metric names and values from the latest run. """ model = create_model_fn() if eval_config.ignore_groundtruth and not eval_config.export_path: logging.fatal('If ignore_groundtruth=True then an export_path is ' 'required. Aborting!!!') tensor_dict, losses_dict = _extract_predictions_and_losses( model=model, create_input_dict_fn=create_input_dict_fn, ignore_groundtruth=eval_config.ignore_groundtruth) def _process_batch(tensor_dict, sess, batch_index, counters, losses_dict=None): """Evaluates tensors in tensor_dict, losses_dict and visualizes examples. This function calls sess.run on tensor_dict, evaluating the original_image tensor only on the first K examples and visualizing detections overlaid on this original_image. Args: tensor_dict: a dictionary of tensors sess: tensorflow session batch_index: the index of the batch amongst all batches in the run. counters: a dictionary holding 'success' and 'skipped' fields which can be updated to keep track of number of successful and failed runs, respectively. If these fields are not updated, then the success/skipped counter values shown at the end of evaluation will be incorrect. losses_dict: Optional dictonary of scalar loss tensors. Returns: result_dict: a dictionary of numpy arrays result_losses_dict: a dictionary of scalar losses. This is empty if input losses_dict is None. 
""" try: if not losses_dict: losses_dict = {} result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict]) counters['success'] += 1 except tf.errors.InvalidArgumentError: logging.info('Skipping image') counters['skipped'] += 1 return {}, {} global_step = tf.train.global_step(sess, tf.train.get_global_step()) if batch_index < eval_config.num_visualizations: tag = 'image-{}'.format(batch_index) eval_util.visualize_detection_results( result_dict, tag, global_step, categories=categories, summary_dir=eval_dir, export_dir=eval_config.visualization_export_dir, show_groundtruth=eval_config.visualize_groundtruth_boxes, groundtruth_box_visualization_color=eval_config. groundtruth_box_visualization_color, min_score_thresh=eval_config.min_score_threshold, max_num_predictions=eval_config.max_num_boxes_to_visualize, skip_scores=eval_config.skip_scores, skip_labels=eval_config.skip_labels, keep_image_id_for_visualization_export=eval_config. keep_image_id_for_visualization_export) return result_dict, result_losses_dict if graph_hook_fn: graph_hook_fn() variables_to_restore = tf.global_variables() global_step = tf.train.get_or_create_global_step() variables_to_restore.append(global_step) if eval_config.use_moving_averages: variable_averages = tf.train.ExponentialMovingAverage(0.0) variables_to_restore = variable_averages.variables_to_restore() saver = tf.train.Saver(variables_to_restore) def _restore_latest_checkpoint(sess): latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) saver.restore(sess, latest_checkpoint) if not evaluator_list: evaluator_list = get_evaluators(eval_config, categories) metrics = eval_util.repeated_checkpoint_run( tensor_dict=tensor_dict, summary_dir=eval_dir, evaluators=evaluator_list, batch_processor=_process_batch, checkpoint_dirs=[checkpoint_dir], variables_to_restore=None, restore_fn=_restore_latest_checkpoint, num_batches=eval_config.num_examples, eval_interval_secs=eval_config.eval_interval_secs, max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else eval_config.max_evals if eval_config.max_evals else None), master=eval_config.eval_master, save_graph=eval_config.save_graph, save_graph_dir=(eval_dir if eval_config.save_graph else ''), losses_dict=losses_dict, eval_export_path=eval_config.export_path) return metrics
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/evaluator.py
evaluator.py
r"""Evaluation executable for detection models. This executable is used to evaluate DetectionModels. There are two ways of configuring the eval job. 1) A single pipeline_pb2.TrainEvalPipelineConfig file maybe specified instead. In this mode, the --eval_training_data flag may be given to force the pipeline to evaluate on training data instead. Example usage: ./eval \ --logtostderr \ --checkpoint_dir=path/to/checkpoint_dir \ --eval_dir=path/to/eval_dir \ --pipeline_config_path=pipeline_config.pbtxt 2) Three configuration files may be provided: a model_pb2.DetectionModel configuration file to define what type of DetectionModel is being evaluated, an input_reader_pb2.InputReader file to specify what data the model is evaluating and an eval_pb2.EvalConfig file to configure evaluation parameters. Example usage: ./eval \ --logtostderr \ --checkpoint_dir=path/to/checkpoint_dir \ --eval_dir=path/to/eval_dir \ --eval_config_path=eval_config.pbtxt \ --model_config_path=model_config.pbtxt \ --input_config_path=eval_input_config.pbtxt """ import functools import os import tensorflow.compat.v1 as tf from tensorflow.python.util.deprecation import deprecated from object_detection.builders import dataset_builder from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.legacy import evaluator from object_detection.utils import config_util from object_detection.utils import label_map_util tf.logging.set_verbosity(tf.logging.INFO) flags = tf.app.flags flags.DEFINE_boolean('eval_training_data', False, 'If training data should be evaluated for this job.') flags.DEFINE_string( 'checkpoint_dir', '', 'Directory containing checkpoints to evaluate, typically ' 'set to `train_dir` used in the training job.') flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.') flags.DEFINE_string( 'pipeline_config_path', '', 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' 'file. If provided, other configs are ignored') flags.DEFINE_string('eval_config_path', '', 'Path to an eval_pb2.EvalConfig config file.') flags.DEFINE_string('input_config_path', '', 'Path to an input_reader_pb2.InputReader config file.') flags.DEFINE_string('model_config_path', '', 'Path to a model_pb2.DetectionModel config file.') flags.DEFINE_boolean( 'run_once', False, 'Option to only run a single pass of ' 'evaluation. Overrides the `max_evals` parameter in the ' 'provided config.') FLAGS = flags.FLAGS @deprecated(None, 'Use object_detection/model_main.py.') def main(unused_argv): assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.' assert FLAGS.eval_dir, '`eval_dir` is missing.' 
tf.gfile.MakeDirs(FLAGS.eval_dir) if FLAGS.pipeline_config_path: configs = config_util.get_configs_from_pipeline_file( FLAGS.pipeline_config_path) tf.gfile.Copy( FLAGS.pipeline_config_path, os.path.join(FLAGS.eval_dir, 'pipeline.config'), overwrite=True) else: configs = config_util.get_configs_from_multiple_files( model_config_path=FLAGS.model_config_path, eval_config_path=FLAGS.eval_config_path, eval_input_config_path=FLAGS.input_config_path) for name, config in [('model.config', FLAGS.model_config_path), ('eval.config', FLAGS.eval_config_path), ('input.config', FLAGS.input_config_path)]: tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True) model_config = configs['model'] eval_config = configs['eval_config'] input_config = configs['eval_input_config'] if FLAGS.eval_training_data: input_config = configs['train_input_config'] model_fn = functools.partial( model_builder.build, model_config=model_config, is_training=False) def get_next(config): return dataset_builder.make_initializable_iterator( dataset_builder.build(config)).get_next() create_input_dict_fn = functools.partial(get_next, input_config) categories = label_map_util.create_categories_from_labelmap( input_config.label_map_path) if FLAGS.run_once: eval_config.max_evals = 1 graph_rewriter_fn = None if 'graph_rewriter_config' in configs: graph_rewriter_fn = graph_rewriter_builder.build( configs['graph_rewriter_config'], is_training=False) evaluator.evaluate( create_input_dict_fn, model_fn, eval_config, categories, FLAGS.checkpoint_dir, FLAGS.eval_dir, graph_hook_fn=graph_rewriter_fn) if __name__ == '__main__': tf.app.run()
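# --- Illustrative sketch (not part of eval.py) ---------------------------------
# main() above builds `categories` from the label map referenced by the input
# reader. Below is a minimal, hypothetical two-class label map loaded with the
# same helper used in main(); real label maps ship alongside the dataset
# configs, and ids start at 1.
from object_detection.utils import label_map_util

label_map_text = """
item {
  id: 1
  name: 'cat'
}
item {
  id: 2
  name: 'dog'
}
"""
label_map_path = '/tmp/label_map.pbtxt'   # placeholder path
with open(label_map_path, 'w') as f:
  f.write(label_map_text)

categories = label_map_util.create_categories_from_labelmap(label_map_path)
print(categories)  # [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]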
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/eval.py
eval.py
import functools import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.builders import optimizer_builder from object_detection.builders import preprocessor_builder from object_detection.core import batcher from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.utils import ops as util_ops from object_detection.utils import variables_helper from deployment import model_deploy def create_input_queue(batch_size_per_clone, create_tensor_dict_fn, batch_queue_capacity, num_batch_queue_threads, prefetch_queue_capacity, data_augmentation_options): """Sets up reader, prefetcher and returns input queue. Args: batch_size_per_clone: batch size to use per clone. create_tensor_dict_fn: function to create tensor dictionary. batch_queue_capacity: maximum number of elements to store within a queue. num_batch_queue_threads: number of threads to use for batching. prefetch_queue_capacity: maximum capacity of the queue used to prefetch assembled batches. data_augmentation_options: a list of tuples, where each tuple contains a data augmentation function and a dictionary containing arguments and their values (see preprocessor.py). Returns: input queue: a batcher.BatchQueue object holding enqueued tensor_dicts (which hold images, boxes and targets). To get a batch of tensor_dicts, call input_queue.Dequeue(). """ tensor_dict = create_tensor_dict_fn() tensor_dict[fields.InputDataFields.image] = tf.expand_dims( tensor_dict[fields.InputDataFields.image], 0) images = tensor_dict[fields.InputDataFields.image] float_images = tf.cast(images, dtype=tf.float32) tensor_dict[fields.InputDataFields.image] = float_images include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks in tensor_dict) include_keypoints = (fields.InputDataFields.groundtruth_keypoints in tensor_dict) include_multiclass_scores = (fields.InputDataFields.multiclass_scores in tensor_dict) if data_augmentation_options: tensor_dict = preprocessor.preprocess( tensor_dict, data_augmentation_options, func_arg_map=preprocessor.get_default_func_arg_map( include_label_weights=True, include_multiclass_scores=include_multiclass_scores, include_instance_masks=include_instance_masks, include_keypoints=include_keypoints)) input_queue = batcher.BatchQueue( tensor_dict, batch_size=batch_size_per_clone, batch_queue_capacity=batch_queue_capacity, num_batch_queue_threads=num_batch_queue_threads, prefetch_queue_capacity=prefetch_queue_capacity) return input_queue def get_inputs(input_queue, num_classes, merge_multiple_label_boxes=False, use_multiclass_scores=False): """Dequeues batch and constructs inputs to object detection model. Args: input_queue: BatchQueue object holding enqueued tensor_dicts. num_classes: Number of classes. merge_multiple_label_boxes: Whether to merge boxes with multiple labels or not. Defaults to false. Merged boxes are represented with a single box and a k-hot encoding of the multiple labels associated with the boxes. use_multiclass_scores: Whether to use multiclass scores instead of groundtruth_classes. Returns: images: a list of 3-D float tensor of images. image_keys: a list of string keys for the images. locations_list: a list of tensors of shape [num_boxes, 4] containing the corners of the groundtruth boxes. classes_list: a list of padded one-hot (or K-hot) float32 tensors containing target classes. 
masks_list: a list of 3-D float tensors of shape [num_boxes, image_height, image_width] containing instance masks for objects if present in the input_queue. Else returns None. keypoints_list: a list of 3-D float tensors of shape [num_boxes, num_keypoints, 2] containing keypoints for objects if present in the input queue. Else returns None. weights_lists: a list of 1-D float32 tensors of shape [num_boxes] containing groundtruth weight for each box. """ read_data_list = input_queue.dequeue() label_id_offset = 1 def extract_images_and_targets(read_data): """Extract images and targets from the input dict.""" image = read_data[fields.InputDataFields.image] key = '' if fields.InputDataFields.source_id in read_data: key = read_data[fields.InputDataFields.source_id] location_gt = read_data[fields.InputDataFields.groundtruth_boxes] classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes], tf.int32) classes_gt -= label_id_offset if merge_multiple_label_boxes and use_multiclass_scores: raise ValueError( 'Using both merge_multiple_label_boxes and use_multiclass_scores is' 'not supported' ) if merge_multiple_label_boxes: location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels( location_gt, classes_gt, num_classes) classes_gt = tf.cast(classes_gt, tf.float32) elif use_multiclass_scores: classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores], tf.float32) else: classes_gt = util_ops.padded_one_hot_encoding( indices=classes_gt, depth=num_classes, left_pad=0) masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks) keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints) if (merge_multiple_label_boxes and ( masks_gt is not None or keypoints_gt is not None)): raise NotImplementedError('Multi-label support is only for boxes.') weights_gt = read_data.get( fields.InputDataFields.groundtruth_weights) return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt, weights_gt) return zip(*map(extract_images_and_targets, read_data_list)) def _create_losses(input_queue, create_model_fn, train_config): """Creates loss function for a DetectionModel. Args: input_queue: BatchQueue object holding enqueued tensor_dicts. create_model_fn: A function to create the DetectionModel. train_config: a train_pb2.TrainConfig protobuf. 
""" detection_model = create_model_fn() (images, _, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list, groundtruth_keypoints_list, groundtruth_weights_list) = get_inputs( input_queue, detection_model.num_classes, train_config.merge_multiple_label_boxes, train_config.use_multiclass_scores) preprocessed_images = [] true_image_shapes = [] for image in images: resized_image, true_image_shape = detection_model.preprocess(image) preprocessed_images.append(resized_image) true_image_shapes.append(true_image_shape) images = tf.concat(preprocessed_images, 0) true_image_shapes = tf.concat(true_image_shapes, 0) if any(mask is None for mask in groundtruth_masks_list): groundtruth_masks_list = None if any(keypoints is None for keypoints in groundtruth_keypoints_list): groundtruth_keypoints_list = None detection_model.provide_groundtruth( groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list, groundtruth_keypoints_list, groundtruth_weights_list=groundtruth_weights_list) prediction_dict = detection_model.predict(images, true_image_shapes) losses_dict = detection_model.loss(prediction_dict, true_image_shapes) for loss_tensor in losses_dict.values(): tf.losses.add_loss(loss_tensor) def train(create_tensor_dict_fn, create_model_fn, train_config, master, task, num_clones, worker_replicas, clone_on_cpu, ps_tasks, worker_job_name, is_chief, train_dir, graph_hook_fn=None): """Training function for detection models. Args: create_tensor_dict_fn: a function to create a tensor input dictionary. create_model_fn: a function that creates a DetectionModel and generates losses. train_config: a train_pb2.TrainConfig protobuf. master: BNS name of the TensorFlow master to use. task: The task id of this training instance. num_clones: The number of clones to run per machine. worker_replicas: The number of work replicas to train with. clone_on_cpu: True if clones should be forced to run on CPU. ps_tasks: Number of parameter server tasks. worker_job_name: Name of the worker job. is_chief: Whether this replica is the chief replica. train_dir: Directory to write checkpoints and training summaries to. graph_hook_fn: Optional function that is called after the inference graph is built (before optimization). This is helpful to perform additional changes to the training graph such as adding FakeQuant ops. The function should modify the default graph. Raises: ValueError: If both num_clones > 1 and train_config.sync_replicas is true. """ detection_model = create_model_fn() data_augmentation_options = [ preprocessor_builder.build(step) for step in train_config.data_augmentation_options] with tf.Graph().as_default(): # Build a configuration specifying multi-GPU and multi-replicas. deploy_config = model_deploy.DeploymentConfig( num_clones=num_clones, clone_on_cpu=clone_on_cpu, replica_id=task, num_replicas=worker_replicas, num_ps_tasks=ps_tasks, worker_job_name=worker_job_name) # Place the global step on the device storing the variables. with tf.device(deploy_config.variables_device()): global_step = slim.create_global_step() if num_clones != 1 and train_config.sync_replicas: raise ValueError('In Synchronous SGD mode num_clones must ', 'be 1. 
Found num_clones: {}'.format(num_clones)) batch_size = train_config.batch_size // num_clones if train_config.sync_replicas: batch_size //= train_config.replicas_to_aggregate with tf.device(deploy_config.inputs_device()): input_queue = create_input_queue( batch_size, create_tensor_dict_fn, train_config.batch_queue_capacity, train_config.num_batch_queue_threads, train_config.prefetch_queue_capacity, data_augmentation_options) # Gather initial summaries. # TODO(rathodv): See if summaries can be added/extracted from global tf # collections so that they don't have to be passed around. summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) global_summaries = set([]) model_fn = functools.partial(_create_losses, create_model_fn=create_model_fn, train_config=train_config) clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue]) first_clone_scope = clones[0].scope if graph_hook_fn: with tf.device(deploy_config.variables_device()): graph_hook_fn() # Gather update_ops from the first clone. These contain, for example, # the updates for the batch_norm variables created by model_fn. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) with tf.device(deploy_config.optimizer_device()): training_optimizer, optimizer_summary_vars = optimizer_builder.build( train_config.optimizer) for var in optimizer_summary_vars: tf.summary.scalar(var.op.name, var, family='LearningRate') sync_optimizer = None if train_config.sync_replicas: training_optimizer = tf.train.SyncReplicasOptimizer( training_optimizer, replicas_to_aggregate=train_config.replicas_to_aggregate, total_num_replicas=worker_replicas) sync_optimizer = training_optimizer with tf.device(deploy_config.optimizer_device()): regularization_losses = (None if train_config.add_regularization_loss else []) total_loss, grads_and_vars = model_deploy.optimize_clones( clones, training_optimizer, regularization_losses=regularization_losses) total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.') # Optionally multiply bias gradients by train_config.bias_grad_multiplier. if train_config.bias_grad_multiplier: biases_regex_list = ['.*/biases'] grads_and_vars = variables_helper.multiply_gradients_matching_regex( grads_and_vars, biases_regex_list, multiplier=train_config.bias_grad_multiplier) # Optionally freeze some layers by setting their gradients to be zero. if train_config.freeze_variables: grads_and_vars = variables_helper.freeze_gradients_matching_regex( grads_and_vars, train_config.freeze_variables) # Optionally clip gradients if train_config.gradient_clipping_by_norm > 0: with tf.name_scope('clip_grads'): grads_and_vars = slim.learning.clip_gradient_norms( grads_and_vars, train_config.gradient_clipping_by_norm) # Create gradient updates. grad_updates = training_optimizer.apply_gradients(grads_and_vars, global_step=global_step) update_ops.append(grad_updates) update_op = tf.group(*update_ops, name='update_barrier') with tf.control_dependencies([update_op]): train_tensor = tf.identity(total_loss, name='train_op') # Add summaries. for model_var in slim.get_model_variables(): global_summaries.add(tf.summary.histogram('ModelVars/' + model_var.op.name, model_var)) for loss_tensor in tf.losses.get_losses(): global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name, loss_tensor)) global_summaries.add( tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss())) # Add the summaries from the first clone. 
These contain the summaries # created by model_fn and either optimize_clones() or _gather_clone_loss(). summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope)) summaries |= global_summaries # Merge all summaries together. summary_op = tf.summary.merge(list(summaries), name='summary_op') # Soft placement allows placing on CPU ops without GPU implementation. session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) # Save checkpoints regularly. keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours saver = tf.train.Saver( keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours) # Create ops required to initialize the model from a given checkpoint. init_fn = None if train_config.fine_tune_checkpoint: if not train_config.fine_tune_checkpoint_type: # train_config.from_detection_checkpoint field is deprecated. For # backward compatibility, fine_tune_checkpoint_type is set based on # from_detection_checkpoint. if train_config.from_detection_checkpoint: train_config.fine_tune_checkpoint_type = 'detection' else: train_config.fine_tune_checkpoint_type = 'classification' var_map = detection_model.restore_map( fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type, load_all_detection_checkpoint_vars=( train_config.load_all_detection_checkpoint_vars)) available_var_map = (variables_helper. get_variables_available_in_checkpoint( var_map, train_config.fine_tune_checkpoint, include_global_step=False)) init_saver = tf.train.Saver(available_var_map) def initializer_fn(sess): init_saver.restore(sess, train_config.fine_tune_checkpoint) init_fn = initializer_fn slim.learning.train( train_tensor, logdir=train_dir, master=master, is_chief=is_chief, session_config=session_config, startup_delay_steps=train_config.startup_delay_steps, init_fn=init_fn, summary_op=summary_op, number_of_steps=( train_config.num_steps if train_config.num_steps else None), save_summaries_secs=120, sync_optimizer=sync_optimizer, saver=saver)
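# --- Illustrative sketch (not part of trainer.py) ------------------------------
# The fine-tuning branch in train() above keys off a handful of TrainConfig
# fields. The checkpoint path below is a placeholder; in practice these values
# usually come from the pipeline .config file rather than being set in Python.
from object_detection.protos import train_pb2

train_config = train_pb2.TrainConfig()
train_config.batch_size = 8                                 # split across num_clones
train_config.num_steps = 200000
train_config.fine_tune_checkpoint = '/path/to/model.ckpt'   # placeholder
train_config.fine_tune_checkpoint_type = 'detection'        # or 'classification'
train_config.load_all_detection_checkpoint_vars = True

# train() resolves the deprecated from_detection_checkpoint flag the same way
# whenever fine_tune_checkpoint_type is left unset:
if not train_config.fine_tune_checkpoint_type:
  train_config.fine_tune_checkpoint_type = (
      'detection' if train_config.from_detection_checkpoint else 'classification')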
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/legacy/trainer.py
trainer.py
r"""Infers detections on a TFRecord of TFExamples given an inference graph. Example usage: ./infer_detections \ --input_tfrecord_paths=/path/to/input/tfrecord1,/path/to/input/tfrecord2 \ --output_tfrecord_path=/path/to/output/detections.tfrecord \ --inference_graph=/path/to/frozen_weights_inference_graph.pb The output is a TFRecord of TFExamples. Each TFExample from the input is first augmented with detections from the inference graph and then copied to the output. The input and output nodes of the inference graph are expected to have the same types, shapes, and semantics, as the input and output nodes of graphs produced by export_inference_graph.py, when run with --input_type=image_tensor. The script can also discard the image pixels in the output. This greatly reduces the output size and can potentially accelerate reading data in subsequent processing steps that don't require the images (e.g. computing metrics). """ import itertools import tensorflow.compat.v1 as tf from object_detection.inference import detection_inference tf.flags.DEFINE_string('input_tfrecord_paths', None, 'A comma separated list of paths to input TFRecords.') tf.flags.DEFINE_string('output_tfrecord_path', None, 'Path to the output TFRecord.') tf.flags.DEFINE_string('inference_graph', None, 'Path to the inference graph with embedded weights.') tf.flags.DEFINE_boolean('discard_image_pixels', False, 'Discards the images in the output TFExamples. This' ' significantly reduces the output size and is useful' ' if the subsequent tools don\'t need access to the' ' images (e.g. when computing evaluation measures).') FLAGS = tf.flags.FLAGS def main(_): tf.logging.set_verbosity(tf.logging.INFO) required_flags = ['input_tfrecord_paths', 'output_tfrecord_path', 'inference_graph'] for flag_name in required_flags: if not getattr(FLAGS, flag_name): raise ValueError('Flag --{} is required'.format(flag_name)) with tf.Session() as sess: input_tfrecord_paths = [ v for v in FLAGS.input_tfrecord_paths.split(',') if v] tf.logging.info('Reading input from %d files', len(input_tfrecord_paths)) serialized_example_tensor, image_tensor = detection_inference.build_input( input_tfrecord_paths) tf.logging.info('Reading graph and building model...') (detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor) = detection_inference.build_inference_graph( image_tensor, FLAGS.inference_graph) tf.logging.info('Running inference and writing output to {}'.format( FLAGS.output_tfrecord_path)) sess.run(tf.local_variables_initializer()) tf.train.start_queue_runners() with tf.python_io.TFRecordWriter( FLAGS.output_tfrecord_path) as tf_record_writer: try: for counter in itertools.count(): tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10, counter) tf_example = detection_inference.infer_detections_and_add_to_example( serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor, FLAGS.discard_image_pixels) tf_record_writer.write(tf_example.SerializeToString()) except tf.errors.OutOfRangeError: tf.logging.info('Finished processing records') if __name__ == '__main__': tf.app.run()
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/inference/infer_detections.py
infer_detections.py
"""Utility functions for detection inference.""" from __future__ import division import tensorflow.compat.v1 as tf from object_detection.core import standard_fields def build_input(tfrecord_paths): """Builds the graph's input. Args: tfrecord_paths: List of paths to the input TFRecords Returns: serialized_example_tensor: The next serialized example. String scalar Tensor image_tensor: The decoded image of the example. Uint8 tensor, shape=[1, None, None,3] """ filename_queue = tf.train.string_input_producer( tfrecord_paths, shuffle=False, num_epochs=1) tf_record_reader = tf.TFRecordReader() _, serialized_example_tensor = tf_record_reader.read(filename_queue) features = tf.parse_single_example( serialized_example_tensor, features={ standard_fields.TfExampleFields.image_encoded: tf.FixedLenFeature([], tf.string), }) encoded_image = features[standard_fields.TfExampleFields.image_encoded] image_tensor = tf.image.decode_image(encoded_image, channels=3) image_tensor.set_shape([None, None, 3]) image_tensor = tf.expand_dims(image_tensor, 0) return serialized_example_tensor, image_tensor def build_inference_graph(image_tensor, inference_graph_path): """Loads the inference graph and connects it to the input image. Args: image_tensor: The input image. uint8 tensor, shape=[1, None, None, 3] inference_graph_path: Path to the inference graph with embedded weights Returns: detected_boxes_tensor: Detected boxes. Float tensor, shape=[num_detections, 4] detected_scores_tensor: Detected scores. Float tensor, shape=[num_detections] detected_labels_tensor: Detected labels. Int64 tensor, shape=[num_detections] """ with tf.gfile.Open(inference_graph_path, 'rb') as graph_def_file: graph_content = graph_def_file.read() graph_def = tf.GraphDef() graph_def.MergeFromString(graph_content) tf.import_graph_def( graph_def, name='', input_map={'image_tensor': image_tensor}) g = tf.get_default_graph() num_detections_tensor = tf.squeeze( g.get_tensor_by_name('num_detections:0'), 0) num_detections_tensor = tf.cast(num_detections_tensor, tf.int32) detected_boxes_tensor = tf.squeeze( g.get_tensor_by_name('detection_boxes:0'), 0) detected_boxes_tensor = detected_boxes_tensor[:num_detections_tensor] detected_scores_tensor = tf.squeeze( g.get_tensor_by_name('detection_scores:0'), 0) detected_scores_tensor = detected_scores_tensor[:num_detections_tensor] detected_labels_tensor = tf.squeeze( g.get_tensor_by_name('detection_classes:0'), 0) detected_labels_tensor = tf.cast(detected_labels_tensor, tf.int64) detected_labels_tensor = detected_labels_tensor[:num_detections_tensor] return detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor def infer_detections_and_add_to_example( serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor, discard_image_pixels): """Runs the supplied tensors and adds the inferred detections to the example. Args: serialized_example_tensor: Serialized TF example. Scalar string tensor detected_boxes_tensor: Detected boxes. Float tensor, shape=[num_detections, 4] detected_scores_tensor: Detected scores. Float tensor, shape=[num_detections] detected_labels_tensor: Detected labels. Int64 tensor, shape=[num_detections] discard_image_pixels: If true, discards the image from the result Returns: The de-serialized TF example augmented with the inferred detections. 
""" tf_example = tf.train.Example() (serialized_example, detected_boxes, detected_scores, detected_classes) = tf.get_default_session().run([ serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor ]) detected_boxes = detected_boxes.T tf_example.ParseFromString(serialized_example) feature = tf_example.features.feature feature[standard_fields.TfExampleFields. detection_score].float_list.value[:] = detected_scores feature[standard_fields.TfExampleFields. detection_bbox_ymin].float_list.value[:] = detected_boxes[0] feature[standard_fields.TfExampleFields. detection_bbox_xmin].float_list.value[:] = detected_boxes[1] feature[standard_fields.TfExampleFields. detection_bbox_ymax].float_list.value[:] = detected_boxes[2] feature[standard_fields.TfExampleFields. detection_bbox_xmax].float_list.value[:] = detected_boxes[3] feature[standard_fields.TfExampleFields. detection_class_label].int64_list.value[:] = detected_classes if discard_image_pixels: del feature[standard_fields.TfExampleFields.image_encoded] return tf_example
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/inference/detection_inference.py
detection_inference.py
"""SSD feature extractors based on Resnet v1 and PPN architectures.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import resnet_v1 class _SSDResnetPpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD feature extractor based on resnet architecture and PPN.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_base_fn, resnet_scope_name, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, base_feature_map_depth=1024, num_layers=6, override_base_feature_extractor_hyperparams=False, use_bounded_activations=False): """Resnet based PPN Feature Extractor for SSD Models. See go/pooling-pyramid for more details about PPN. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. resnet_base_fn: base resnet network to use. resnet_scope_name: scope name to construct resnet reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. base_feature_map_depth: Depth of the base feature before the max pooling. num_layers: Number of layers used to make predictions. They are pooled from the base feature. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. use_bounded_activations: Whether or not to use bounded activations for resnet v1 bottleneck residual unit. Bounded activations better lend themselves to quantized inference. """ super(_SSDResnetPpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams) self._resnet_base_fn = resnet_base_fn self._resnet_scope_name = resnet_scope_name self._base_feature_map_depth = base_feature_map_depth self._num_layers = num_layers self._use_bounded_activations = use_bounded_activations def _filter_features(self, image_features): # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead # of munging the scope here. filtered_image_features = dict({}) for key, feature in image_features.items(): feature_name = key.split('/')[-1] if feature_name in ['block2', 'block3', 'block4']: filtered_image_features[feature_name] = feature return filtered_image_features def preprocess(self, resized_inputs): """SSD preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] Raises: ValueError: depth multiplier is not supported. """ if self._depth_multiplier != 1.0: raise ValueError('Depth multiplier not supported.') preprocessed_inputs = shape_utils.check_min_image_dim( 129, preprocessed_inputs) with tf.variable_scope( self._resnet_scope_name, reuse=self._reuse_weights) as scope: with slim.arg_scope(resnet_v1.resnet_arg_scope()): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): with slim.arg_scope( [resnet_v1.bottleneck], use_bounded_activations=self._use_bounded_activations): _, activations = self._resnet_base_fn( inputs=ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), num_classes=None, is_training=None, global_pool=False, output_stride=None, store_non_strided_activations=True, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.pooling_pyramid_feature_maps( base_feature_map_depth=self._base_feature_map_depth, num_layers=self._num_layers, image_features={ 'image_features': self._filter_features(activations)['block3'] }) return list(feature_maps.values()) class SSDResnet50V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): """PPN Resnet50 v1 Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """Resnet50 v1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. 
""" super(SSDResnet50V1PpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50', reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams=( override_base_feature_extractor_hyperparams)) class SSDResnet101V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): """PPN Resnet101 v1 Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """Resnet101 v1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet101V1PpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_101, 'resnet_v1_101', reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams=( override_base_feature_extractor_hyperparams)) class SSDResnet152V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): """PPN Resnet152 v1 Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """Resnet152 v1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet152V1PpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_152, 'resnet_v1_152', reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams=( override_base_feature_extractor_hyperparams))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py
ssd_resnet_v1_ppn_feature_extractor.py
import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import mobilenet_v1 class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV1 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """MobileNetV1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDMobileNetV1FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=None, regularize_depthwise=True)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
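# --- Illustrative sketch (not part of this feature extractor) ------------------
# preprocess() above maps pixel values into [-1, 1] via (2.0 / 255.0) * x - 1.0.
# Endpoint check to make the scaling explicit:
for pixel in (0.0, 127.5, 255.0):
  print(pixel, (2.0 / 255.0) * pixel - 1.0)
# 0.0   -> -1.0
# 127.5 ->  0.0
# 255.0 ->  1.0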
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_feature_extractor.py
ssd_mobilenet_v1_feature_extractor.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import mobilenet_v2 from object_detection.models.keras_models import model_utils from object_detection.utils import ops from object_detection.utils import shape_utils # Total number of blocks in Mobilenet_V2 base network. NUM_LAYERS = 19 # A modified config of mobilenet v2 that makes it more detection friendly. def _create_modified_mobilenet_config(): last_conv = model_utils.ConvDefs(conv_name='Conv_1', filters=256) return [last_conv] class SSDMobileNetV2FpnKerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using Keras-based MobilenetV2 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False, name=None): """SSD Keras based FPN feature extractor Mobilenet v2 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v2 layers {layer_4, layer_7, layer_14, layer_19}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDMobileNetV2FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() self._use_native_resize_op = use_native_resize_op self._feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19'] self.classification_backbone = None self._fpn_features_generator = None self._coarse_feature_layers = [] def build(self, input_shape): full_mobilenet_v2 = mobilenet_v2.mobilenet_v2( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), weights=None, use_explicit_padding=self._use_explicit_padding, alpha=self._depth_multiplier, min_depth=self._min_depth, include_top=False) layer_names = [layer.name for layer in full_mobilenet_v2.layers] outputs = [] for layer_idx in [4, 7, 14]: add_name = 'block_{}_add'.format(layer_idx - 2) project_name = 'block_{}_project_BN'.format(layer_idx - 2) output_layer_name = add_name if add_name in layer_names else project_name outputs.append(full_mobilenet_v2.get_layer(output_layer_name).output) layer_19 = full_mobilenet_v2.get_layer(name='out_relu').output outputs.append(layer_19) self.classification_backbone = tf.keras.Model( inputs=full_mobilenet_v2.inputs, outputs=outputs) # pylint:disable=g-long-lambda self._depth_fn = lambda d: max( int(d * self._depth_multiplier), self._min_depth) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) # Construct coarse feature layers padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 stride = 2 for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1): coarse_feature_layers = [] if self._use_explicit_padding: def fixed_padding(features, kernel_size=kernel_size): return ops.fixed_padding(features, kernel_size) coarse_feature_layers.append(tf.keras.layers.Lambda( fixed_padding, name='fixed_padding')) layer_name = 'bottom_up_Conv2d_{}'.format( i - self._base_fpn_max_level + NUM_LAYERS) conv_block = feature_map_generators.create_conv_block( self._use_depthwise, kernel_size, padding, stride, layer_name, self._conv_hyperparams, self._is_training, self._freeze_batchnorm, self._depth_fn(self._additional_layer_depth)) coarse_feature_layers.extend(conv_block) self._coarse_feature_layers.append(coarse_feature_layers) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. 
Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append(self._feature_blocks[level - 2]) feature_start_index = len(self._feature_blocks) - self._num_levels fpn_input_image_features = [ (key, image_features[feature_start_index + index]) for index, key in enumerate(feature_block_list)] fpn_features = self._fpn_features_generator(fpn_input_image_features) feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( self._feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( self._feature_blocks[self._base_fpn_max_level - 2])] for coarse_feature_layers in self._coarse_feature_layers: for layer in coarse_feature_layers: last_feature_map = layer(last_feature_map) feature_maps.append(last_feature_map) return feature_maps
ssd_mobilenet_v2_fpn_keras_feature_extractor.py
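To make the level bookkeeping in `build()` and `_extract_features()` above easier to follow, here is a small standalone sketch (illustrative only; `fpn_layout` and its return values are hypothetical helpers, not part of this module) of how FPN levels map onto MobileNet v2 endpoints and how many extra stride-2 blocks are appended:

# Levels 2..5 map onto these MobileNet v2 endpoints (index = level - 2).
FEATURE_BLOCKS = ['layer_4', 'layer_7', 'layer_14', 'layer_19']
NUM_LAYERS = 19  # total MobileNet v2 blocks, matching the constant above


def fpn_layout(fpn_min_level=3, fpn_max_level=7):
    """Returns the backbone endpoints fed to the FPN and the extra block names."""
    base_fpn_max_level = min(fpn_max_level, 5)
    fpn_inputs = [FEATURE_BLOCKS[level - 2]
                  for level in range(fpn_min_level, base_fpn_max_level + 1)]
    # Levels above the backbone are synthesized with stride-2 conv blocks.
    coarse_blocks = [
        'bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + NUM_LAYERS)
        for i in range(base_fpn_max_level + 1, fpn_max_level + 1)]
    return fpn_inputs, coarse_blocks


print(fpn_layout(3, 7))
# (['layer_7', 'layer_14', 'layer_19'],
#  ['bottom_up_Conv2d_20', 'bottom_up_Conv2d_21'])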
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import variables_helper # pylint: disable=g-import-not-at-top try: from nets.nasnet import nasnet from nets.nasnet import nasnet_utils except: # pylint: disable=bare-except pass # pylint: enable=g-import-not-at-top arg_scope = slim.arg_scope def nasnet_large_arg_scope_for_detection(is_batch_norm_training=False): """Defines the default arg scope for the NASNet-A Large for object detection. This provides a small edit to switch batch norm training on and off. Args: is_batch_norm_training: Boolean indicating whether to train with batch norm. Returns: An `arg_scope` to use for the NASNet Large Model. """ imagenet_scope = nasnet.nasnet_large_arg_scope() with arg_scope(imagenet_scope): with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: return sc # Note: This is largely a copy of _build_nasnet_base inside nasnet.py but # with special edits to remove instantiation of the stem and the special # ability to receive as input a pair of hidden states. def _build_nasnet_base(hidden_previous, hidden, normal_cell, reduction_cell, hparams, true_cell_num, start_cell_num): """Constructs a NASNet image model.""" # Find where to place the reduction cells or stride normal cells reduction_indices = nasnet_utils.calc_reduction_layers( hparams.num_cells, hparams.num_reduction_layers) # Note: The None is prepended to match the behavior of _imagenet_stem() cell_outputs = [None, hidden_previous, hidden] net = hidden # NOTE: In the nasnet.py code, filter_scaling starts at 1.0. We instead # start at 2.0 because 1 reduction cell has been created which would # update the filter_scaling to 2.0. filter_scaling = 2.0 # Run the cells for cell_num in range(start_cell_num, hparams.num_cells): stride = 1 if hparams.skip_reduction_layer_input: prev_layer = cell_outputs[-2] if cell_num in reduction_indices: filter_scaling *= hparams.filter_scaling_rate net = reduction_cell( net, scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)), filter_scaling=filter_scaling, stride=2, prev_layer=cell_outputs[-2], cell_num=true_cell_num) true_cell_num += 1 cell_outputs.append(net) if not hparams.skip_reduction_layer_input: prev_layer = cell_outputs[-2] net = normal_cell( net, scope='cell_{}'.format(cell_num), filter_scaling=filter_scaling, stride=stride, prev_layer=prev_layer, cell_num=true_cell_num) true_cell_num += 1 cell_outputs.append(net) # Final nonlinearity. # Note that we have dropped the final pooling, dropout and softmax layers # from the default nasnet version. with tf.variable_scope('final_layer'): net = tf.nn.relu(net) return net # TODO(shlens): Only fixed_shape_resizer is currently supported for NASNet # featurization. The reason for this is that nasnet.py only supports # inputs with fully known shapes. We need to update nasnet.py to handle # shapes not known at compile time. class FasterRCNNNASFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with NASNet-A feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. 
reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 16. """ if first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 16.') super(FasterRCNNNASFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with NAS preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the NASNet network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] end_points: A dictionary mapping feature extractor tensor names to tensors Raises: ValueError: If the created network is missing the required activation. """ del scope if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(nasnet_large_arg_scope_for_detection( is_batch_norm_training=self._train_batch_norm)): with arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], reuse=self._reuse_weights): _, end_points = nasnet.build_nasnet_large( preprocessed_inputs, num_classes=None, is_training=self._is_training, final_endpoint='Cell_11') # Note that both 'Cell_10' and 'Cell_11' have equal depth = 2016. rpn_feature_map = tf.concat([end_points['Cell_10'], end_points['Cell_11']], 3) # nasnet.py does not maintain the batch size in the first dimension. # This work around permits us retaining the batch for below. batch = preprocessed_inputs.get_shape().as_list()[0] shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] rpn_feature_map_shape = [batch] + shape_without_batch rpn_feature_map.set_shape(rpn_feature_map_shape) return rpn_feature_map, end_points def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the NASNet-A network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ del scope # Note that we always feed into 2 layers of equal depth # where the first N channels corresponds to previous hidden layer # and the second N channels correspond to the final hidden layer. 
hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) # Note that what follows is largely a copy of build_nasnet_large() within # nasnet.py. We are copying to minimize code pollution in slim. # TODO(shlens,skornblith): Determine the appropriate drop path schedule. # For now the schedule is the default (1.0->0.7 over 250,000 train steps). hparams = nasnet.large_imagenet_config() if not self._is_training: hparams.set_hparam('drop_path_keep_prob', 1.0) # Calculate the total number of cells in the network # -- Add 2 for the reduction cells. total_num_cells = hparams.num_cells + 2 # -- And add 2 for the stem cells for ImageNet training. total_num_cells += 2 normal_cell = nasnet_utils.NasNetANormalCell( hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps) reduction_cell = nasnet_utils.NasNetAReductionCell( hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps) with arg_scope([slim.dropout, nasnet_utils.drop_path], is_training=self._is_training): with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format): # This corresponds to the cell number just past 'Cell_11' used by # by _extract_proposal_features(). start_cell_num = 12 # Note that this number equals: # start_cell_num + 2 stem cells + 1 reduction cell true_cell_num = 15 with slim.arg_scope(nasnet.nasnet_large_arg_scope()): net = _build_nasnet_base(hidden_previous, hidden, normal_cell=normal_cell, reduction_cell=reduction_cell, hparams=hparams, true_cell_num=true_cell_num, start_cell_num=start_cell_num) proposal_classifier_features = net return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for NASNet-A checkpoints. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ # Note that the NAS checkpoint only contains the moving average version of # the Variables so we need to generate an appropriate dictionary mapping. variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable return variables_to_restore
faster_rcnn_nas_feature_extractor.py
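Because the NAS checkpoint only stores the moving-average copies of its variables, `restore_from_classification_checkpoint_fn()` above rewrites each variable name before restoring. Below is a minimal standalone sketch of that remapping; the helper name and the example scope/variable strings are made up for illustration and are not part of the module:

def ema_restore_name(variable_op_name, feature_extractor_scope):
    """Maps a model variable name to its moving-average name in the checkpoint."""
    name = variable_op_name.replace(feature_extractor_scope + '/', '')
    return name + '/ExponentialMovingAverage'


print(ema_restore_name(
    'FirstStageFeatureExtractor/cell_0/comb_iter_0/left/weights',
    'FirstStageFeatureExtractor'))
# cell_0/comb_iter_0/left/weights/ExponentialMovingAverage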
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import resnet_v1 class SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD FPN feature extractor based on Resnet v1 architecture.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_base_fn, resnet_scope_name, fpn_scope_name, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD FPN feature extractor based on Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. resnet_base_fn: base resnet network to use. resnet_scope_name: scope name under which to construct resnet fpn_scope_name: scope name under which to construct the feature pyramid network. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. UNUSED currently. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. Raises: ValueError: On supplying invalid arguments for unused arguments. 
""" super(SSDResnetV1FpnFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) if self._use_explicit_padding is True: raise ValueError('Explicit padding is not a valid option.') self._resnet_base_fn = resnet_base_fn self._resnet_scope_name = resnet_scope_name self._fpn_scope_name = fpn_scope_name self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._use_native_resize_op = use_native_resize_op def preprocess(self, resized_inputs): """SSD preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def _filter_features(self, image_features): # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead # of munging the scope here. filtered_image_features = dict({}) for key, feature in image_features.items(): feature_name = key.split('/')[-1] if feature_name in ['block1', 'block2', 'block3', 'block4']: filtered_image_features[feature_name] = feature return filtered_image_features def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 129, preprocessed_inputs) with tf.variable_scope( self._resnet_scope_name, reuse=self._reuse_weights) as scope: with slim.arg_scope(resnet_v1.resnet_arg_scope()): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = self._resnet_base_fn( inputs=ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), num_classes=None, is_training=None, global_pool=False, output_stride=None, store_non_strided_activations=True, min_base_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope) image_features = self._filter_features(image_features) depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights): base_fpn_max_level = min(self._fpn_max_level, 5) feature_block_list = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) fpn_features = feature_map_generators.fpn_top_down_feature_maps( [(key, image_features[key]) for key in feature_block_list], depth=depth_fn(self._additional_layer_depth), use_native_resize_op=self._use_native_resize_op) feature_maps = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_maps.append( fpn_features['top_down_block{}'.format(level - 1)]) last_feature_map = fpn_features['top_down_block{}'.format( base_fpn_max_level - 1)] # Construct coarse features for i in range(base_fpn_max_level, self._fpn_max_level): last_feature_map = slim.conv2d( last_feature_map, num_outputs=depth_fn(self._additional_layer_depth), kernel_size=[3, 3], stride=2, padding='SAME', scope='bottom_up_block{}'.format(i)) feature_maps.append(last_feature_map) return feature_maps class SSDResnet50V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): """SSD Resnet50 V1 FPN feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD Resnet50 V1 FPN feature extractor based on Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. UNUSED currently. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. 
override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet50V1FpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50', 'fpn', fpn_min_level, fpn_max_level, additional_layer_depth, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, use_native_resize_op=use_native_resize_op, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) class SSDResnet101V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): """SSD Resnet101 V1 FPN feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD Resnet101 V1 FPN feature extractor based on Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. UNUSED currently. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet101V1FpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_101, 'resnet_v1_101', 'fpn', fpn_min_level, fpn_max_level, additional_layer_depth, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, use_native_resize_op=use_native_resize_op, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) class SSDResnet152V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): """SSD Resnet152 V1 FPN feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD Resnet152 V1 FPN feature extractor based on Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. 
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. UNUSED currently. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDResnet152V1FpnFeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, resnet_v1.resnet_v1_152, 'resnet_v1_152', 'fpn', fpn_min_level, fpn_max_level, additional_layer_depth, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, use_native_resize_op=use_native_resize_op, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams)
ssd_resnet_v1_fpn_feature_extractor.py
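The `preprocess()` method above performs VGG-style channel mean subtraction and silently passes non-3-channel inputs through unchanged. A small self-contained sketch of that rule, using numpy as a stand-in for the tensor op (the function name is illustrative, not library API):

import numpy as np

_CHANNEL_MEANS = np.array([123.68, 116.779, 103.939], dtype=np.float32)


def vgg_style_preprocess(images):
    """images: float array of shape [batch, height, width, channels]."""
    if images.shape[-1] == 3:
        return images - _CHANNEL_MEANS
    return images  # mean subtraction is skipped for non-RGB inputs


batch = np.full((1, 2, 2, 3), 127.0, dtype=np.float32)
print(vgg_style_preprocess(batch)[0, 0, 0])  # approximately [3.32, 10.221, 23.061]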
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.utils import ops from object_detection.utils import shape_utils # Activation bound used for TPU v1. Activations will be clipped to # [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with # use_bounded_activations enabled. ACTIVATION_BOUND = 6.0 def get_depth_fn(depth_multiplier, min_depth): """Builds a callable to compute depth (output channels) of conv filters. Args: depth_multiplier: a multiplier for the nominal depth. min_depth: a lower bound on the depth of filters. Returns: A callable that takes in a nominal depth and returns the depth to use. """ def multiply_depth(depth): new_depth = int(depth * depth_multiplier) return max(new_depth, min_depth) return multiply_depth def create_conv_block( use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams, is_training, freeze_batchnorm, depth): """Create Keras layers for depthwise & non-depthwise convolutions. Args: use_depthwise: Whether to use depthwise separable conv instead of regular conv. kernel_size: A list of length 2: [kernel_height, kernel_width] of the filters. Can be an int if both values are the same. padding: One of 'VALID' or 'SAME'. stride: A list of length 2: [stride_height, stride_width], specifying the convolution stride. Can be an int if both strides are the same. layer_name: String. The name of the layer. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. is_training: Indicates whether the feature generator is in training mode. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. depth: Depth of output feature maps. Returns: A list of conv layers. """ layers = [] if use_depthwise: kwargs = conv_hyperparams.params() # Both the regularizer and initializer apply to the depthwise layer, # so we remap the kernel_* to depthwise_* here. kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] layers.append( tf.keras.layers.SeparableConv2D( depth, [kernel_size, kernel_size], depth_multiplier=1, padding=padding, strides=stride, name=layer_name + '_depthwise_conv', **kwargs)) else: layers.append(tf.keras.layers.Conv2D( depth, [kernel_size, kernel_size], padding=padding, strides=stride, name=layer_name + '_conv', **conv_hyperparams.params())) layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) layers.append( conv_hyperparams.build_activation_layer( name=layer_name)) return layers class KerasMultiResolutionFeatureMaps(tf.keras.Model): """Generates multi resolution feature maps from input image features. A Keras model that generates multi-scale feature maps for detection as in the SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1. More specifically, when called on inputs it performs the following two tasks: 1) If a layer name is provided in the configuration, returns that layer as a feature map. 2) If a layer name is left as an empty string, constructs a new feature map based on the spatial shape and depth configuration. 
Note that the current implementation only supports generating new layers using convolution of stride 2 resulting in a spatial resolution reduction by a factor of 2. By default convolution kernel size is set to 3, and it can be customized by caller. An example of the configuration for Inception V3: { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } When this feature generator object is called on input image_features: Args: image_features: A dictionary of handles to activation tensors from the base feature extractor. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. """ def __init__(self, feature_map_layout, depth_multiplier, min_depth, insert_1x1_conv, is_training, conv_hyperparams, freeze_batchnorm, name=None): """Constructor. Args: feature_map_layout: Dictionary of specifications for the feature map layouts in the following format (Inception V2/V3 respectively): { 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } or { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } If 'from_layer' is specified, the specified feature map is directly used as a box predictor layer, and the layer_depth is directly infered from the feature map (instead of using the provided 'layer_depth' parameter). In this case, our convention is to set 'layer_depth' to -1 for clarity. Otherwise, if 'from_layer' is an empty string, then the box predictor layer will be built from the previous layer using convolution operations. Note that the current implementation only supports generating new layers using convolutions of stride 2 (resulting in a spatial resolution reduction by a factor of 2), and will be extended to a more flexible design. Convolution kernel size is set to 3 by default, and can be customized by 'conv_kernel_size' parameter (similarily, 'conv_kernel_size' should be set to -1 if 'from_layer' is specified). The created convolution operation will be a normal 2D convolution by default, and a depthwise convolution followed by 1x1 convolution if 'use_depthwise' is set to True. depth_multiplier: Depth multiplier for convolutional layers. min_depth: Minimum depth for convolutional layers. insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution should be inserted before shrinking the feature map. is_training: Indicates whether the feature generator is in training mode. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(KerasMultiResolutionFeatureMaps, self).__init__(name=name) self.feature_map_layout = feature_map_layout self.convolutions = [] depth_fn = get_depth_fn(depth_multiplier, min_depth) base_from_layer = '' use_explicit_padding = False if 'use_explicit_padding' in feature_map_layout: use_explicit_padding = feature_map_layout['use_explicit_padding'] use_depthwise = False if 'use_depthwise' in feature_map_layout: use_depthwise = feature_map_layout['use_depthwise'] for index, from_layer in enumerate(feature_map_layout['from_layer']): net = [] layer_depth = feature_map_layout['layer_depth'][index] conv_kernel_size = 3 if 'conv_kernel_size' in feature_map_layout: conv_kernel_size = feature_map_layout['conv_kernel_size'][index] if from_layer: base_from_layer = from_layer else: if insert_1x1_conv: layer_name = '{}_1_Conv2d_{}_1x1_{}'.format( base_from_layer, index, depth_fn(layer_depth // 2)) net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth // 2), [1, 1], padding='SAME', strides=1, name=layer_name + '_conv', **conv_hyperparams.params())) net.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) net.append( conv_hyperparams.build_activation_layer( name=layer_name)) layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format( base_from_layer, index, conv_kernel_size, conv_kernel_size, depth_fn(layer_depth)) stride = 2 padding = 'SAME' if use_explicit_padding: padding = 'VALID' # We define this function here while capturing the value of # conv_kernel_size, to avoid holding a reference to the loop variable # conv_kernel_size inside of a lambda function def fixed_padding(features, kernel_size=conv_kernel_size): return ops.fixed_padding(features, kernel_size) net.append(tf.keras.layers.Lambda(fixed_padding)) # TODO(rathodv): Add some utilities to simplify the creation of # Depthwise & non-depthwise convolutions w/ normalization & activations if use_depthwise: net.append(tf.keras.layers.DepthwiseConv2D( [conv_kernel_size, conv_kernel_size], depth_multiplier=1, padding=padding, strides=stride, name=layer_name + '_depthwise_conv', **conv_hyperparams.params())) net.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_depthwise_batchnorm')) net.append( conv_hyperparams.build_activation_layer( name=layer_name + '_depthwise')) net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1], padding='SAME', strides=1, name=layer_name + '_conv', **conv_hyperparams.params())) net.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) net.append( conv_hyperparams.build_activation_layer( name=layer_name)) else: net.append(tf.keras.layers.Conv2D( depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size], padding=padding, strides=stride, name=layer_name + '_conv', **conv_hyperparams.params())) net.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) net.append( conv_hyperparams.build_activation_layer( name=layer_name)) # Until certain bugs are fixed in checkpointable lists, # this net must be appended only once it's been filled with layers self.convolutions.append(net) def call(self, image_features): """Generate the multi-resolution feature maps. Executed when calling the `.__call__` method on input. Args: image_features: A dictionary of handles to activation tensors from the base feature extractor. 
Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. """ feature_maps = [] feature_map_keys = [] for index, from_layer in enumerate(self.feature_map_layout['from_layer']): if from_layer: feature_map = image_features[from_layer] feature_map_keys.append(from_layer) else: feature_map = feature_maps[-1] for layer in self.convolutions[index]: feature_map = layer(feature_map) layer_name = self.convolutions[index][-1].name feature_map_keys.append(layer_name) feature_maps.append(feature_map) return collections.OrderedDict( [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) def multi_resolution_feature_maps(feature_map_layout, depth_multiplier, min_depth, insert_1x1_conv, image_features, pool_residual=False): """Generates multi resolution feature maps from input image features. Generates multi-scale feature maps for detection as in the SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1. More specifically, it performs the following two tasks: 1) If a layer name is provided in the configuration, returns that layer as a feature map. 2) If a layer name is left as an empty string, constructs a new feature map based on the spatial shape and depth configuration. Note that the current implementation only supports generating new layers using convolution of stride 2 resulting in a spatial resolution reduction by a factor of 2. By default convolution kernel size is set to 3, and it can be customized by caller. An example of the configuration for Inception V3: { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } Args: feature_map_layout: Dictionary of specifications for the feature map layouts in the following format (Inception V2/V3 respectively): { 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } or { 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], 'layer_depth': [-1, -1, -1, 512, 256, 128] } If 'from_layer' is specified, the specified feature map is directly used as a box predictor layer, and the layer_depth is directly infered from the feature map (instead of using the provided 'layer_depth' parameter). In this case, our convention is to set 'layer_depth' to -1 for clarity. Otherwise, if 'from_layer' is an empty string, then the box predictor layer will be built from the previous layer using convolution operations. Note that the current implementation only supports generating new layers using convolutions of stride 2 (resulting in a spatial resolution reduction by a factor of 2), and will be extended to a more flexible design. Convolution kernel size is set to 3 by default, and can be customized by 'conv_kernel_size' parameter (similarily, 'conv_kernel_size' should be set to -1 if 'from_layer' is specified). The created convolution operation will be a normal 2D convolution by default, and a depthwise convolution followed by 1x1 convolution if 'use_depthwise' is set to True. depth_multiplier: Depth multiplier for convolutional layers. min_depth: Minimum depth for convolutional layers. insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution should be inserted before shrinking the feature map. image_features: A dictionary of handles to activation tensors from the base feature extractor. 
pool_residual: Whether to add an average pooling layer followed by a residual connection between subsequent feature maps when the channel depth match. For example, with option 'layer_depth': [-1, 512, 256, 256], a pooling and residual layer is added between the third and forth feature map. This option is better used with Weight Shared Convolution Box Predictor when all feature maps have the same channel depth to encourage more consistent features across multi-scale feature maps. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. Raises: ValueError: if the number entries in 'from_layer' and 'layer_depth' do not match. ValueError: if the generated layer does not have the same resolution as specified. """ depth_fn = get_depth_fn(depth_multiplier, min_depth) feature_map_keys = [] feature_maps = [] base_from_layer = '' use_explicit_padding = False if 'use_explicit_padding' in feature_map_layout: use_explicit_padding = feature_map_layout['use_explicit_padding'] use_depthwise = False if 'use_depthwise' in feature_map_layout: use_depthwise = feature_map_layout['use_depthwise'] for index, from_layer in enumerate(feature_map_layout['from_layer']): layer_depth = feature_map_layout['layer_depth'][index] conv_kernel_size = 3 if 'conv_kernel_size' in feature_map_layout: conv_kernel_size = feature_map_layout['conv_kernel_size'][index] if from_layer: feature_map = image_features[from_layer] base_from_layer = from_layer feature_map_keys.append(from_layer) else: pre_layer = feature_maps[-1] pre_layer_depth = pre_layer.get_shape().as_list()[3] intermediate_layer = pre_layer if insert_1x1_conv: layer_name = '{}_1_Conv2d_{}_1x1_{}'.format( base_from_layer, index, depth_fn(layer_depth // 2)) intermediate_layer = slim.conv2d( pre_layer, depth_fn(layer_depth // 2), [1, 1], padding='SAME', stride=1, scope=layer_name) layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format( base_from_layer, index, conv_kernel_size, conv_kernel_size, depth_fn(layer_depth)) stride = 2 padding = 'SAME' if use_explicit_padding: padding = 'VALID' intermediate_layer = ops.fixed_padding( intermediate_layer, conv_kernel_size) if use_depthwise: feature_map = slim.separable_conv2d( intermediate_layer, None, [conv_kernel_size, conv_kernel_size], depth_multiplier=1, padding=padding, stride=stride, scope=layer_name + '_depthwise') feature_map = slim.conv2d( feature_map, depth_fn(layer_depth), [1, 1], padding='SAME', stride=1, scope=layer_name) if pool_residual and pre_layer_depth == depth_fn(layer_depth): if use_explicit_padding: pre_layer = ops.fixed_padding(pre_layer, conv_kernel_size) feature_map += slim.avg_pool2d( pre_layer, [conv_kernel_size, conv_kernel_size], padding=padding, stride=2, scope=layer_name + '_pool') else: feature_map = slim.conv2d( intermediate_layer, depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size], padding=padding, stride=stride, scope=layer_name) feature_map_keys.append(layer_name) feature_maps.append(feature_map) return collections.OrderedDict( [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) class KerasFpnTopDownFeatureMaps(tf.keras.Model): """Generates Keras based `top-down` feature maps for Feature Pyramid Networks. See https://arxiv.org/abs/1612.03144 for details. """ def __init__(self, num_levels, depth, is_training, conv_hyperparams, freeze_batchnorm, use_depthwise=False, use_explicit_padding=False, use_bounded_activations=False, use_native_resize_op=False, scope=None, name=None): """Constructor. 
Args: num_levels: the number of image features. depth: depth of output feature maps. is_training: Indicates whether the feature generator is in training mode. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: whether to use depthwise separable conv instead of regular conv. use_explicit_padding: whether to use explicit padding. use_bounded_activations: Whether or not to clip activations to range [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend themselves to quantized inference. use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for the upsampling process instead of reshape and broadcasting implementation. scope: A scope name to wrap this op under. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(KerasFpnTopDownFeatureMaps, self).__init__(name=name) self.scope = scope if scope else 'top_down' self.top_layers = [] self.residual_blocks = [] self.top_down_blocks = [] self.reshape_blocks = [] self.conv_layers = [] padding = 'VALID' if use_explicit_padding else 'SAME' stride = 1 kernel_size = 3 def clip_by_value(features): return tf.clip_by_value(features, -ACTIVATION_BOUND, ACTIVATION_BOUND) # top layers self.top_layers.append(tf.keras.layers.Conv2D( depth, [1, 1], strides=stride, padding=padding, name='projection_%d' % num_levels, **conv_hyperparams.params(use_bias=True))) if use_bounded_activations: self.top_layers.append(tf.keras.layers.Lambda( clip_by_value, name='clip_by_value')) for level in reversed(list(range(num_levels - 1))): # to generate residual from image features residual_net = [] # to preprocess top_down (the image feature map from last layer) top_down_net = [] # to reshape top_down according to residual if necessary reshaped_residual = [] # to apply convolution layers to feature map conv_net = [] # residual block residual_net.append(tf.keras.layers.Conv2D( depth, [1, 1], padding=padding, strides=1, name='projection_%d' % (level + 1), **conv_hyperparams.params(use_bias=True))) if use_bounded_activations: residual_net.append(tf.keras.layers.Lambda( clip_by_value, name='clip_by_value')) # top-down block # TODO (b/128922690): clean-up of ops.nearest_neighbor_upsampling if use_native_resize_op: def resize_nearest_neighbor(image): image_shape = shape_utils.combined_static_and_dynamic_shape(image) return tf.image.resize_nearest_neighbor( image, [image_shape[1] * 2, image_shape[2] * 2]) top_down_net.append(tf.keras.layers.Lambda( resize_nearest_neighbor, name='nearest_neighbor_upsampling')) else: def nearest_neighbor_upsampling(image): return ops.nearest_neighbor_upsampling(image, scale=2) top_down_net.append(tf.keras.layers.Lambda( nearest_neighbor_upsampling, name='nearest_neighbor_upsampling')) # reshape block if use_explicit_padding: def reshape(inputs): residual_shape = tf.shape(inputs[0]) return inputs[1][:, :residual_shape[1], :residual_shape[2], :] reshaped_residual.append( tf.keras.layers.Lambda(reshape, name='reshape')) # down layers if use_bounded_activations: conv_net.append(tf.keras.layers.Lambda( clip_by_value, name='clip_by_value')) if use_explicit_padding: def fixed_padding(features, kernel_size=kernel_size): return ops.fixed_padding(features, kernel_size) 
conv_net.append(tf.keras.layers.Lambda( fixed_padding, name='fixed_padding')) layer_name = 'smoothing_%d' % (level + 1) conv_block = create_conv_block( use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams, is_training, freeze_batchnorm, depth) conv_net.extend(conv_block) self.residual_blocks.append(residual_net) self.top_down_blocks.append(top_down_net) self.reshape_blocks.append(reshaped_residual) self.conv_layers.append(conv_net) def call(self, image_features): """Generate the multi-resolution feature maps. Executed when calling the `.__call__` method on input. Args: image_features: list of tuples of (tensor_name, image_feature_tensor). Spatial resolutions of succesive tensors must reduce exactly by a factor of 2. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. """ output_feature_maps_list = [] output_feature_map_keys = [] with tf.name_scope(self.scope): top_down = image_features[-1][1] for layer in self.top_layers: top_down = layer(top_down) output_feature_maps_list.append(top_down) output_feature_map_keys.append('top_down_%s' % image_features[-1][0]) num_levels = len(image_features) for index, level in enumerate(reversed(list(range(num_levels - 1)))): residual = image_features[level][1] top_down = output_feature_maps_list[-1] for layer in self.residual_blocks[index]: residual = layer(residual) for layer in self.top_down_blocks[index]: top_down = layer(top_down) for layer in self.reshape_blocks[index]: top_down = layer([residual, top_down]) top_down += residual for layer in self.conv_layers[index]: top_down = layer(top_down) output_feature_maps_list.append(top_down) output_feature_map_keys.append('top_down_%s' % image_features[level][0]) return collections.OrderedDict(reversed( list(zip(output_feature_map_keys, output_feature_maps_list)))) def fpn_top_down_feature_maps(image_features, depth, use_depthwise=False, use_explicit_padding=False, use_bounded_activations=False, scope=None, use_native_resize_op=False): """Generates `top-down` feature maps for Feature Pyramid Networks. See https://arxiv.org/abs/1612.03144 for details. Args: image_features: list of tuples of (tensor_name, image_feature_tensor). Spatial resolutions of succesive tensors must reduce exactly by a factor of 2. depth: depth of output feature maps. use_depthwise: whether to use depthwise separable conv instead of regular conv. use_explicit_padding: whether to use explicit padding. use_bounded_activations: Whether or not to clip activations to range [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend themselves to quantized inference. scope: A scope name to wrap this op under. use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for the upsampling process instead of reshape and broadcasting implementation. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. 
""" with tf.name_scope(scope, 'top_down'): num_levels = len(image_features) output_feature_maps_list = [] output_feature_map_keys = [] padding = 'VALID' if use_explicit_padding else 'SAME' kernel_size = 3 with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], padding=padding, stride=1): top_down = slim.conv2d( image_features[-1][1], depth, [1, 1], activation_fn=None, normalizer_fn=None, scope='projection_%d' % num_levels) if use_bounded_activations: top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND, ACTIVATION_BOUND) output_feature_maps_list.append(top_down) output_feature_map_keys.append( 'top_down_%s' % image_features[-1][0]) for level in reversed(list(range(num_levels - 1))): if use_native_resize_op: with tf.name_scope('nearest_neighbor_upsampling'): top_down_shape = shape_utils.combined_static_and_dynamic_shape( top_down) top_down = tf.image.resize_nearest_neighbor( top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2]) else: top_down = ops.nearest_neighbor_upsampling(top_down, scale=2) residual = slim.conv2d( image_features[level][1], depth, [1, 1], activation_fn=None, normalizer_fn=None, scope='projection_%d' % (level + 1)) if use_bounded_activations: residual = tf.clip_by_value(residual, -ACTIVATION_BOUND, ACTIVATION_BOUND) if use_explicit_padding: # slice top_down to the same shape as residual residual_shape = tf.shape(residual) top_down = top_down[:, :residual_shape[1], :residual_shape[2], :] top_down += residual if use_bounded_activations: top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND, ACTIVATION_BOUND) if use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d pre_output = top_down if use_explicit_padding: pre_output = ops.fixed_padding(pre_output, kernel_size) output_feature_maps_list.append(conv_op( pre_output, depth, [kernel_size, kernel_size], scope='smoothing_%d' % (level + 1))) output_feature_map_keys.append('top_down_%s' % image_features[level][0]) return collections.OrderedDict(reversed( list(zip(output_feature_map_keys, output_feature_maps_list)))) def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers, image_features, replace_pool_with_conv=False): """Generates pooling pyramid feature maps. The pooling pyramid feature maps is motivated by multi_resolution_feature_maps. The main difference are that it is simpler and reduces the number of free parameters. More specifically: - Instead of using convolutions to shrink the feature map, it uses max pooling, therefore totally gets rid of the parameters in convolution. - By pooling feature from larger map up to a single cell, it generates features in the same feature space. - Instead of independently making box predictions from individual maps, it shares the same classifier across different feature maps, therefore reduces the "mis-calibration" across different scales. See go/ppn-detection for more details. Args: base_feature_map_depth: Depth of the base feature before the max pooling. num_layers: Number of layers used to make predictions. They are pooled from the base feature. image_features: A dictionary of handles to activation tensors from the feature extractor. replace_pool_with_conv: Whether or not to replace pooling operations with convolutions in the PPN. Default is False. Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. 
Raises: ValueError: image_features does not contain exactly one entry """ if len(image_features) != 1: raise ValueError('image_features should be a dictionary of length 1.') image_features = image_features[list(image_features.keys())[0]] feature_map_keys = [] feature_maps = [] feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth if base_feature_map_depth > 0: image_features = slim.conv2d( image_features, base_feature_map_depth, [1, 1], # kernel size padding='SAME', stride=1, scope=feature_map_key) # Add a 1x1 max-pooling node (a no op node) immediately after the conv2d for # TPU v1 compatibility. Without the following dummy op, TPU runtime # compiler will combine the convolution with one max-pooling below into a # single cycle, so getting the conv2d feature becomes impossible. image_features = slim.max_pool2d( image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key) feature_map_keys.append(feature_map_key) feature_maps.append(image_features) feature_map = image_features if replace_pool_with_conv: with slim.arg_scope([slim.conv2d], padding='SAME', stride=2): for i in range(num_layers - 1): feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i, base_feature_map_depth) feature_map = slim.conv2d( feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key) feature_map_keys.append(feature_map_key) feature_maps.append(feature_map) else: with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2): for i in range(num_layers - 1): feature_map_key = 'MaxPool2d_%d_2x2' % i feature_map = slim.max_pool2d( feature_map, [2, 2], padding='SAME', scope=feature_map_key) feature_map_keys.append(feature_map_key) feature_maps.append(feature_map) return collections.OrderedDict( [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
feature_map_generators.py
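For a concrete feel of what `multi_resolution_feature_maps()` above produces, this standalone sketch re-derives only the output key names for the Inception V3 layout from its docstring, with depth_multiplier=1.0 and min_depth=16 (illustrative only; `expected_feature_map_keys` is a made-up helper and does not build any tensors):

def expected_feature_map_keys(feature_map_layout, depth_multiplier=1.0,
                              min_depth=16, conv_kernel_size=3):
    depth_fn = lambda d: max(int(d * depth_multiplier), min_depth)
    keys, base_from_layer = [], ''
    for index, from_layer in enumerate(feature_map_layout['from_layer']):
        layer_depth = feature_map_layout['layer_depth'][index]
        if from_layer:
            # Existing backbone layers are used directly under their own name.
            base_from_layer = from_layer
            keys.append(from_layer)
        else:
            # New layers follow the '{base}_2_Conv2d_{i}_{k}x{k}_s2_{depth}' scheme.
            keys.append('{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
                base_from_layer, index, conv_kernel_size, conv_kernel_size,
                depth_fn(layer_depth)))
    return keys


layout = {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128],
}
print(expected_feature_map_keys(layout))
# ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', 'Mixed_7c_2_Conv2d_3_3x3_s2_512',
#  'Mixed_7c_2_Conv2d_4_3x3_s2_256', 'Mixed_7c_2_Conv2d_5_3x3_s2_128']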
"""SSDFeatureExtractor for InceptionV2 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import ops from object_detection.utils import shape_utils from nets import inception_v2 class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using InceptionV2 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """InceptionV2 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. Raises: ValueError: If `override_base_feature_extractor_hyperparams` is False. """ super(SSDInceptionV2FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) if not self._override_base_feature_extractor_hyperparams: raise ValueError('SSD Inception V2 feature extractor always uses' 'scope returned by `conv_hyperparams_fn` for both the ' 'base feature extractor and the additional layers ' 'added since there is no arg_scope defined for the base ' 'feature extractor.') def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', '' ][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope('InceptionV2', reuse=self._reuse_weights) as scope: _, image_features = inception_v2.inception_v2_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Mixed_5c', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope) feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_inception_v2_feature_extractor.py
ssd_inception_v2_feature_extractor.py
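# Hedged illustration of two details from the extractor above: the
# [0, 255] -> [-1, 1] mapping done by preprocess(), and how the num_layers
# argument trims the feature map layout. Plain NumPy only; nothing here
# touches the actual slim graph construction.
import numpy as np

def ssd_preprocess(resized_inputs):
  return (2.0 / 255.0) * resized_inputs - 1.0

print(ssd_preprocess(np.array([0.0, 127.5, 255.0])))  # [-1.  0.  1.]

num_layers = 4  # illustrative value; the extractor defaults to 6
print(['Mixed_4c', 'Mixed_5c', '', '', '', ''][:num_layers])
# ['Mixed_4c', 'Mixed_5c', '', ''] -> two backbone taps plus two extra layers
print([-1, -1, 512, 256, 256, 128][:num_layers])  # [-1, -1, 512, 256]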
import collections import functools from six.moves import range from six.moves import zip import tensorflow as tf from object_detection.utils import bifpn_utils def _create_bifpn_input_config(fpn_min_level, fpn_max_level, input_max_level, level_scales=None): """Creates a BiFPN input config for the input levels from a backbone network. Args: fpn_min_level: the minimum pyramid level (highest feature map resolution) to use in the BiFPN. fpn_max_level: the maximum pyramid level (lowest feature map resolution) to use in the BiFPN. input_max_level: the maximum pyramid level that will be provided as input to the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels from input_max_level, up to the desired fpn_max_level. level_scales: a list of pyramid level scale factors. If 'None', each level's scale is set to 2^level by default, which corresponds to each successive feature map scaling by a factor of 2. Returns: A list of dictionaries for each feature map expected as input to the BiFPN, where each has entries for the feature map 'name' and 'scale'. """ if not level_scales: level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)] bifpn_input_params = [] for i in range(fpn_min_level, min(fpn_max_level, input_max_level) + 1): bifpn_input_params.append({ 'name': '0_up_lvl_{}'.format(i), 'scale': level_scales[i - fpn_min_level] }) return bifpn_input_params def _get_bifpn_output_node_names(fpn_min_level, fpn_max_level, node_config): """Returns a list of BiFPN output node names, given a BiFPN node config. Args: fpn_min_level: the minimum pyramid level (highest feature map resolution) used by the BiFPN. fpn_max_level: the maximum pyramid level (lowest feature map resolution) used by the BiFPN. node_config: the BiFPN node_config, a list of dictionaries corresponding to each node in the BiFPN computation graph, where each entry should have an associated 'name'. Returns: A list of strings corresponding to the names of the output BiFPN nodes. """ num_output_nodes = fpn_max_level - fpn_min_level + 1 return [node['name'] for node in node_config[-num_output_nodes:]] def _create_bifpn_node_config(bifpn_num_iterations, bifpn_num_filters, fpn_min_level, fpn_max_level, input_max_level, bifpn_node_params=None, level_scales=None): """Creates a config specifying a bidirectional feature pyramid network. Args: bifpn_num_iterations: the number of top-down bottom-up feature computations to repeat in the BiFPN. bifpn_num_filters: the number of filters (channels) for every feature map used in the BiFPN. fpn_min_level: the minimum pyramid level (highest feature map resolution) to use in the BiFPN. fpn_max_level: the maximum pyramid level (lowest feature map resolution) to use in the BiFPN. input_max_level: the maximum pyramid level that will be provided as input to the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels from input_max_level, up to the desired fpn_max_level. bifpn_node_params: If not 'None', a dictionary of additional default BiFPN node parameters that will be applied to all BiFPN nodes. level_scales: a list of pyramid level scale factors. If 'None', each level's scale is set to 2^level by default, which corresponds to each successive feature map scaling by a factor of 2. Returns: A list of dictionaries used to define nodes in the BiFPN computation graph, as proposed by EfficientDet, Tan et al (https://arxiv.org/abs/1911.09070). Each node's entry has the corresponding keys: name: String. The name of this node in the BiFPN. 
The node name follows the format '{bifpn_iteration}_{dn|up}_lvl_{pyramid_level}', where 'dn' or 'up' refers to whether the node is in the top-down or bottom-up portion of a single BiFPN iteration. scale: the scale factor for this node, by default 2^level. inputs: A list of names of nodes which are inputs to this node. num_channels: The number of channels for this node. combine_method: String. Name of the method used to combine input node feature maps, 'fast_attention' by default for nodes which have more than one input. Otherwise, 'None' for nodes with only one input node. input_op: A (partial) function which is called to construct the layers that will be applied to this BiFPN node's inputs. This function is called with the arguments: input_op(name, input_scale, input_num_channels, output_scale, output_num_channels, conv_hyperparams, is_training, freeze_batchnorm) post_combine_op: A (partial) function which is called to construct the layers that will be applied to the result of the combine operation for this BiFPN node. This function will be called with the arguments: post_combine_op(name, conv_hyperparams, is_training, freeze_batchnorm) If 'None', then no layers will be applied after the combine operation for this node. """ if not level_scales: level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)] default_node_params = { 'num_channels': bifpn_num_filters, 'combine_method': 'fast_attention', 'input_op': functools.partial( _create_bifpn_resample_block, downsample_method='max_pooling'), 'post_combine_op': functools.partial( bifpn_utils.create_conv_block, num_filters=bifpn_num_filters, kernel_size=3, strides=1, padding='SAME', use_separable=True, apply_batchnorm=True, apply_activation=True, conv_bn_act_pattern=False), } if bifpn_node_params: default_node_params.update(bifpn_node_params) bifpn_node_params = [] # Create additional base pyramid levels not provided as input to the BiFPN. # Note, combine_method and post_combine_op are set to None for additional # base pyramid levels because they do not combine multiple input BiFPN nodes. for i in range(input_max_level + 1, fpn_max_level + 1): node_params = dict(default_node_params) node_params.update({ 'name': '0_up_lvl_{}'.format(i), 'scale': level_scales[i - fpn_min_level], 'inputs': ['0_up_lvl_{}'.format(i - 1)], 'combine_method': None, 'post_combine_op': None, }) bifpn_node_params.append(node_params) for i in range(bifpn_num_iterations): # The first bottom-up feature pyramid (which includes the input pyramid # levels from the backbone network and the additional base pyramid levels) # is indexed at 0. So, the first top-down bottom-up pass of the BiFPN is # indexed from 1, and repeated for bifpn_num_iterations iterations. bifpn_i = i + 1 # Create top-down nodes. for level_i in reversed(range(fpn_min_level, fpn_max_level)): inputs = [] # BiFPN nodes in the top-down pass receive input from the corresponding # level from the previous BiFPN iteration's bottom-up pass, except for the # bottom-most (min) level node, which is computed once in the initial # bottom-up pass, and is afterwards only computed in each top-down pass. 
if level_i > fpn_min_level or bifpn_i == 1: inputs.append('{}_up_lvl_{}'.format(bifpn_i - 1, level_i)) else: inputs.append('{}_dn_lvl_{}'.format(bifpn_i - 1, level_i)) inputs.append(bifpn_node_params[-1]['name']) node_params = dict(default_node_params) node_params.update({ 'name': '{}_dn_lvl_{}'.format(bifpn_i, level_i), 'scale': level_scales[level_i - fpn_min_level], 'inputs': inputs }) bifpn_node_params.append(node_params) # Create bottom-up nodes. for level_i in range(fpn_min_level + 1, fpn_max_level + 1): # BiFPN nodes in the bottom-up pass receive input from the corresponding # level from the preceding top-down pass, except for the top (max) level # which does not have a corresponding node in the top-down pass. inputs = ['{}_up_lvl_{}'.format(bifpn_i - 1, level_i)] if level_i < fpn_max_level: inputs.append('{}_dn_lvl_{}'.format(bifpn_i, level_i)) inputs.append(bifpn_node_params[-1]['name']) node_params = dict(default_node_params) node_params.update({ 'name': '{}_up_lvl_{}'.format(bifpn_i, level_i), 'scale': level_scales[level_i - fpn_min_level], 'inputs': inputs }) bifpn_node_params.append(node_params) return bifpn_node_params def _create_bifpn_resample_block(name, input_scale, input_num_channels, output_scale, output_num_channels, conv_hyperparams, is_training, freeze_batchnorm, downsample_method=None, use_native_resize_op=False, maybe_apply_1x1_conv=True, apply_1x1_pre_sampling=True, apply_1x1_post_sampling=False): """Creates resample block layers for input feature maps to BiFPN nodes. Args: name: String. Name used for this block of layers. input_scale: Scale factor of the input feature map. input_num_channels: Number of channels in the input feature map. output_scale: Scale factor of the output feature map. output_num_channels: Number of channels in the output feature map. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. is_training: Indicates whether the feature generator is in training mode. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. downsample_method: String. Method to use when downsampling feature maps. use_native_resize_op: Bool. Whether to use the native resize up when upsampling feature maps. maybe_apply_1x1_conv: Bool. If 'True', a 1x1 convolution will only be applied if the input_num_channels differs from the output_num_channels. apply_1x1_pre_sampling: Bool. Whether a 1x1 convolution will be applied to the input feature map before the up/down-sampling operation. apply_1x1_post_sampling: Bool. Whether a 1x1 convolution will be applied to the input feature map after the up/down-sampling operation. Returns: A list of layers which may be applied to the input feature maps in order to compute feature maps with the specified scale and number of channels. """ # By default, 1x1 convolutions are only applied before sampling when the # number of input and output channels differ. 
if maybe_apply_1x1_conv and output_num_channels == input_num_channels: apply_1x1_pre_sampling = False apply_1x1_post_sampling = False apply_bn_for_resampling = True layers = [] if apply_1x1_pre_sampling: layers.extend( bifpn_utils.create_conv_block( name=name + '1x1_pre_sample/', num_filters=output_num_channels, kernel_size=1, strides=1, padding='SAME', use_separable=False, apply_batchnorm=apply_bn_for_resampling, apply_activation=False, conv_hyperparams=conv_hyperparams, is_training=is_training, freeze_batchnorm=freeze_batchnorm)) layers.extend( bifpn_utils.create_resample_feature_map_ops(input_scale, output_scale, downsample_method, use_native_resize_op, conv_hyperparams, is_training, freeze_batchnorm, name)) if apply_1x1_post_sampling: layers.extend( bifpn_utils.create_conv_block( name=name + '1x1_post_sample/', num_filters=output_num_channels, kernel_size=1, strides=1, padding='SAME', use_separable=False, apply_batchnorm=apply_bn_for_resampling, apply_activation=False, conv_hyperparams=conv_hyperparams, is_training=is_training, freeze_batchnorm=freeze_batchnorm)) return layers def _create_bifpn_combine_op(num_inputs, name, combine_method): """Creates a BiFPN output config, a list of the output BiFPN node names. Args: num_inputs: The number of inputs to this combine operation. name: String. The name of this combine operation. combine_method: String. The method used to combine input feature maps. Returns: A function which may be called with a list of num_inputs feature maps and which will return a single feature map. """ combine_op = None if num_inputs < 1: raise ValueError('Expected at least 1 input for BiFPN combine.') elif num_inputs == 1: combine_op = lambda x: x[0] else: combine_op = bifpn_utils.BiFPNCombineLayer( combine_method=combine_method, name=name) return combine_op class KerasBiFpnFeatureMaps(tf.keras.Model): """Generates Keras based BiFPN feature maps from an input feature map pyramid. A Keras model that generates multi-scale feature maps for detection by iteratively computing top-down and bottom-up feature pyramids, as in the EfficientDet paper by Tan et al, see arxiv.org/abs/1911.09070 for details. """ def __init__(self, bifpn_num_iterations, bifpn_num_filters, fpn_min_level, fpn_max_level, input_max_level, is_training, conv_hyperparams, freeze_batchnorm, bifpn_node_params=None, name=None): """Constructor. Args: bifpn_num_iterations: The number of top-down bottom-up iterations. bifpn_num_filters: The number of filters (channels) to be used for all feature maps in this BiFPN. fpn_min_level: The minimum pyramid level (highest feature map resolution) to use in the BiFPN. fpn_max_level: The maximum pyramid level (lowest feature map resolution) to use in the BiFPN. input_max_level: The maximum pyramid level that will be provided as input to the BiFPN. Accordingly, the BiFPN will compute any additional pyramid levels from input_max_level up to the desired fpn_max_level, with each successivel level downsampling by a scale factor of 2 by default. is_training: Indicates whether the feature generator is in training mode. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. 
bifpn_node_params: An optional dictionary that may be used to specify default parameters for BiFPN nodes, without the need to provide a custom bifpn_node_config. For example, if '{ combine_method: 'sum' }', then all BiFPN nodes will combine input feature maps by summation, rather than by the default fast attention method. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(KerasBiFpnFeatureMaps, self).__init__(name=name) bifpn_node_config = _create_bifpn_node_config( bifpn_num_iterations, bifpn_num_filters, fpn_min_level, fpn_max_level, input_max_level, bifpn_node_params) bifpn_input_config = _create_bifpn_input_config( fpn_min_level, fpn_max_level, input_max_level) bifpn_output_node_names = _get_bifpn_output_node_names( fpn_min_level, fpn_max_level, bifpn_node_config) self.bifpn_node_config = bifpn_node_config self.bifpn_output_node_names = bifpn_output_node_names self.node_input_blocks = [] self.node_combine_op = [] self.node_post_combine_block = [] all_node_params = bifpn_input_config all_node_names = [node['name'] for node in all_node_params] for node_config in bifpn_node_config: # Maybe transform and/or resample input feature maps. input_blocks = [] for input_name in node_config['inputs']: if input_name not in all_node_names: raise ValueError( 'Input feature map ({}) does not exist:'.format(input_name)) input_index = all_node_names.index(input_name) input_params = all_node_params[input_index] input_block = node_config['input_op']( name='{}/input_{}/'.format(node_config['name'], input_name), input_scale=input_params['scale'], input_num_channels=input_params.get('num_channels', None), output_scale=node_config['scale'], output_num_channels=node_config['num_channels'], conv_hyperparams=conv_hyperparams, is_training=is_training, freeze_batchnorm=freeze_batchnorm) input_blocks.append((input_index, input_block)) # Combine input feature maps. combine_op = _create_bifpn_combine_op( num_inputs=len(input_blocks), name=(node_config['name'] + '/combine'), combine_method=node_config['combine_method']) # Post-combine layers. post_combine_block = [] if node_config['post_combine_op']: post_combine_block.extend(node_config['post_combine_op']( name=node_config['name'] + '/post_combine/', conv_hyperparams=conv_hyperparams, is_training=is_training, freeze_batchnorm=freeze_batchnorm)) self.node_input_blocks.append(input_blocks) self.node_combine_op.append(combine_op) self.node_post_combine_block.append(post_combine_block) all_node_params.append(node_config) all_node_names.append(node_config['name']) def call(self, feature_pyramid): """Compute BiFPN feature maps from input feature pyramid. Executed when calling the `.__call__` method on input. Args: feature_pyramid: list of tuples of (tensor_name, image_feature_tensor). Returns: feature_maps: an OrderedDict mapping keys (feature map names) to tensors where each tensor has shape [batch, height_i, width_i, depth_i]. """ feature_maps = [el[1] for el in feature_pyramid] output_feature_maps = [None for node in self.bifpn_output_node_names] for index, node in enumerate(self.bifpn_node_config): node_scope = 'node_{:02d}'.format(index) with tf.name_scope(node_scope): # Apply layer blocks to this node's input feature maps. input_block_results = [] for input_index, input_block in self.node_input_blocks[index]: block_result = feature_maps[input_index] for layer in input_block: block_result = layer(block_result) input_block_results.append(block_result) # Combine the resulting feature maps. 
node_result = self.node_combine_op[index](input_block_results) # Apply post-combine layer block if applicable. for layer in self.node_post_combine_block[index]: node_result = layer(node_result) feature_maps.append(node_result) if node['name'] in self.bifpn_output_node_names: index = self.bifpn_output_node_names.index(node['name']) output_feature_maps[index] = node_result return collections.OrderedDict( zip(self.bifpn_output_node_names, output_feature_maps))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/bidirectional_feature_pyramid_generators.py
bidirectional_feature_pyramid_generators.py
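# A sketch of the BiFPN node-naming and level-scale conventions documented in
# _create_bifpn_input_config and _create_bifpn_node_config above. The level
# values are illustrative (EfficientDet-style P3-P7 with a backbone that
# provides up to P5); plain Python only.
fpn_min_level, fpn_max_level, input_max_level = 3, 7, 5
level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)]

# Backbone feature maps handed to the BiFPN ('0_up_lvl_{level}'):
backbone_nodes = ['0_up_lvl_{}'.format(i)
                  for i in range(fpn_min_level,
                                 min(fpn_max_level, input_max_level) + 1)]
# Levels the BiFPN must synthesize itself before the first top-down pass:
extra_nodes = ['0_up_lvl_{}'.format(i)
               for i in range(input_max_level + 1, fpn_max_level + 1)]

print(backbone_nodes)  # ['0_up_lvl_3', '0_up_lvl_4', '0_up_lvl_5']
print(extra_nodes)     # ['0_up_lvl_6', '0_up_lvl_7']
print(level_scales)    # [8, 16, 32, 64, 128]
# Iteration 1 then adds '1_dn_lvl_6' ... '1_dn_lvl_3' (top-down) followed by
# '1_up_lvl_4' ... '1_up_lvl_7' (bottom-up); with bifpn_num_iterations == 1
# the outputs are the last five nodes, i.e. '1_dn_lvl_3' plus
# '1_up_lvl_4' ... '1_up_lvl_7'.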
import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import mobilenet_v2 from object_detection.utils import ops from object_detection.utils import shape_utils class SSDMobileNetV2KerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using MobilenetV2 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False, name=None): """MobileNetV2 Feature Extractor for SSD Models. Mobilenet v2 (experimental), designed by sandler@. More details can be found in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor (Functions as a width multiplier for the mobilenet_v2 network itself). min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDMobileNetV2KerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) self._feature_map_layout = { 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', '' ][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_depthwise': self._use_depthwise, 'use_explicit_padding': self._use_explicit_padding, } self.classification_backbone = None self.feature_map_generator = None def build(self, input_shape): full_mobilenet_v2 = mobilenet_v2.mobilenet_v2( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), weights=None, use_explicit_padding=self._use_explicit_padding, alpha=self._depth_multiplier, min_depth=self._min_depth, include_top=False) conv2d_11_pointwise = full_mobilenet_v2.get_layer( name='block_13_expand_relu').output conv2d_13_pointwise = full_mobilenet_v2.get_layer(name='out_relu').output self.classification_backbone = tf.keras.Model( inputs=full_mobilenet_v2.inputs, outputs=[conv2d_11_pointwise, conv2d_13_pointwise]) self.feature_map_generator = ( feature_map_generators.KerasMultiResolutionFeatureMaps( feature_map_layout=self._feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_maps = self.feature_map_generator({ 'layer_15/expansion_output': image_features[0], 'layer_19': image_features[1]}) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py
ssd_mobilenet_v2_keras_feature_extractor.py
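# A rough, hedged analogue of the backbone truncation done in build() above,
# using the stock tf.keras.applications.MobileNetV2 as a stand-in for the
# object_detection keras_models copy. The layer names 'block_13_expand_relu'
# and 'out_relu' are assumed to match; the hyperparameter plumbing does not.
import tensorflow as tf

backbone = tf.keras.applications.MobileNetV2(
    input_shape=(320, 320, 3), include_top=False, weights=None, alpha=1.0)
c4 = backbone.get_layer('block_13_expand_relu').output  # stride-16 endpoint
c5 = backbone.get_layer('out_relu').output              # stride-32 endpoint
feature_backbone = tf.keras.Model(inputs=backbone.inputs, outputs=[c4, c5])

images = tf.random.uniform((1, 320, 320, 3), maxval=255.0)
f4, f5 = feature_backbone((2.0 / 255.0) * images - 1.0)  # same preprocess()
print(f4.shape, f5.shape)  # expected: (1, 20, 20, 576) (1, 10, 10, 1280)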
import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from nets import inception_v2 def _batch_norm_arg_scope(list_ops, use_batch_norm=True, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, batch_norm_scale=False, train_batch_norm=False): """Slim arg scope for InceptionV2 batch norm.""" if use_batch_norm: batch_norm_params = { 'is_training': train_batch_norm, 'scale': batch_norm_scale, 'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon } normalizer_fn = slim.batch_norm else: normalizer_fn = None batch_norm_params = None return slim.arg_scope(list_ops, normalizer_fn=normalizer_fn, normalizer_params=batch_norm_params) class FasterRCNNInceptionV2FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN Inception V2 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, depth_multiplier=1.0, min_depth=16): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') self._depth_multiplier = depth_multiplier self._min_depth = min_depth super(FasterRCNNInceptionV2FeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN Inception V2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] activations: A dictionary mapping feature extractor tensor names to tensors Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. 
""" preprocessed_inputs.get_shape().assert_has_rank(4) shape_assert = tf.Assert( tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.']) with tf.control_dependencies([shape_assert]): with tf.variable_scope('InceptionV2', reuse=self._reuse_weights) as scope: with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d], batch_norm_scale=True, train_batch_norm=self._train_batch_norm): _, activations = inception_v2.inception_v2_base( preprocessed_inputs, final_endpoint='Mixed_4e', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope) return activations['Mixed_4e'], activations def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ net = proposal_feature_maps depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth) trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) data_format = 'NHWC' concat_dim = 3 if data_format == 'NHWC' else 1 with tf.variable_scope('InceptionV2', reuse=self._reuse_weights): with slim.arg_scope( [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME', data_format=data_format): with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d], batch_norm_scale=True, train_batch_norm=self._train_batch_norm): with tf.variable_scope('Mixed_5a'): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat([branch_0, branch_1, branch_2], concat_dim) with tf.variable_scope('Mixed_5b'): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat([branch_0, branch_1, branch_2, branch_3], concat_dim) with 
tf.variable_scope('Mixed_5c'): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') proposal_classifier_features = tf.concat( [branch_0, branch_1, branch_2, branch_3], concat_dim) return proposal_classifier_features
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py
faster_rcnn_inception_v2_feature_extractor.py
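# The depth() helper used throughout _extract_box_classifier_features above,
# pulled out with illustrative arguments to show how depth_multiplier and
# min_depth interact (pure Python).
def depth(d, depth_multiplier=1.0, min_depth=16):
  return max(int(d * depth_multiplier), min_depth)

for d in (128, 192, 256, 352):
  print(d, '->', depth(d, depth_multiplier=0.1))
# 128 -> 16   (12 would fall below min_depth, so it is clamped)
# 192 -> 19
# 256 -> 25
# 352 -> 35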
import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import variables_helper from nets import inception_resnet_v2 class FasterRCNNInceptionResnetV2FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with Inception Resnet v2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the Inception Resnet v2 network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. """ if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay)): # Forces is_training to False to disable batch norm update. with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights) as scope: return inception_resnet_v2.inception_resnet_v2_base( preprocessed_inputs, final_endpoint='PreAuxLogits', scope=scope, output_stride=self._first_stage_features_stride, align_feature_maps=True) def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the Inception ResNet v2 network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. 
Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights): with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay)): # Forces is_training to False to disable batch norm update. with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'): with tf.variable_scope('Mixed_7a'): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv_1 = slim.conv2d( tower_conv, 384, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): tower_conv1 = slim.conv2d( proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d( tower_conv1, 288, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): tower_conv2 = slim.conv2d( proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, scope='Conv2d_0b_3x3') tower_conv2_2 = slim.conv2d( tower_conv2_1, 320, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_3'): tower_pool = slim.max_pool2d( proposal_feature_maps, 3, stride=2, padding='VALID', scope='MaxPool_1a_3x3') net = tf.concat( [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20) net = inception_resnet_v2.block8(net, activation_fn=None) proposal_classifier_features = slim.conv2d( net, 1536, 1, scope='Conv2d_7b_1x1') return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for InceptionResnetV2 checkpoints. TODO(jonathanhuang,rathodv): revisit whether it's possible to force the `Repeat` namescope as created in `_extract_box_classifier_features` to start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can be used. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2') var_name = var_name.replace( second_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable return variables_to_restore
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py
faster_rcnn_inception_resnet_v2_feature_extractor.py
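# String-only sketch of the variable-name remapping performed by
# restore_from_classification_checkpoint_fn above. The scope names below are
# the ones the Faster R-CNN meta-architecture conventionally uses, but treat
# them (and the example variable path) as illustrative.
first_scope = 'FirstStageFeatureExtractor'
second_scope = 'SecondStageFeatureExtractor'

def checkpoint_name(op_name):
  if op_name.startswith(first_scope):
    return op_name.replace(first_scope + '/', '')
  if op_name.startswith(second_scope):
    renamed = op_name.replace(second_scope + '/InceptionResnetV2/Repeat',
                              'InceptionResnetV2/Repeat_2')
    return renamed.replace(second_scope + '/', '')
  return op_name

print(checkpoint_name('SecondStageFeatureExtractor/InceptionResnetV2/'
                      'Repeat/block8_1/Conv2d_1x1/weights'))
# InceptionResnetV2/Repeat_2/block8_1/Conv2d_1x1/weights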
# Skip pylint for this file because it times out # pylint: skip-file import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models.keras_models import inception_resnet_v2 from object_detection.utils import model_util from object_detection.utils import variables_helper class FasterRCNNInceptionResnetV2KerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNInceptionResnetV2KerasFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, weight_decay) self._variable_dict = {} self.classification_backbone = None def preprocess(self, resized_inputs): """Faster R-CNN with Inception Resnet v2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def get_proposal_feature_extractor_model(self, name=None): """Returns a model that extracts first stage RPN features. Extracts features using the first half of the Inception Resnet v2 network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. And returns rpn_feature_map: A tensor with shape [batch, height, width, depth] """ if not self.classification_backbone: self.classification_backbone = inception_resnet_v2.inception_resnet_v2( self._train_batch_norm, output_stride=self._first_stage_features_stride, align_feature_maps=True, weight_decay=self._weight_decay, weights=None, include_top=False) with tf.name_scope(name): with tf.name_scope('InceptionResnetV2'): proposal_features = self.classification_backbone.get_layer( name='block17_20_ac').output keras_model = tf.keras.Model( inputs=self.classification_backbone.inputs, outputs=proposal_features) for variable in keras_model.variables: self._variable_dict[variable.name[:-2]] = variable return keras_model def get_box_classifier_feature_extractor_model(self, name=None): """Returns a model that extracts second stage box classifier features. This function reconstructs the "second half" of the Inception ResNet v2 network after the part defined in `get_proposal_feature_extractor_model`. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. 
And returns proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ if not self.classification_backbone: self.classification_backbone = inception_resnet_v2.inception_resnet_v2( self._train_batch_norm, output_stride=self._first_stage_features_stride, align_feature_maps=True, weight_decay=self._weight_decay, weights=None, include_top=False) with tf.name_scope(name): with tf.name_scope('InceptionResnetV2'): proposal_feature_maps = self.classification_backbone.get_layer( name='block17_20_ac').output proposal_classifier_features = self.classification_backbone.get_layer( name='conv_7b_ac').output keras_model = model_util.extract_submodel( model=self.classification_backbone, inputs=proposal_feature_maps, outputs=proposal_classifier_features) for variable in keras_model.variables: self._variable_dict[variable.name[:-2]] = variable return keras_model
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py
faster_rcnn_inception_resnet_v2_keras_feature_extractor.py
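# The `variable.name[:-2]` bookkeeping above strips the ':0' suffix that
# TensorFlow appends to variable names. A toy Keras layer (nothing to do with
# Inception Resnet v2) shows the same pattern.
import tensorflow as tf

layer = tf.keras.layers.Dense(4, name='toy_dense')
layer.build((None, 8))
variable_dict = {v.name[:-2]: v for v in layer.variables}
print(sorted(variable_dict))  # ['toy_dense/bias', 'toy_dense/kernel']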
import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from nets import mobilenet_v1 class EmbeddedSSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """Embedded-friendly SSD Feature Extractor using MobilenetV1 features. This feature extractor is similar to SSD MobileNetV1 feature extractor, and it fixes input resolution to be 256x256, reduces the number of feature maps used for box prediction and ensures convolution kernel to be no larger than input tensor in spatial dimensions. This feature extractor requires support of the following ops if used in embedded devices: - Conv - DepthwiseConv - Relu6 All conv/depthwiseconv use SAME padding, and no additional spatial padding is needed. """ def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """MobileNetV1 Feature Extractor for Embedded-friendly SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. For EmbeddedSSD it must be set to 1. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. Raises: ValueError: upon invalid `pad_to_multiple` values. """ if pad_to_multiple != 1: raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` ' 'of 1.') super(EmbeddedSSDMobileNetV1FeatureExtractor, self).__init__( is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise, override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] Raises: ValueError: if image height or width are not 256 pixels. 
""" image_shape = preprocessed_inputs.get_shape() image_shape.assert_has_rank(4) image_height = image_shape[1].value image_width = image_shape[2].value if image_height is None or image_width is None: shape_assert = tf.Assert( tf.logical_and(tf.equal(tf.shape(preprocessed_inputs)[1], 256), tf.equal(tf.shape(preprocessed_inputs)[2], 256)), ['image size must be 256 in both height and width.']) with tf.control_dependencies([shape_assert]): preprocessed_inputs = tf.identity(preprocessed_inputs) elif image_height != 256 or image_width != 256: raise ValueError('image size must be = 256 in both height and width;' ' image dim = %d,%d' % (image_height, image_width)) feature_map_layout = { 'from_layer': [ 'Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '' ], 'layer_depth': [-1, -1, 512, 256, 256], 'conv_kernel_size': [-1, -1, 3, 3, 2], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py
embedded_ssd_mobilenet_v1_feature_extractor.py
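# The extractor above insists on a fixed 256x256 input. This is the static
# branch of that check, lifted out as a standalone helper for illustration;
# the dynamic tf.Assert branch is only needed when the spatial shape is
# unknown at graph-construction time.
def check_embedded_input_size(image_height, image_width):
  if image_height != 256 or image_width != 256:
    raise ValueError('image size must be = 256 in both height and width;'
                     ' image dim = %d,%d' % (image_height, image_width))

check_embedded_input_size(256, 256)  # passes silently
try:
  check_embedded_input_size(300, 300)
except ValueError as e:
  print(e)
# image size must be = 256 in both height and width; image dim = 300,300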
"""Mobilenet v1 Faster R-CNN implementation.""" import numpy as np import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import shape_utils from nets import mobilenet_v1 def _get_mobilenet_conv_no_last_stride_defs(conv_depth_ratio_in_percentage): if conv_depth_ratio_in_percentage not in [25, 50, 75, 100]: raise ValueError( 'Only the following ratio percentages are supported: 25, 50, 75, 100') conv_depth_ratio_in_percentage = float(conv_depth_ratio_in_percentage) / 100.0 channels = np.array([ 32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024 ], dtype=np.float32) channels = (channels * conv_depth_ratio_in_percentage).astype(np.int32) return [ mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=channels[0]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[1]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[2]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[3]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[4]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[5]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[6]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[7]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[8]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[9]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[10]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[11]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[12]), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[13]) ] class FasterRCNNMobilenetV1FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN Mobilenet V1 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, depth_multiplier=1.0, min_depth=16, skip_last_stride=False, conv_depth_ratio_in_percentage=100): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. skip_last_stride: Skip the last stride if True. conv_depth_ratio_in_percentage: Conv depth ratio in percentage. Only applied if skip_last_stride is True. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') self._depth_multiplier = depth_multiplier self._min_depth = min_depth self._skip_last_stride = skip_last_stride self._conv_depth_ratio_in_percentage = conv_depth_ratio_in_percentage super(FasterRCNNMobilenetV1FeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN Mobilenet V1 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
""" return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] activations: A dictionary mapping feature extractor tensor names to tensors Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. """ preprocessed_inputs.get_shape().assert_has_rank(4) preprocessed_inputs = shape_utils.check_min_image_dim( min_dim=33, image_tensor=preprocessed_inputs) with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=self._train_batch_norm, weight_decay=self._weight_decay)): with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: params = {} if self._skip_last_stride: params['conv_defs'] = _get_mobilenet_conv_no_last_stride_defs( conv_depth_ratio_in_percentage=self. _conv_depth_ratio_in_percentage) _, activations = mobilenet_v1.mobilenet_v1_base( preprocessed_inputs, final_endpoint='Conv2d_11_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope, **params) return activations['Conv2d_11_pointwise'], activations def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ net = proposal_feature_maps conv_depth = 1024 if self._skip_last_stride: conv_depth_ratio = float(self._conv_depth_ratio_in_percentage) / 100.0 conv_depth = int(float(conv_depth) * conv_depth_ratio) depth = lambda d: max(int(d * 1.0), 16) with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights): with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=self._train_batch_norm, weight_decay=self._weight_decay)): with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], padding='SAME'): net = slim.separable_conv2d( net, depth(conv_depth), [3, 3], depth_multiplier=1, stride=2, scope='Conv2d_12_pointwise') return slim.separable_conv2d( net, depth(conv_depth), [3, 3], depth_multiplier=1, stride=1, scope='Conv2d_13_pointwise')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py
faster_rcnn_mobilenet_v1_feature_extractor.py
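# Numeric part of _get_mobilenet_conv_no_last_stride_defs above: how
# conv_depth_ratio_in_percentage scales the per-layer channel counts (NumPy
# only, without building the mobilenet_v1 conv-def namedtuples).
import numpy as np

def scaled_channels(conv_depth_ratio_in_percentage):
  if conv_depth_ratio_in_percentage not in [25, 50, 75, 100]:
    raise ValueError(
        'Only the following ratio percentages are supported: 25, 50, 75, 100')
  ratio = conv_depth_ratio_in_percentage / 100.0
  channels = np.array(
      [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024],
      dtype=np.float32)
  return (channels * ratio).astype(np.int32)

print(scaled_channels(50))
# [ 16  32  64  64 128 128 256 256 256 256 256 256 512 512]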
import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import mobilenet_v1 class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV1 PPN features.""" def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=None, regularize_depthwise=True)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.pooling_pyramid_feature_maps( base_feature_map_depth=0, num_layers=6, image_features={ 'image_features': image_features['Conv2d_11_pointwise'] }) return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py
ssd_mobilenet_v1_ppn_feature_extractor.py
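extract_features above feeds a single backbone endpoint (Conv2d_11_pointwise) into pooling_pyramid_feature_maps with num_layers=6. The sketch below is a conceptual NumPy approximation of what a pooling pyramid does, assuming plain 2x2 stride-2 max pooling on even spatial dimensions; it is not the library's feature_map_generators implementation:

import numpy as np

def pooling_pyramid(base_map, num_layers=6):
  # Each successive SSD feature map is a stride-2 pooling of the previous one,
  # so one backbone endpoint yields num_layers maps of decreasing resolution.
  maps = [base_map]
  for _ in range(num_layers - 1):
    h, w = maps[-1].shape[:2]
    pooled = maps[-1][:h // 2 * 2, :w // 2 * 2].reshape(
        h // 2, 2, w // 2, 2, -1).max(axis=(1, 3))
    maps.append(pooled)
  return maps

feature_maps = pooling_pyramid(np.zeros((32, 32, 512)), num_layers=6)
print([m.shape for m in feature_maps])
# [(32, 32, 512), (16, 16, 512), (8, 8, 512), (4, 4, 512), (2, 2, 512), (1, 1, 512)]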
import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import variables_helper try: from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top except: # pylint: disable=bare-except pass def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): """Defines the default arg scope for the PNASNet Large for object detection. This provides a small edit to switch batch norm training on and off. Args: is_batch_norm_training: Boolean indicating whether to train with batch norm. Default is False. Returns: An `arg_scope` to use for the PNASNet Large Model. """ imagenet_scope = pnasnet.pnasnet_large_arg_scope() with slim.arg_scope(imagenet_scope): with slim.arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: return sc class SSDPNASNetFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using PNASNet features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """PNASNet Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_depthwise: Whether to use depthwise convolutions. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDPNASNetFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ feature_map_layout = { 'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with slim.arg_scope( pnasnet_large_arg_scope_for_detection( is_batch_norm_training=self._is_training)): with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], reuse=self._reuse_weights): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = pnasnet.build_pnasnet_large( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), num_classes=None, is_training=self._is_training, final_endpoint='Cell_11') with tf.variable_scope('SSD_feature_maps', reuse=self._reuse_weights): with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values()) def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in ssd_meta_arch.SSDFeatureExtractor which does not work for PNASNet checkpoints. Args: feature_extractor_scope: A scope name for the first stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith(feature_extractor_scope): var_name = variable.op.name.replace(feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable return variables_to_restore
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_pnasnet_feature_extractor.py
ssd_pnasnet_feature_extractor.py
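restore_from_classification_checkpoint_fn above remaps graph variable names to checkpoint names by stripping the feature extractor scope and appending '/ExponentialMovingAverage'. A string-only sketch of that remapping follows (the example variable name is hypothetical; the real method iterates over graph variables, not strings):

def remap_variable_name(op_name, feature_extractor_scope):
  # Mirrors the name manipulation in restore_from_classification_checkpoint_fn.
  if not op_name.startswith(feature_extractor_scope):
    return None
  var_name = op_name.replace(feature_extractor_scope + '/', '')
  return var_name + '/ExponentialMovingAverage'

print(remap_variable_name('FeatureExtractor/cell_stem_0/1x1/weights',
                          'FeatureExtractor'))
# cell_stem_0/1x1/weights/ExponentialMovingAverage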
import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models.keras_models import resnet_v1 from object_detection.utils import model_util _RESNET_MODEL_CONV4_LAST_LAYERS = { 'resnet_v1_50': 'conv4_block6_out', 'resnet_v1_101': 'conv4_block23_out', 'resnet_v1_152': 'conv4_block36_out', } class FasterRCNNResnetKerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Faster R-CNN with Resnet feature extractor implementation.""" def __init__(self, is_training, resnet_v1_base_model, resnet_v1_base_model_name, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. resnet_v1_base_model: base resnet v1 network to use. One of the resnet_v1.resnet_v1_{50,101,152} models. resnet_v1_base_model_name: model name under which to construct resnet v1. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 16.') super(FasterRCNNResnetKerasFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, weight_decay) self.classification_backbone = None self._variable_dict = {} self._resnet_v1_base_model = resnet_v1_base_model self._resnet_v1_base_model_name = resnet_v1_base_model_name def preprocess(self, resized_inputs): """Faster R-CNN Resnet V1 preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def get_proposal_feature_extractor_model(self, name=None): """Returns a model that extracts first stage RPN features. Extracts features using the first half of the Resnet v1 network. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. 
And returns rpn_feature_map: A tensor with shape [batch, height, width, depth] """ if not self.classification_backbone: self.classification_backbone = self._resnet_v1_base_model( batchnorm_training=self._train_batch_norm, conv_hyperparams=None, weight_decay=self._weight_decay, classes=None, weights=None, include_top=False ) with tf.name_scope(name): with tf.name_scope('ResnetV1'): conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[ self._resnet_v1_base_model_name] proposal_features = self.classification_backbone.get_layer( name=conv4_last_layer).output keras_model = tf.keras.Model( inputs=self.classification_backbone.inputs, outputs=proposal_features) for variable in keras_model.variables: self._variable_dict[variable.name[:-2]] = variable return keras_model def get_box_classifier_feature_extractor_model(self, name=None): """Returns a model that extracts second stage box classifier features. This function reconstructs the "second half" of the ResNet v1 network after the part defined in `get_proposal_feature_extractor_model`. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. And returns proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ if not self.classification_backbone: self.classification_backbone = self._resnet_v1_base_model( batchnorm_training=self._train_batch_norm, conv_hyperparams=None, weight_decay=self._weight_decay, classes=None, weights=None, include_top=False ) with tf.name_scope(name): with tf.name_scope('ResnetV1'): conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[ self._resnet_v1_base_model_name] proposal_feature_maps = self.classification_backbone.get_layer( name=conv4_last_layer).output proposal_classifier_features = self.classification_backbone.get_layer( name='conv5_block3_out').output keras_model = model_util.extract_submodel( model=self.classification_backbone, inputs=proposal_feature_maps, outputs=proposal_classifier_features) for variable in keras_model.variables: self._variable_dict[variable.name[:-2]] = variable return keras_model class FasterRCNNResnet50KerasFeatureExtractor( FasterRCNNResnetKerasFeatureExtractor): """Faster R-CNN with Resnet50 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. """ super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( is_training=is_training, resnet_v1_base_model=resnet_v1.resnet_v1_50, resnet_v1_base_model_name='resnet_v1_50', first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay) class FasterRCNNResnet101KerasFeatureExtractor( FasterRCNNResnetKerasFeatureExtractor): """Faster R-CNN with Resnet101 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. 
""" super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__( is_training=is_training, resnet_v1_base_model=resnet_v1.resnet_v1_101, resnet_v1_base_model_name='resnet_v1_101', first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay) class FasterRCNNResnet152KerasFeatureExtractor( FasterRCNNResnetKerasFeatureExtractor): """Faster R-CNN with Resnet152 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. weight_decay: See base class. """ super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__( is_training=is_training, resnet_v1_base_model=resnet_v1.resnet_v1_152, resnet_v1_base_model_name='resnet_v1_152', first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py
faster_rcnn_resnet_keras_feature_extractor.py
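preprocess above performs VGG-style channel mean subtraction with the RGB means [123.68, 116.779, 103.939] and skips inputs that do not have 3 channels. A NumPy-only sketch of the same transformation (illustrative, not the class method itself):

import numpy as np

CHANNEL_MEANS = [123.68, 116.779, 103.939]  # RGB means, as in the class above.

def preprocess(resized_inputs):
  # Subtract the per-channel means only when there are exactly 3 channels.
  if resized_inputs.shape[-1] == 3:
    return resized_inputs - np.array([[CHANNEL_MEANS]])
  return resized_inputs

batch = np.full((1, 4, 4, 3), 255.0)
print(preprocess(batch)[0, 0, 0])  # [131.32  138.221 151.061]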
import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import resnet_v1 from object_detection.utils import ops _RESNET_MODEL_OUTPUT_LAYERS = { 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out'], 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block23_out', 'conv5_block3_out'], 'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out', 'conv4_block36_out', 'conv5_block3_out'], } class _ResnetFPN(tf.keras.layers.Layer): """Construct Resnet FPN layer.""" def __init__(self, backbone_classifier, fpn_features_generator, coarse_feature_layers, pad_to_multiple, fpn_min_level, resnet_block_names, base_fpn_max_level): """Constructor. Args: backbone_classifier: Classifier backbone. Should be one of 'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152'. fpn_features_generator: KerasFpnTopDownFeatureMaps that accepts a dictionary of features and returns a ordered dictionary of fpn features. coarse_feature_layers: Coarse feature layers for fpn. pad_to_multiple: An integer multiple to pad input image. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to Resnet v1 layers. resnet_block_names: a list of block names of resnet. base_fpn_max_level: maximum level of fpn without coarse feature layers. """ super(_ResnetFPN, self).__init__() self.classification_backbone = backbone_classifier self.fpn_features_generator = fpn_features_generator self.coarse_feature_layers = coarse_feature_layers self.pad_to_multiple = pad_to_multiple self._fpn_min_level = fpn_min_level self._resnet_block_names = resnet_block_names self._base_fpn_max_level = base_fpn_max_level def call(self, inputs): """Create internal Resnet FPN layer. Args: inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. Returns: feature_maps: A list of tensors with shape [batch, height, width, depth] represent extracted features. 
""" inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple) backbone_outputs = self.classification_backbone(inputs) feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) feature_block_map = dict( list(zip(self._resnet_block_names, backbone_outputs))) fpn_input_image_features = [ (feature_block, feature_block_map[feature_block]) for feature_block in feature_block_list] fpn_features = self.fpn_features_generator(fpn_input_image_features) feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) last_feature_map = fpn_features['top_down_block{}'.format( self._base_fpn_max_level - 1)] for coarse_feature_layers in self.coarse_feature_layers: for layer in coarse_feature_layers: last_feature_map = layer(last_feature_map) feature_maps.append(last_feature_map) return feature_maps class FasterRCNNResnetV1FpnKerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features.""" def __init__(self, is_training, resnet_v1_base_model, resnet_v1_base_model_name, first_stage_features_stride, conv_hyperparams, batch_norm_trainable=True, pad_to_multiple=32, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: See base class. resnet_v1_base_model: base resnet v1 network to use. One of the resnet_v1.resnet_v1_{50,101,152} models. resnet_v1_base_model_name: model name under which to construct resnet v1. first_stage_features_stride: See base class. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. batch_norm_trainable: See base class. pad_to_multiple: An integer multiple to pad input image. weight_decay: See base class. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to Resnet v1 layers. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. 
""" if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNResnetV1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay) self._resnet_v1_base_model = resnet_v1_base_model self._resnet_v1_base_model_name = resnet_v1_base_model_name self._conv_hyperparams = conv_hyperparams self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._freeze_batchnorm = (not batch_norm_trainable) self._pad_to_multiple = pad_to_multiple self._override_base_feature_extractor_hyperparams = \ override_base_feature_extractor_hyperparams self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] self.classification_backbone = None self._fpn_features_generator = None self._coarse_feature_layers = [] def preprocess(self, resized_inputs): """Faster R-CNN Resnet V1 preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def get_proposal_feature_extractor_model(self, name=None): """Returns a model that extracts first stage RPN features. Extracts features using the Resnet v1 FPN network. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. 
And returns rpn_feature_map: A list of tensors with shape [batch, height, width, depth] """ with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): full_resnet_v1_model = self._resnet_v1_base_model( batchnorm_training=self._train_batch_norm, conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), classes=None, weights=None, include_top=False) output_layers = _RESNET_MODEL_OUTPUT_LAYERS[ self._resnet_v1_base_model_name] outputs = [full_resnet_v1_model.get_layer(output_layer_name).output for output_layer_name in output_layers] self.classification_backbone = tf.keras.Model( inputs=full_resnet_v1_model.inputs, outputs=outputs) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._additional_layer_depth, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) # Construct coarse feature layers for i in range(self._base_fpn_max_level, self._fpn_max_level): layers = [] layer_name = 'bottom_up_block{}'.format(i) layers.append( tf.keras.layers.Conv2D( self._additional_layer_depth, [3, 3], padding='SAME', strides=2, name=layer_name + '_conv', **self._conv_hyperparams.params())) layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name=layer_name + '_batchnorm')) layers.append( self._conv_hyperparams.build_activation_layer( name=layer_name)) self._coarse_feature_layers.append(layers) feature_extractor_model = _ResnetFPN(self.classification_backbone, self._fpn_features_generator, self._coarse_feature_layers, self._pad_to_multiple, self._fpn_min_level, self._resnet_block_names, self._base_fpn_max_level) return feature_extractor_model def get_box_classifier_feature_extractor_model(self, name=None): """Returns a model that extracts second stage box classifier features. Construct two fully connected layer to extract the box classifier features. Args: name: A scope name to construct all variables within. Returns: A Keras model that takes proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. And returns proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, 1, 1, 1024] representing box classifier features for each proposal. """ with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): feature_extractor_model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=1024, activation='relu'), self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm)), tf.keras.layers.Dense(units=1024, activation='relu'), tf.keras.layers.Reshape((1, 1, 1024)) ]) return feature_extractor_model class FasterRCNNResnet50FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet50 FPN feature extractor.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. 
batch_norm_trainable: See base class. conv_hyperparams: See base class. weight_decay: See base class. fpn_min_level: See base class. fpn_max_level: See base class. additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ super(FasterRCNNResnet50FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, resnet_v1_base_model=resnet_v1.resnet_v1_50, resnet_v1_base_model_name='resnet_v1_50', batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay, fpn_min_level=fpn_min_level, fpn_max_level=fpn_max_level, additional_layer_depth=additional_layer_depth, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams ) class FasterRCNNResnet101FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet101 FPN feature extractor.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. conv_hyperparams: See base class. weight_decay: See base class. fpn_min_level: See base class. fpn_max_level: See base class. additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ super(FasterRCNNResnet101FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, resnet_v1_base_model=resnet_v1.resnet_v1_101, resnet_v1_base_model_name='resnet_v1_101', batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay, fpn_min_level=fpn_min_level, fpn_max_level=fpn_max_level, additional_layer_depth=additional_layer_depth, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) class FasterRCNNResnet152FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet152 FPN feature extractor.""" def __init__(self, is_training, first_stage_features_stride=16, batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. conv_hyperparams: See base class. weight_decay: See base class. fpn_min_level: See base class. fpn_max_level: See base class. additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ super(FasterRCNNResnet152FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, resnet_v1_base_model=resnet_v1.resnet_v1_152, resnet_v1_base_model_name='resnet_v1_152', batch_norm_trainable=batch_norm_trainable, weight_decay=weight_decay, fpn_min_level=fpn_min_level, fpn_max_level=fpn_max_level, additional_layer_depth=additional_layer_depth, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
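get_proposal_feature_extractor_model above takes FPN levels up to 5 from the ResNet backbone and builds any higher levels with extra stride-2 "coarse feature" blocks. A small sketch of that level bookkeeping (the helper name is illustrative, not part of the package):

def fpn_level_plan(fpn_min_level=2, fpn_max_level=6):
  # base_fpn_max_level and num_levels as computed in the method above;
  # levels beyond base_fpn_max_level come from extra stride-2 conv blocks.
  base_fpn_max_level = min(fpn_max_level, 5)
  num_backbone_levels = base_fpn_max_level + 1 - fpn_min_level
  num_coarse_blocks = fpn_max_level - base_fpn_max_level
  return num_backbone_levels, num_coarse_blocks

print(fpn_level_plan(2, 6))  # (4, 1): four FPN maps from the backbone, one extra stride-2 block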
"""SSDFeatureExtractor for MobileDet features.""" import functools import numpy as np import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import ops from object_detection.utils import shape_utils BACKBONE_WEIGHT_DECAY = 4e-5 def _scale_filters(filters, multiplier, base=8): """Scale the filters accordingly to (multiplier, base).""" round_half_up = int(int(filters) * multiplier / base + 0.5) result = int(round_half_up * base) return max(result, base) def _swish6(h): with tf.name_scope('swish6'): return h * tf.nn.relu6(h + np.float32(3)) * np.float32(1. / 6.) def _conv(h, filters, kernel_size, strides=1, normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6): if activation_fn is None: raise ValueError('Activation function cannot be None. Use tf.identity ' 'instead to better support quantized training.') return slim.conv2d( h, filters, kernel_size, stride=strides, activation_fn=activation_fn, normalizer_fn=normalizer_fn, weights_initializer=tf.initializers.he_normal(), weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY), padding='SAME') def _separable_conv( h, filters, kernel_size, strides=1, activation_fn=tf.nn.relu6): """Separable convolution layer.""" if activation_fn is None: raise ValueError('Activation function cannot be None. Use tf.identity ' 'instead to better support quantized training.') # Depthwise variant of He initialization derived under the principle proposed # in the original paper. Note the original He normalization was designed for # full convolutions and calling tf.initializers.he_normal() can over-estimate # the fan-in of a depthwise kernel by orders of magnitude. stddev = (2.0 / kernel_size**2)**0.5 / .87962566103423978 depthwise_initializer = tf.initializers.truncated_normal(stddev=stddev) return slim.separable_conv2d( h, filters, kernel_size, stride=strides, activation_fn=activation_fn, normalizer_fn=slim.batch_norm, weights_initializer=depthwise_initializer, pointwise_initializer=tf.initializers.he_normal(), weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY), padding='SAME') def _squeeze_and_excite(h, hidden_dim, activation_fn=tf.nn.relu6): with tf.variable_scope(None, default_name='SqueezeExcite'): height, width = h.shape[1], h.shape[2] u = slim.avg_pool2d(h, [height, width], stride=1, padding='VALID') u = _conv(u, hidden_dim, 1, normalizer_fn=None, activation_fn=activation_fn) u = _conv(u, h.shape[-1], 1, normalizer_fn=None, activation_fn=tf.nn.sigmoid) return u * h def _inverted_bottleneck_no_expansion( h, filters, activation_fn=tf.nn.relu6, kernel_size=3, strides=1, use_se=False): """Inverted bottleneck layer without the first 1x1 expansion convolution.""" with tf.variable_scope(None, default_name='IBNNoExpansion'): # Setting filters to None will make _separable_conv a depthwise conv. 
h = _separable_conv( h, None, kernel_size, strides=strides, activation_fn=activation_fn) if use_se: hidden_dim = _scale_filters(h.shape[-1], 0.25) h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) h = _conv(h, filters, 1, activation_fn=tf.identity) return h def _inverted_bottleneck( h, filters, activation_fn=tf.nn.relu6, kernel_size=3, expansion=8, strides=1, use_se=False, residual=True): """Inverted bottleneck layer.""" with tf.variable_scope(None, default_name='IBN'): shortcut = h expanded_filters = int(h.shape[-1]) * expansion if expansion <= 1: raise ValueError('Expansion factor must be greater than 1.') h = _conv(h, expanded_filters, 1, activation_fn=activation_fn) # Setting filters to None will make _separable_conv a depthwise conv. h = _separable_conv(h, None, kernel_size, strides=strides, activation_fn=activation_fn) if use_se: hidden_dim = _scale_filters(expanded_filters, 0.25) h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) h = _conv(h, filters, 1, activation_fn=tf.identity) if residual: h = h + shortcut return h def _fused_conv( h, filters, activation_fn=tf.nn.relu6, kernel_size=3, expansion=8, strides=1, use_se=False, residual=True): """Fused convolution layer.""" with tf.variable_scope(None, default_name='FusedConv'): shortcut = h expanded_filters = int(h.shape[-1]) * expansion if expansion <= 1: raise ValueError('Expansion factor must be greater than 1.') h = _conv(h, expanded_filters, kernel_size, strides=strides, activation_fn=activation_fn) if use_se: hidden_dim = _scale_filters(expanded_filters, 0.25) h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) h = _conv(h, filters, 1, activation_fn=tf.identity) if residual: h = h + shortcut return h def _tucker_conv( h, filters, activation_fn=tf.nn.relu6, kernel_size=3, input_rank_ratio=0.25, output_rank_ratio=0.25, strides=1, residual=True): """Tucker convolution layer (generalized bottleneck).""" with tf.variable_scope(None, default_name='TuckerConv'): shortcut = h input_rank = _scale_filters(h.shape[-1], input_rank_ratio) h = _conv(h, input_rank, 1, activation_fn=activation_fn) output_rank = _scale_filters(filters, output_rank_ratio) h = _conv(h, output_rank, kernel_size, strides=strides, activation_fn=activation_fn) h = _conv(h, filters, 1, activation_fn=tf.identity) if residual: h = h + shortcut return h def mobiledet_cpu_backbone(h, multiplier=1.0): """Build a MobileDet CPU backbone.""" def _scale(filters): return _scale_filters(filters, multiplier) ibn = functools.partial( _inverted_bottleneck, use_se=True, activation_fn=_swish6) endpoints = {} h = _conv(h, _scale(16), 3, strides=2, activation_fn=_swish6) h = _inverted_bottleneck_no_expansion( h, _scale(8), use_se=True, activation_fn=_swish6) endpoints['C1'] = h h = ibn(h, _scale(16), expansion=4, strides=2, residual=False) endpoints['C2'] = h h = ibn(h, _scale(32), expansion=8, strides=2, residual=False) h = ibn(h, _scale(32), expansion=4) h = ibn(h, _scale(32), expansion=4) h = ibn(h, _scale(32), expansion=4) endpoints['C3'] = h h = ibn(h, _scale(72), kernel_size=5, expansion=8, strides=2, residual=False) h = ibn(h, _scale(72), expansion=8) h = ibn(h, _scale(72), kernel_size=5, expansion=4) h = ibn(h, _scale(72), expansion=4) h = ibn(h, _scale(72), expansion=8, residual=False) h = ibn(h, _scale(72), expansion=8) h = ibn(h, _scale(72), expansion=8) h = ibn(h, _scale(72), expansion=8) endpoints['C4'] = h h = ibn(h, _scale(104), kernel_size=5, expansion=8, strides=2, residual=False) h = ibn(h, _scale(104), kernel_size=5, 
expansion=4) h = ibn(h, _scale(104), kernel_size=5, expansion=4) h = ibn(h, _scale(104), expansion=4) h = ibn(h, _scale(144), expansion=8, residual=False) endpoints['C5'] = h return endpoints def mobiledet_dsp_backbone(h, multiplier=1.0): """Build a MobileDet DSP backbone.""" def _scale(filters): return _scale_filters(filters, multiplier) ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) endpoints = {} h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) h = _inverted_bottleneck_no_expansion( h, _scale(24), activation_fn=tf.nn.relu6) endpoints['C1'] = h h = fused(h, _scale(32), expansion=4, strides=2, residual=False) h = fused(h, _scale(32), expansion=4) h = ibn(h, _scale(32), expansion=4) h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.75) endpoints['C2'] = h h = fused(h, _scale(64), expansion=8, strides=2, residual=False) h = ibn(h, _scale(64), expansion=4) h = fused(h, _scale(64), expansion=4) h = fused(h, _scale(64), expansion=4) endpoints['C3'] = h h = fused(h, _scale(120), expansion=8, strides=2, residual=False) h = ibn(h, _scale(120), expansion=4) h = ibn(h, _scale(120), expansion=8) h = ibn(h, _scale(120), expansion=8) h = fused(h, _scale(144), expansion=8, residual=False) h = ibn(h, _scale(144), expansion=8) h = ibn(h, _scale(144), expansion=8) h = ibn(h, _scale(144), expansion=8) endpoints['C4'] = h h = ibn(h, _scale(160), expansion=4, strides=2, residual=False) h = ibn(h, _scale(160), expansion=4) h = fused(h, _scale(160), expansion=4) h = tucker(h, _scale(160), input_rank_ratio=0.75, output_rank_ratio=0.75) h = ibn(h, _scale(240), expansion=8, residual=False) endpoints['C5'] = h return endpoints def mobiledet_edgetpu_backbone(h, multiplier=1.0): """Build a MobileDet EdgeTPU backbone.""" def _scale(filters): return _scale_filters(filters, multiplier) ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) endpoints = {} h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) h = tucker(h, _scale(16), input_rank_ratio=0.25, output_rank_ratio=0.75, residual=False) endpoints['C1'] = h h = fused(h, _scale(16), expansion=8, strides=2, residual=False) h = fused(h, _scale(16), expansion=4) h = fused(h, _scale(16), expansion=8) h = fused(h, _scale(16), expansion=4) endpoints['C2'] = h h = fused(h, _scale(40), expansion=8, kernel_size=5, strides=2, residual=False) h = fused(h, _scale(40), expansion=4) h = fused(h, _scale(40), expansion=4) h = fused(h, _scale(40), expansion=4) endpoints['C3'] = h h = ibn(h, _scale(72), expansion=8, strides=2, residual=False) h = ibn(h, _scale(72), expansion=8) h = fused(h, _scale(72), expansion=4) h = fused(h, _scale(72), expansion=4) h = ibn(h, _scale(96), expansion=8, kernel_size=5, residual=False) h = ibn(h, _scale(96), expansion=8, kernel_size=5) h = ibn(h, _scale(96), expansion=8) h = ibn(h, _scale(96), expansion=8) endpoints['C4'] = h h = ibn(h, _scale(120), expansion=8, kernel_size=5, strides=2, residual=False) h = ibn(h, _scale(120), expansion=8) h = ibn(h, _scale(120), expansion=4, kernel_size=5) h = ibn(h, _scale(120), expansion=8) h = ibn(h, _scale(384), expansion=8, kernel_size=5, residual=False) endpoints['C5'] = h return endpoints def mobiledet_gpu_backbone(h, multiplier=1.0): """Build a 
MobileDet GPU backbone.""" def _scale(filters): return _scale_filters(filters, multiplier) ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) endpoints = {} # block 0 h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) # block 1 h = tucker( h, _scale(16), input_rank_ratio=0.25, output_rank_ratio=0.25, residual=False) endpoints['C1'] = h # block 2 h = fused(h, _scale(32), expansion=8, strides=2, residual=False) h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) endpoints['C2'] = h # block 3 h = fused( h, _scale(64), expansion=8, kernel_size=3, strides=2, residual=False) h = fused(h, _scale(64), expansion=8) h = fused(h, _scale(64), expansion=8) h = fused(h, _scale(64), expansion=4) endpoints['C3'] = h # block 4 h = fused( h, _scale(128), expansion=8, kernel_size=3, strides=2, residual=False) h = fused(h, _scale(128), expansion=4) h = fused(h, _scale(128), expansion=4) h = fused(h, _scale(128), expansion=4) # block 5 h = fused( h, _scale(128), expansion=8, kernel_size=3, strides=1, residual=False) h = fused(h, _scale(128), expansion=8) h = fused(h, _scale(128), expansion=8) h = fused(h, _scale(128), expansion=8) endpoints['C4'] = h # block 6 h = fused( h, _scale(128), expansion=4, kernel_size=3, strides=2, residual=False) h = fused(h, _scale(128), expansion=4) h = fused(h, _scale(128), expansion=4) h = fused(h, _scale(128), expansion=4) # block 7 h = ibn(h, _scale(384), expansion=8, kernel_size=3, strides=1, residual=False) endpoints['C5'] = h return endpoints class SSDMobileDetFeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor): """Base class of SSD feature extractor using MobileDet features.""" def __init__(self, backbone_fn, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDet'): """MobileDet Feature Extractor for SSD Models. Reference: https://arxiv.org/abs/2004.14525 Args: backbone_fn: function to construct the MobileDet backbone. is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: Integer, minimum feature extractor depth (number of filters). pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. use_depthwise: Whether to use depthwise convolutions in the SSD head. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. scope_name: scope name (string) of network variables. 
""" if use_explicit_padding: raise NotImplementedError( 'Explicit padding is not yet supported in MobileDet backbones.') super(SSDMobileDetFeatureExtractorBase, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams ) self._backbone_fn = backbone_fn self._scope_name = scope_name def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. The preprocessing assumes an input value range of [0, 255]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) padded_inputs = ops.pad_to_multiple( preprocessed_inputs, self._pad_to_multiple) feature_map_layout = { 'from_layer': ['C4', 'C5', '', '', '', ''], # Do not specify the layer depths (number of filters) for C4 and C5, as # their values are determined based on the backbone. 'layer_depth': [-1, -1, 512, 256, 256, 128], 'use_depthwise': self._use_depthwise, 'use_explicit_padding': self._use_explicit_padding, } with tf.variable_scope(self._scope_name, reuse=self._reuse_weights): with slim.arg_scope([slim.batch_norm], is_training=self._is_training, epsilon=0.01, decay=0.99, center=True, scale=True): endpoints = self._backbone_fn( padded_inputs, multiplier=self._depth_multiplier) image_features = {'C4': endpoints['C4'], 'C5': endpoints['C5']} with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values()) class SSDMobileDetCPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): """MobileDet-CPU feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDetCPU'): super(SSDMobileDetCPUFeatureExtractor, self).__init__( backbone_fn=mobiledet_cpu_backbone, is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name) class SSDMobileDetDSPFeatureExtractor(SSDMobileDetFeatureExtractorBase): """MobileDet-DSP feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, 
use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDetDSP'): super(SSDMobileDetDSPFeatureExtractor, self).__init__( backbone_fn=mobiledet_dsp_backbone, is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name) class SSDMobileDetEdgeTPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): """MobileDet-EdgeTPU feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDetEdgeTPU'): super(SSDMobileDetEdgeTPUFeatureExtractor, self).__init__( backbone_fn=mobiledet_edgetpu_backbone, is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name) class SSDMobileDetGPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): """MobileDet-GPU feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobileDetGPU'): super(SSDMobileDetGPUFeatureExtractor, self).__init__( backbone_fn=mobiledet_gpu_backbone, is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobiledet_feature_extractor.py
ssd_mobiledet_feature_extractor.py
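_scale_filters above scales a filter count by the depth multiplier and rounds half-up to the nearest multiple of base (default 8), never going below base. The function is copied here verbatim so the usage example runs standalone:

def _scale_filters(filters, multiplier, base=8):
  """Scale the filters accordingly to (multiplier, base)."""
  round_half_up = int(int(filters) * multiplier / base + 0.5)
  result = int(round_half_up * base)
  return max(result, base)

print(_scale_filters(72, 1.0))   # 72
print(_scale_filters(72, 0.5))   # 40  (36 rounds half-up to the nearest multiple of 8)
print(_scale_filters(16, 0.25))  # 8   (clamped to the base)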
"""SSD MobilenetV2 NAS-FPN Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.utils import ops from object_detection.utils import shape_utils from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 Block = collections.namedtuple( 'Block', ['inputs', 'output_level', 'kernel_size', 'expansion_size']) _MNASFPN_CELL_CONFIG = [ Block(inputs=(1, 2), output_level=4, kernel_size=3, expansion_size=256), Block(inputs=(0, 4), output_level=3, kernel_size=3, expansion_size=128), Block(inputs=(5, 4), output_level=4, kernel_size=3, expansion_size=128), Block(inputs=(4, 3), output_level=5, kernel_size=5, expansion_size=128), Block(inputs=(4, 3), output_level=6, kernel_size=3, expansion_size=96), ] MNASFPN_DEF = dict( feature_levels=[3, 4, 5, 6], spec=[_MNASFPN_CELL_CONFIG] * 4, ) def _maybe_pad(feature, use_explicit_padding, kernel_size=3): return ops.fixed_padding(feature, kernel_size) if use_explicit_padding else feature # Wrapper around mobilenet.depth_multiplier def _apply_multiplier(d, multiplier, min_depth): p = {'num_outputs': d} mobilenet.depth_multiplier( p, multiplier=multiplier, divisible_by=8, min_depth=min_depth) return p['num_outputs'] def _apply_size_dependent_ordering(input_feature, feature_level, block_level, expansion_size, use_explicit_padding, use_native_resize_op): """Applies Size-Dependent-Ordering when resizing feature maps. See https://arxiv.org/abs/1912.01106 Args: input_feature: input feature map to be resized. feature_level: the level of the input feature. block_level: the desired output level for the block. expansion_size: the expansion size for the block. use_explicit_padding: Whether to use explicit padding. use_native_resize_op: Whether to use native resize op. Returns: A transformed feature at the desired resolution and expansion size. """ padding = 'VALID' if use_explicit_padding else 'SAME' if feature_level >= block_level: # Perform 1x1 then upsampling. node = slim.conv2d( input_feature, expansion_size, [1, 1], activation_fn=None, normalizer_fn=slim.batch_norm, padding=padding, scope='Conv1x1') if feature_level == block_level: return node scale = 2**(feature_level - block_level) if use_native_resize_op: input_shape = shape_utils.combined_static_and_dynamic_shape(node) node = tf.image.resize_nearest_neighbor( node, [input_shape[1] * scale, input_shape[2] * scale]) else: node = ops.nearest_neighbor_upsampling(node, scale=scale) else: # Perform downsampling then 1x1. stride = 2**(block_level - feature_level) node = slim.max_pool2d( _maybe_pad(input_feature, use_explicit_padding), [3, 3], stride=[stride, stride], padding=padding, scope='Downsample') node = slim.conv2d( node, expansion_size, [1, 1], activation_fn=None, normalizer_fn=slim.batch_norm, padding=padding, scope='Conv1x1') return node def _mnasfpn_cell(feature_maps, feature_levels, cell_spec, output_channel=48, use_explicit_padding=False, use_native_resize_op=False, multiplier_func=None): """Create a MnasFPN cell. Args: feature_maps: input feature maps. feature_levels: levels of the feature maps. cell_spec: A list of Block configs. output_channel: Number of features for the input, output and intermediate feature maps. use_explicit_padding: Whether to use explicit padding. 
use_native_resize_op: Whether to use native resize op. multiplier_func: Depth-multiplier function. If None, use identity function. Returns: A transformed list of feature maps at the same resolutions as the inputs. """ # This is the level where multipliers are realized. if multiplier_func is None: multiplier_func = lambda x: x num_outputs = len(feature_maps) cell_features = list(feature_maps) cell_levels = list(feature_levels) padding = 'VALID' if use_explicit_padding else 'SAME' for bi, block in enumerate(cell_spec): with tf.variable_scope('block_{}'.format(bi)): block_level = block.output_level intermediate_feature = None for i, inp in enumerate(block.inputs): with tf.variable_scope('input_{}'.format(i)): input_level = cell_levels[inp] node = _apply_size_dependent_ordering( cell_features[inp], input_level, block_level, multiplier_func(block.expansion_size), use_explicit_padding, use_native_resize_op) # Add features incrementally to avoid producing AddN, which doesn't # play well with TfLite. if intermediate_feature is None: intermediate_feature = node else: intermediate_feature += node node = tf.nn.relu6(intermediate_feature) node = slim.separable_conv2d( _maybe_pad(node, use_explicit_padding, block.kernel_size), multiplier_func(output_channel), block.kernel_size, activation_fn=None, normalizer_fn=slim.batch_norm, padding=padding, scope='SepConv') cell_features.append(node) cell_levels.append(block_level) # Cell-wide residuals. out_idx = range(len(cell_features) - num_outputs, len(cell_features)) for in_i, out_i in enumerate(out_idx): if cell_features[out_i].shape.as_list( ) == cell_features[in_i].shape.as_list(): cell_features[out_i] += cell_features[in_i] return cell_features[-num_outputs:] def mnasfpn(feature_maps, head_def, output_channel=48, use_explicit_padding=False, use_native_resize_op=False, multiplier_func=None): """Create the MnasFPN head given head_def.""" features = feature_maps for ci, cell_spec in enumerate(head_def['spec']): with tf.variable_scope('cell_{}'.format(ci)): features = _mnasfpn_cell(features, head_def['feature_levels'], cell_spec, output_channel, use_explicit_padding, use_native_resize_op, multiplier_func) return features def training_scope(l2_weight_decay=1e-4, is_training=None): """Arg scope for training MnasFPN.""" with slim.arg_scope( [slim.conv2d], weights_initializer=tf.initializers.he_normal(), weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \ slim.arg_scope( [slim.separable_conv2d], weights_initializer=tf.initializers.truncated_normal( stddev=0.536), # He_normal for 3x3 depthwise kernel. weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \ slim.arg_scope([slim.batch_norm], is_training=is_training, epsilon=0.01, decay=0.99, center=True, scale=True) as s: return s class SSDMobileNetV2MnasFPNFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV2 MnasFPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=6, additional_layer_depth=48, head_def=None, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False, data_format='channels_last'): """SSD MnasFPN feature extractor based on Mobilenet v2 architecture. See https://arxiv.org/abs/1912.01106 Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. 
pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the highest resolution feature map to use in MnasFPN. Currently the only valid value is 3. fpn_max_level: the smallest resolution feature map to construct or use in MnasFPN. Currentl the only valid value is 6. additional_layer_depth: additional feature map layer channel depth for NAS-FPN. head_def: A dictionary specifying the MnasFPN head architecture. Default uses MNASFPN_DEF. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use native resize op. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. data_format: The ordering of the dimensions in the inputs, The valid values are {'channels_first', 'channels_last'). """ super(SSDMobileNetV2MnasFPNFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=( override_base_feature_extractor_hyperparams)) if fpn_min_level != 3 or fpn_max_level != 6: raise ValueError('Min and max levels of MnasFPN must be 3 and 6 for now.') self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._fpn_layer_depth = additional_layer_depth self._head_def = head_def if head_def else MNASFPN_DEF self._data_format = data_format self._use_native_resize_op = use_native_resize_op def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _verify_config(self, inputs): """Verify that MnasFPN config and its inputs.""" num_inputs = len(inputs) assert len(self._head_def['feature_levels']) == num_inputs base_width = inputs[0].shape.as_list( )[1] * 2**self._head_def['feature_levels'][0] for i in range(1, num_inputs): width = inputs[i].shape.as_list()[1] level = self._head_def['feature_levels'][i] expected_width = base_width // 2**level if width != expected_width: raise ValueError( 'Resolution of input {} does not match its level {}.'.format( i, level)) for cell_spec in self._head_def['spec']: # The last K nodes in a cell are the inputs to the next cell. Assert that # their feature maps are at the right level. for i in range(num_inputs): if cell_spec[-num_inputs + i].output_level != self._head_def['feature_levels'][i]: raise ValueError( 'Mismatch between node level {} and desired output level {}.' .format(cell_spec[-num_inputs + i].output_level, self._head_def['feature_levels'][i])) # Assert that each block only uses precending blocks. 
for bi, block_spec in enumerate(cell_spec): for inp in block_spec.inputs: if inp >= bi + num_inputs: raise ValueError( 'Block {} is trying to access uncreated block {}.'.format( bi, inp)) def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v2.training_scope(is_training=None, bn_decay=0.99)), \ slim.arg_scope( [mobilenet.depth_multiplier], min_depth=self._min_depth): with slim.arg_scope( training_scope(l2_weight_decay=4e-5, is_training=self._is_training)): _, image_features = mobilenet_v2.mobilenet_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='layer_18', depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) multiplier_func = functools.partial( _apply_multiplier, multiplier=self._depth_multiplier, min_depth=self._min_depth) with tf.variable_scope('MnasFPN', reuse=self._reuse_weights): with slim.arg_scope( training_scope(l2_weight_decay=1e-4, is_training=self._is_training)): # Create C6 by downsampling C5. c6 = slim.max_pool2d( _maybe_pad(image_features['layer_18'], self._use_explicit_padding), [3, 3], stride=[2, 2], padding='VALID' if self._use_explicit_padding else 'SAME', scope='C6_downsample') c6 = slim.conv2d( c6, multiplier_func(self._fpn_layer_depth), [1, 1], activation_fn=tf.identity, normalizer_fn=slim.batch_norm, weights_regularizer=None, # this 1x1 has no kernel regularizer. padding='VALID', scope='C6_Conv1x1') image_features['C6'] = tf.identity(c6) # Needed for quantization. for k in sorted(image_features.keys()): tf.logging.error('{}: {}'.format(k, image_features[k])) mnasfpn_inputs = [ image_features['layer_7'], # C3 image_features['layer_14'], # C4 image_features['layer_18'], # C5 image_features['C6'] # C6 ] self._verify_config(mnasfpn_inputs) feature_maps = mnasfpn( mnasfpn_inputs, head_def=self._head_def, output_channel=self._fpn_layer_depth, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op, multiplier_func=multiplier_func) return feature_maps
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py
ssd_mobilenet_v2_mnasfpn_feature_extractor.py
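The `_verify_config` check above ties each MnasFPN input's spatial width to its pyramid level. Below is a minimal plain-Python sketch of that relationship; the helper name `expected_feature_widths` is ours, not part of the file.

def expected_feature_widths(first_input_width, feature_levels):
  """Expected spatial width of each feature map, per _verify_config's rule."""
  base_width = first_input_width * 2**feature_levels[0]
  return [base_width // 2**level for level in feature_levels]

# Example: a 320x320 input with MnasFPN levels [3, 4, 5, 6] gives
# C3=40, C4=20, C5=10, C6=5, matching the widths the extractor expects.
assert expected_feature_widths(40, [3, 4, 5, 6]) == [40, 20, 10, 5]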
import tensorflow.compat.v1 as tf from object_detection.meta_architectures import center_net_meta_arch from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2 class CenterNetMobileNetV2FeatureExtractor( center_net_meta_arch.CenterNetFeatureExtractor): """The MobileNet V2 feature extractor for CenterNet.""" def __init__(self, mobilenet_v2_net, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Intializes the feature extractor. Args: mobilenet_v2_net: The underlying mobilenet_v2 network to use. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. """ super(CenterNetMobileNetV2FeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) self._network = mobilenet_v2_net output = self._network(self._network.input) # MobileNet by itself transforms a 224x224x3 volume into a 7x7x1280, which # leads to a stride of 32. We perform upsampling to get it to a target # stride of 4. for num_filters in [256, 128, 64]: # 1. We use a simple convolution instead of a deformable convolution conv = tf.keras.layers.Conv2D( filters=num_filters, kernel_size=1, strides=1, padding='same') output = conv(output) output = tf.keras.layers.BatchNormalization()(output) output = tf.keras.layers.ReLU()(output) # 2. We use the default initialization for the convolution layers # instead of initializing it to do bilinear upsampling. conv_transpose = tf.keras.layers.Conv2DTranspose( filters=num_filters, kernel_size=3, strides=2, padding='same') output = conv_transpose(output) output = tf.keras.layers.BatchNormalization()(output) output = tf.keras.layers.ReLU()(output) self._network = tf.keras.models.Model( inputs=self._network.input, outputs=output) def preprocess(self, resized_inputs): resized_inputs = super(CenterNetMobileNetV2FeatureExtractor, self).preprocess(resized_inputs) return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs) def load_feature_extractor_weights(self, path): self._network.load_weights(path) def call(self, inputs): return [self._network(inputs)] @property def out_stride(self): """The stride in the output image of the network.""" return 4 @property def num_feature_outputs(self): """The number of feature outputs returned by the feature extractor.""" return 1 @property def classification_backbone(self): return self._network def mobilenet_v2(channel_means, channel_stds, bgr_ordering, depth_multiplier=1.0, **kwargs): """The MobileNetV2 backbone for CenterNet.""" del kwargs # We set 'is_training' to True for now. network = mobilenetv2.mobilenet_v2( batchnorm_training=True, alpha=depth_multiplier, include_top=False, weights='imagenet' if depth_multiplier == 1.0 else None) return CenterNetMobileNetV2FeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering)
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_mobilenet_v2_feature_extractor.py
center_net_mobilenet_v2_feature_extractor.py
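The decoder loop above recovers a stride-4 output from MobileNetV2's stride-32 feature map with three stride-2 transposed convolutions. A small, self-contained sketch of just that arithmetic (a toy stand-in backbone, not the real MobileNetV2):

import tensorflow.compat.v1 as tf

inputs = tf.keras.Input(shape=(224, 224, 3))
x = tf.keras.layers.Conv2D(1280, 3, strides=32, padding='same')(inputs)  # stride-32 stand-in
for num_filters in [256, 128, 64]:
  x = tf.keras.layers.Conv2DTranspose(
      filters=num_filters, kernel_size=3, strides=2, padding='same')(x)
decoder = tf.keras.models.Model(inputs, x)
print(decoder.output_shape)  # (None, 56, 56, 64); 224 / 56 == 4, the CenterNet out_stride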
"""SSDFeatureExtractor for MobileNetV3 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v3 class SSDMobileNetV3FeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor): """Base class of SSD feature extractor using MobilenetV3 features.""" def __init__(self, conv_defs, from_layer, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobilenetV3'): """MobileNetV3 Feature Extractor for SSD Models. MobileNet v3. Details found in: https://arxiv.org/abs/1905.02244 Args: conv_defs: MobileNetV3 conv defs for backbone. from_layer: A cell of two layer names (string) to connect to the 1st and 2nd inputs of the SSD head. is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. scope_name: scope name (string) of network variables. """ super(SSDMobileNetV3FeatureExtractorBase, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams ) self._conv_defs = conv_defs self._from_layer = from_layer self._scope_name = scope_name def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] Raises: ValueError if conv_defs is not provided or from_layer does not meet the size requirement. 
""" if not self._conv_defs: raise ValueError('Must provide backbone conv defs.') if len(self._from_layer) != 2: raise ValueError('SSD input feature names are not provided.') preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': [ self._from_layer[0], self._from_layer[1], '', '', '', '' ], 'layer_depth': [-1, -1, 512, 256, 256, 128], 'use_depthwise': self._use_depthwise, 'use_explicit_padding': self._use_explicit_padding, } with tf.variable_scope( self._scope_name, reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v3.training_scope(is_training=None, bn_decay=0.9997)), \ slim.arg_scope( [mobilenet.depth_multiplier], min_depth=self._min_depth): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v3.mobilenet_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), conv_defs=self._conv_defs, final_endpoint=self._from_layer[1], depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values()) class SSDMobileNetV3LargeFeatureExtractor(SSDMobileNetV3FeatureExtractorBase): """Mobilenet V3-Large feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobilenetV3'): super(SSDMobileNetV3LargeFeatureExtractor, self).__init__( conv_defs=mobilenet_v3.V3_LARGE_DETECTION, from_layer=['layer_14/expansion_output', 'layer_17'], is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name ) class SSDMobileNetV3SmallFeatureExtractor(SSDMobileNetV3FeatureExtractorBase): """Mobilenet V3-Small feature extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False, scope_name='MobilenetV3'): super(SSDMobileNetV3SmallFeatureExtractor, self).__init__( conv_defs=mobilenet_v3.V3_SMALL_DETECTION, from_layer=['layer_10/expansion_output', 'layer_13'], is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, scope_name=scope_name )
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v3_feature_extractor.py
ssd_mobilenet_v3_feature_extractor.py
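Both MobileNetV3 extractors above use the same `preprocess`: a linear map from [0, 255] pixel values to [-1, 1]. A quick numpy check of that mapping:

import numpy as np

pixels = np.array([0., 127.5, 255.])
print((2.0 / 255.0) * pixels - 1.0)  # [-1.  0.  1.]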
import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 class SSDMobileNetV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV2 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """MobileNetV2 Feature Extractor for SSD Models. Mobilenet v2 (experimental), designed by sandler@. More details can be found in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDMobileNetV2FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', '' ][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_depthwise': self._use_depthwise, 'use_explicit_padding': self._use_explicit_padding, } with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \ slim.arg_scope( [mobilenet.depth_multiplier], min_depth=self._min_depth): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v2.mobilenet_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='layer_19', depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return list(feature_maps.values())
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_feature_extractor.py
ssd_mobilenet_v2_feature_extractor.py
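In `extract_features` above, `num_layers` simply truncates the SSD feature-map layout. A plain-Python illustration with `num_layers=4`, using the layout values copied from the file:

num_layers = 4
from_layer = ['layer_15/expansion_output', 'layer_19', '', '', '', ''][:num_layers]
layer_depth = [-1, -1, 512, 256, 256, 128][:num_layers]
print(from_layer)   # ['layer_15/expansion_output', 'layer_19', '', '']
print(layer_depth)  # [-1, -1, 512, 256]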
import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import mobilenet_v1 from object_detection.utils import ops from object_detection.utils import shape_utils class SSDMobileNetV1KerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using Keras MobilenetV1 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False, name=None): """Keras MobileNetV1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDMobileNetV1KerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) self._feature_map_layout = { 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''][:self._num_layers], 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } self.classification_backbone = None self._feature_map_generator = None def build(self, input_shape): full_mobilenet_v1 = mobilenet_v1.mobilenet_v1( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), weights=None, use_explicit_padding=self._use_explicit_padding, alpha=self._depth_multiplier, min_depth=self._min_depth, include_top=False) conv2d_11_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_11_relu').output conv2d_13_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_13_relu').output self.classification_backbone = tf.keras.Model( inputs=full_mobilenet_v1.inputs, outputs=[conv2d_11_pointwise, conv2d_13_pointwise]) self._feature_map_generator = ( feature_map_generators.KerasMultiResolutionFeatureMaps( feature_map_layout=self._feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_maps = self._feature_map_generator({ 'Conv2d_11_pointwise': image_features[0], 'Conv2d_13_pointwise': image_features[1]}) return list(feature_maps.values())
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py
ssd_mobilenet_v1_keras_feature_extractor.py
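`build` above re-wraps the full Keras MobileNetV1 so that two named intermediate activations become the model outputs. A minimal sketch of that tapping pattern on a toy backbone; the layer names `block_a` and `block_b` are made up, not MobileNetV1 names:

import tensorflow.compat.v1 as tf

inp = tf.keras.Input(shape=(128, 128, 3))
x = tf.keras.layers.Conv2D(8, 3, strides=2, padding='same', name='block_a')(inp)
x = tf.keras.layers.Conv2D(16, 3, strides=2, padding='same', name='block_b')(x)
toy_backbone = tf.keras.Model(inp, x)

feature_model = tf.keras.Model(
    inputs=toy_backbone.inputs,
    outputs=[toy_backbone.get_layer('block_a').output,
             toy_backbone.get_layer('block_b').output])
print([t.shape.as_list() for t in feature_model.outputs])
# [[None, 64, 64, 8], [None, 32, 32, 16]]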
import tensorflow.compat.v1 as tf from object_detection.meta_architectures import center_net_meta_arch from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2 _MOBILENET_V2_FPN_SKIP_LAYERS = [ 'block_2_add', 'block_5_add', 'block_9_add', 'out_relu' ] class CenterNetMobileNetV2FPNFeatureExtractor( center_net_meta_arch.CenterNetFeatureExtractor): """The MobileNet V2 with FPN skip layers feature extractor for CenterNet.""" def __init__(self, mobilenet_v2_net, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False, use_separable_conv=False, upsampling_interpolation='nearest'): """Intializes the feature extractor. Args: mobilenet_v2_net: The underlying mobilenet_v2 network to use. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. use_separable_conv: If set to True, all convolutional layers in the FPN network will be replaced by separable convolutions. upsampling_interpolation: A string (one of 'nearest' or 'bilinear') indicating which interpolation method to use for the upsampling ops in the FPN. """ super(CenterNetMobileNetV2FPNFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) self._base_model = mobilenet_v2_net output = self._base_model(self._base_model.input) # Add pyramid feature network on every layer that has stride 2. skip_outputs = [ self._base_model.get_layer(skip_layer_name).output for skip_layer_name in _MOBILENET_V2_FPN_SKIP_LAYERS ] self._fpn_model = tf.keras.models.Model( inputs=self._base_model.input, outputs=skip_outputs) fpn_outputs = self._fpn_model(self._base_model.input) # Construct the top-down feature maps -- we start with an output of # 7x7x1280, which we continually upsample, apply a residual on and merge. # This results in a 56x56x24 output volume. top_layer = fpn_outputs[-1] # Use normal convolutional layer since the kernel_size is 1. residual_op = tf.keras.layers.Conv2D( filters=64, kernel_size=1, strides=1, padding='same') top_down = residual_op(top_layer) num_filters_list = [64, 32, 24] for i, num_filters in enumerate(num_filters_list): level_ind = len(num_filters_list) - 1 - i # Upsample. upsample_op = tf.keras.layers.UpSampling2D( 2, interpolation=upsampling_interpolation) top_down = upsample_op(top_down) # Residual (skip-connection) from bottom-up pathway. # Use normal convolutional layer since the kernel_size is 1. residual_op = tf.keras.layers.Conv2D( filters=num_filters, kernel_size=1, strides=1, padding='same') residual = residual_op(fpn_outputs[level_ind]) # Merge. 
      top_down = top_down + residual

      next_num_filters = num_filters_list[i + 1] if i + 1 <= 2 else 24
      if use_separable_conv:
        conv = tf.keras.layers.SeparableConv2D(
            filters=next_num_filters, kernel_size=3, strides=1, padding='same')
      else:
        conv = tf.keras.layers.Conv2D(
            filters=next_num_filters, kernel_size=3, strides=1, padding='same')
      top_down = conv(top_down)
      top_down = tf.keras.layers.BatchNormalization()(top_down)
      top_down = tf.keras.layers.ReLU()(top_down)

    output = top_down

    self._feature_extractor_model = tf.keras.models.Model(
        inputs=self._base_model.input, outputs=output)

  def preprocess(self, resized_inputs):
    resized_inputs = super(CenterNetMobileNetV2FPNFeatureExtractor,
                           self).preprocess(resized_inputs)
    return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs)

  def load_feature_extractor_weights(self, path):
    self._base_model.load_weights(path)

  @property
  def supported_sub_model_types(self):
    return ['classification']

  def get_sub_model(self, sub_model_type):
    if sub_model_type == 'classification':
      return self._base_model
    else:
      raise ValueError(
          'Sub model type "{}" not supported.'.format(sub_model_type))

  def call(self, inputs):
    return [self._feature_extractor_model(inputs)]

  @property
  def out_stride(self):
    """The stride in the output image of the network."""
    return 4

  @property
  def num_feature_outputs(self):
    """The number of feature outputs returned by the feature extractor."""
    return 1


def mobilenet_v2_fpn(channel_means, channel_stds, bgr_ordering,
                     use_separable_conv=False, depth_multiplier=1.0,
                     upsampling_interpolation='nearest', **kwargs):
  """The MobileNetV2+FPN backbone for CenterNet."""
  del kwargs

  # Set batchnorm_training to True for now.
  network = mobilenetv2.mobilenet_v2(
      batchnorm_training=True,
      alpha=depth_multiplier,
      include_top=False,
      weights='imagenet' if depth_multiplier == 1.0 else None)
  return CenterNetMobileNetV2FPNFeatureExtractor(
      network,
      channel_means=channel_means,
      channel_stds=channel_stds,
      bgr_ordering=bgr_ordering,
      use_separable_conv=use_separable_conv,
      upsampling_interpolation=upsampling_interpolation)
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor.py
center_net_mobilenet_v2_fpn_feature_extractor.py
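Each iteration of the top-down loop above does the same three-step merge: upsample the coarser map, project the bottom-up skip feature with a 1x1 conv, and add. A toy-shaped sketch of one such step; the shapes are illustrative only, not the actual MobileNetV2 ones:

import tensorflow.compat.v1 as tf

top_down = tf.keras.Input(shape=(7, 7, 64))   # coarser top-down feature
skip = tf.keras.Input(shape=(14, 14, 96))     # bottom-up skip feature
up = tf.keras.layers.UpSampling2D(2, interpolation='nearest')(top_down)
residual = tf.keras.layers.Conv2D(filters=64, kernel_size=1, strides=1,
                                  padding='same')(skip)
merged = up + residual
print(merged.shape.as_list())  # [None, 14, 14, 64]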
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import mobilenet_v1 from object_detection.models.keras_models import model_utils from object_detection.utils import ops from object_detection.utils import shape_utils # A modified config of mobilenet v1 that makes it more detection friendly. def _create_modified_mobilenet_config(): conv_def_block_12 = model_utils.ConvDefs(conv_name='conv_pw_12', filters=512) conv_def_block_13 = model_utils.ConvDefs(conv_name='conv_pw_13', filters=256) return [conv_def_block_12, conv_def_block_13] class SSDMobileNetV1FpnKerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using Keras-based MobilenetV1 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False, name=None): """SSD Keras based FPN feature extractor Mobilenet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v1 layers {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, Conv2d_13_pointwise}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDMobileNetV1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() self._use_native_resize_op = use_native_resize_op self._feature_blocks = [ 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', 'Conv2d_13_pointwise' ] self.classification_backbone = None self._fpn_features_generator = None self._coarse_feature_layers = [] def build(self, input_shape): full_mobilenet_v1 = mobilenet_v1.mobilenet_v1( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), weights=None, use_explicit_padding=self._use_explicit_padding, alpha=self._depth_multiplier, min_depth=self._min_depth, conv_defs=self._conv_defs, include_top=False) conv2d_3_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_3_relu').output conv2d_5_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_5_relu').output conv2d_11_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_11_relu').output conv2d_13_pointwise = full_mobilenet_v1.get_layer( name='conv_pw_13_relu').output self.classification_backbone = tf.keras.Model( inputs=full_mobilenet_v1.inputs, outputs=[conv2d_3_pointwise, conv2d_5_pointwise, conv2d_11_pointwise, conv2d_13_pointwise] ) # pylint:disable=g-long-lambda self._depth_fn = lambda d: max( int(d * self._depth_multiplier), self._min_depth) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) # Construct coarse feature layers padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 stride = 2 for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1): coarse_feature_layers = [] if self._use_explicit_padding: def fixed_padding(features, kernel_size=kernel_size): return ops.fixed_padding(features, kernel_size) coarse_feature_layers.append(tf.keras.layers.Lambda( fixed_padding, name='fixed_padding')) layer_name = 'bottom_up_Conv2d_{}'.format( i - self._base_fpn_max_level + 13) conv_block = feature_map_generators.create_conv_block( self._use_depthwise, kernel_size, padding, stride, layer_name, self._conv_hyperparams, self._is_training, self._freeze_batchnorm, self._depth_fn(self._additional_layer_depth)) coarse_feature_layers.extend(conv_block) self._coarse_feature_layers.append(coarse_feature_layers) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. 
Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append(self._feature_blocks[level - 2]) feature_start_index = len(self._feature_blocks) - self._num_levels fpn_input_image_features = [ (key, image_features[feature_start_index + index]) for index, key in enumerate(feature_block_list)] fpn_features = self._fpn_features_generator(fpn_input_image_features) feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( self._feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( self._feature_blocks[self._base_fpn_max_level - 2])] for coarse_feature_layers in self._coarse_feature_layers: for layer in coarse_feature_layers: last_feature_map = layer(last_feature_map) feature_maps.append(last_feature_map) return feature_maps def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): """Returns a map for restoring from an (object-based) checkpoint. Args: feature_extractor_scope: A scope name for the feature extractor (unused). Returns: A dict mapping keys to Keras models """ return {'feature_extractor': self.classification_backbone}
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py
ssd_mobilenet_v1_fpn_keras_feature_extractor.py
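A plain-Python walk-through of the level bookkeeping in `build` above: with the defaults `fpn_min_level=3` and `fpn_max_level=7`, levels 3-5 come from the FPN top-down pathway and levels 6-7 are added by extra stride-2 blocks.

fpn_min_level, fpn_max_level = 3, 7
base_fpn_max_level = min(fpn_max_level, 5)
num_levels = base_fpn_max_level + 1 - fpn_min_level
extra_blocks = ['bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13)
                for i in range(base_fpn_max_level + 1, fpn_max_level + 1)]
print(num_levels)    # 3 -> FPN levels 3, 4, 5
print(extra_blocks)  # ['bottom_up_Conv2d_14', 'bottom_up_Conv2d_15']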
from object_detection.meta_architectures import center_net_meta_arch from object_detection.models.keras_models import hourglass_network class CenterNetHourglassFeatureExtractor( center_net_meta_arch.CenterNetFeatureExtractor): """The hourglass feature extractor for CenterNet. This class is a thin wrapper around the HourglassFeatureExtractor class along with some preprocessing methods inherited from the base class. """ def __init__(self, hourglass_net, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Intializes the feature extractor. Args: hourglass_net: The underlying hourglass network to use. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. """ super(CenterNetHourglassFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) self._network = hourglass_net def call(self, inputs): return self._network(inputs) @property def out_stride(self): """The stride in the output image of the network.""" return 4 @property def num_feature_outputs(self): """Ther number of feature outputs returned by the feature extractor.""" return self._network.num_hourglasses def hourglass_10(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-10 backbone for CenterNet.""" del kwargs network = hourglass_network.hourglass_10(num_channels=32) return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def hourglass_20(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-20 backbone for CenterNet.""" del kwargs network = hourglass_network.hourglass_20(num_channels=48) return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def hourglass_32(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-32 backbone for CenterNet.""" del kwargs network = hourglass_network.hourglass_32(num_channels=48) return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def hourglass_52(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-52 backbone for CenterNet.""" del kwargs network = hourglass_network.hourglass_52(num_channels=64) return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def hourglass_104(channel_means, channel_stds, bgr_ordering, **kwargs): """The Hourglass-104 backbone for CenterNet.""" del kwargs # TODO(vighneshb): update hourglass_104 signature to match with other # hourglass networks. network = hourglass_network.hourglass_104() return CenterNetHourglassFeatureExtractor( network, channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering)
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_hourglass_feature_extractor.py
center_net_hourglass_feature_extractor.py
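A short usage sketch of the factory functions above, assuming the object_detection package is importable; identity channel normalization is used for brevity:

from object_detection.models import center_net_hourglass_feature_extractor as hg

extractor = hg.hourglass_10(
    channel_means=(0., 0., 0.),
    channel_stds=(1., 1., 1.),
    bgr_ordering=False)
print(extractor.out_stride)           # 4
print(extractor.num_feature_outputs)  # number of stacked hourglasses in the backbone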
from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import functools from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import mobilenet_v1 # A modified config of mobilenet v1 that makes it more detection friendly, def _create_modified_mobilenet_config(): conv_defs = copy.deepcopy(mobilenet_v1.MOBILENETV1_CONV_DEFS) conv_defs[-2] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=512) conv_defs[-1] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=256) return conv_defs class SSDMobileNetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV1 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD FPN feature extractor based on Mobilenet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v1 layers {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, Conv2d_13_pointwise}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. 
""" super(SSDMobileNetV1FpnFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() self._use_native_resize_op = use_native_resize_op def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=None, regularize_depthwise=True)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, conv_defs=self._conv_defs, use_explicit_padding=self._use_explicit_padding, scope=scope) depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope('fpn', reuse=self._reuse_weights): feature_blocks = [ 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', 'Conv2d_13_pointwise' ] base_fpn_max_level = min(self._fpn_max_level, 5) feature_block_list = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_block_list.append(feature_blocks[level - 2]) fpn_features = feature_map_generators.fpn_top_down_feature_maps( [(key, image_features[key]) for key in feature_block_list], depth=depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op) feature_maps = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( feature_blocks[base_fpn_max_level - 2])] # Construct coarse features padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1): if self._use_depthwise: conv_op = functools.partial( slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d if self._use_explicit_padding: last_feature_map = ops.fixed_padding( last_feature_map, kernel_size) last_feature_map = conv_op( 
last_feature_map, num_outputs=depth_fn(self._additional_layer_depth), kernel_size=[kernel_size, kernel_size], stride=2, padding=padding, scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13)) feature_maps.append(last_feature_map) return feature_maps
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py
ssd_mobilenet_v1_fpn_feature_extractor.py
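`depth_fn` above scales every added layer's channel count by the depth multiplier but never lets it fall below `min_depth`. A quick numeric check:

depth_multiplier, min_depth = 0.25, 16
depth_fn = lambda d: max(int(d * depth_multiplier), min_depth)
print([depth_fn(d) for d in (256, 64, 32)])  # [64, 16, 16]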
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import resnet_v1 from object_detection.utils import ops from object_detection.utils import shape_utils _RESNET_MODEL_OUTPUT_LAYERS = { 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out'], 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block23_out', 'conv5_block3_out'], 'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out', 'conv4_block36_out', 'conv5_block3_out'], } class SSDResNetV1FpnKerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Feature Extractor using Keras-based ResnetV1 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, resnet_v1_base_model, resnet_v1_base_model_name, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=False, name=None): """SSD Keras based FPN feature extractor Resnet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. resnet_v1_base_model: base resnet v1 network to use. One of the resnet_v1.resnet_v1_{50,101,152} models. resnet_v1_base_model_name: model name under which to construct resnet v1. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: whether to use explicit padding when extracting features. Default is None, as it's an invalid option and not implemented in this feature extractor. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. 
If 'None', Keras will auto-generate one from the class name. """ super(SSDResNetV1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) if self._use_explicit_padding: raise ValueError('Explicit padding is not a valid option.') if self._use_depthwise: raise ValueError('Depthwise is not a valid option.') self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._resnet_v1_base_model = resnet_v1_base_model self._resnet_v1_base_model_name = resnet_v1_base_model_name self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] self.classification_backbone = None self._fpn_features_generator = None self._coarse_feature_layers = [] def build(self, input_shape): full_resnet_v1_model = self._resnet_v1_base_model( batchnorm_training=(self._is_training and not self._freeze_batchnorm), conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, classes=None, weights=None, include_top=False) output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] outputs = [full_resnet_v1_model.get_layer(output_layer_name).output for output_layer_name in output_layers] self.classification_backbone = tf.keras.Model( inputs=full_resnet_v1_model.inputs, outputs=outputs) # pylint:disable=g-long-lambda self._depth_fn = lambda d: max( int(d * self._depth_multiplier), self._min_depth) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._depth_fn(self._additional_layer_depth), is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) # Construct coarse feature layers depth = self._depth_fn(self._additional_layer_depth) for i in range(self._base_fpn_max_level, self._fpn_max_level): layers = [] layer_name = 'bottom_up_block{}'.format(i) layers.append( tf.keras.layers.Conv2D( depth, [3, 3], padding='SAME', strides=2, name=layer_name + '_conv', **self._conv_hyperparams.params())) layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name=layer_name + '_batchnorm')) layers.append( self._conv_hyperparams.build_activation_layer( name=layer_name)) self._coarse_feature_layers.append(layers) self.built = True def preprocess(self, resized_inputs): """SSD preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
""" if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 129, preprocessed_inputs) image_features = self.classification_backbone( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) feature_block_map = dict( list(zip(self._resnet_block_names, image_features))) fpn_input_image_features = [ (feature_block, feature_block_map[feature_block]) for feature_block in feature_block_list] fpn_features = self._fpn_features_generator(fpn_input_image_features) feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) last_feature_map = fpn_features['top_down_block{}'.format( self._base_fpn_max_level - 1)] for coarse_feature_layers in self._coarse_feature_layers: for layer in coarse_feature_layers: last_feature_map = layer(last_feature_map) feature_maps.append(last_feature_map) return feature_maps class SSDResNet50V1FpnKerasFeatureExtractor( SSDResNetV1FpnKerasFeatureExtractor): """SSD Feature Extractor using Keras-based ResnetV1-50 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=False, name='ResNet50V1_FPN'): """SSD Keras based FPN feature extractor ResnetV1-50 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: whether to use explicit padding when extracting features. Default is None, as it's an invalid option and not implemented in this feature extractor. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. 
override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDResNet50V1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, resnet_v1_base_model=resnet_v1.resnet_v1_50, resnet_v1_base_model_name='resnet_v1_50', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDResNet101V1FpnKerasFeatureExtractor( SSDResNetV1FpnKerasFeatureExtractor): """SSD Feature Extractor using Keras-based ResnetV1-101 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=False, name='ResNet101V1_FPN'): """SSD Keras based FPN feature extractor ResnetV1-101 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: whether to use explicit padding when extracting features. Default is None, as it's an invalid option and not implemented in this feature extractor. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDResNet101V1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, resnet_v1_base_model=resnet_v1.resnet_v1_101, resnet_v1_base_model_name='resnet_v1_101', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDResNet152V1FpnKerasFeatureExtractor( SSDResNetV1FpnKerasFeatureExtractor): """SSD Feature Extractor using Keras-based ResnetV1-152 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=None, override_base_feature_extractor_hyperparams=False, name='ResNet152V1_FPN'): """SSD Keras based FPN feature extractor ResnetV1-152 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. fpn_min_level: the minimum level in feature pyramid networks. fpn_max_level: the maximum level in feature pyramid networks. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: whether to use explicit padding when extracting features. Default is None, as it's an invalid option and not implemented in this feature extractor. use_depthwise: Whether to use depthwise convolutions. UNUSED currently. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDResNet152V1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, resnet_v1_base_model=resnet_v1.resnet_v1_152, resnet_v1_base_model_name='resnet_v1_152', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py
ssd_resnet_v1_fpn_keras_feature_extractor.py
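
A minimal sketch of the VGG-style channel-mean subtraction described in the preprocess() docstring of the ResNet FPN extractors above; it assumes TensorFlow 2 is available and that inputs are RGB images in the [0, 255] range.

import tensorflow as tf

def vgg_style_preprocess(images):
  """Subtracts the per-channel means used above from a [batch, H, W, 3] tensor."""
  channel_means = [123.68, 116.779, 103.939]
  if images.shape.as_list()[3] == 3:
    # [[channel_means]] has shape [1, 1, 3] and broadcasts over batch, H, W.
    return images - [[channel_means]]
  return images

images = tf.random.uniform([2, 256, 256, 3], maxval=255.0)
print(vgg_style_preprocess(images).shape)  # (2, 256, 256, 3)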
from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import functools from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 # A modified config of mobilenet v2 that makes it more detection friendly. def _create_modified_mobilenet_config(): conv_defs = copy.deepcopy(mobilenet_v2.V2_DEF) conv_defs['spec'][-1] = mobilenet.op( slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=256) return conv_defs class SSDMobileNetV2FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV2 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, use_native_resize_op=False, override_base_feature_extractor_hyperparams=False): """SSD FPN feature extractor based on Mobilenet v2 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v2 layers {layer_4, layer_7, layer_14, layer_19}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize to do upsampling in FPN. Default is false. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. 
""" super(SSDMobileNetV2FpnFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() self._use_native_resize_op = use_native_resize_op def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \ slim.arg_scope( [mobilenet.depth_multiplier], min_depth=self._min_depth): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v2.mobilenet_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='layer_19', depth_multiplier=self._depth_multiplier, conv_defs=self._conv_defs, use_explicit_padding=self._use_explicit_padding, scope=scope) depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope('fpn', reuse=self._reuse_weights): feature_blocks = [ 'layer_4', 'layer_7', 'layer_14', 'layer_19' ] base_fpn_max_level = min(self._fpn_max_level, 5) feature_block_list = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_block_list.append(feature_blocks[level - 2]) fpn_features = feature_map_generators.fpn_top_down_feature_maps( [(key, image_features[key]) for key in feature_block_list], depth=depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding, use_native_resize_op=self._use_native_resize_op) feature_maps = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( feature_blocks[base_fpn_max_level - 2])] # Construct coarse features padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1): if self._use_depthwise: conv_op = functools.partial( slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d if self._use_explicit_padding: last_feature_map = ops.fixed_padding( last_feature_map, kernel_size) last_feature_map = conv_op( last_feature_map, 
num_outputs=depth_fn(self._additional_layer_depth), kernel_size=[kernel_size, kernel_size], stride=2, padding=padding, scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 19)) feature_maps.append(last_feature_map) return feature_maps
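
A minimal sketch (assuming TensorFlow 2) of the [-1, 1] rescaling performed by SSDMobileNetV2FpnFeatureExtractor.preprocess() above for inputs in [0, 255]; the input shape is illustrative only.

import tensorflow as tf

images = tf.random.uniform([2, 320, 320, 3], maxval=255.0)
preprocessed = (2.0 / 255.0) * images - 1.0
print(float(tf.reduce_min(preprocessed)) >= -1.0,
      float(tf.reduce_max(preprocessed)) <= 1.0)  # True True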
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py
ssd_mobilenet_v2_fpn_feature_extractor.py
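
The sketch below (not library code) spells out how the MobileNet v2 FPN extractor above selects backbone endpoints and names its extra coarse layers for the default fpn_min_level=3 and fpn_max_level=7.

def mobilenet_v2_fpn_plan(fpn_min_level=3, fpn_max_level=7):
  # Levels 2..5 map to these MobileNet v2 endpoints (see the class docstring above).
  feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19']
  base_fpn_max_level = min(fpn_max_level, 5)
  fpn_inputs = [feature_blocks[level - 2]
                for level in range(fpn_min_level, base_fpn_max_level + 1)]
  coarse_scopes = ['bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 19)
                   for i in range(base_fpn_max_level + 1, fpn_max_level + 1)]
  return fpn_inputs, coarse_scopes

print(mobilenet_v2_fpn_plan())
# (['layer_7', 'layer_14', 'layer_19'],
#  ['bottom_up_Conv2d_20', 'bottom_up_Conv2d_21'])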
import tensorflow.compat.v1 as tf from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor class CenterNetResnetFeatureExtractor(CenterNetFeatureExtractor): """Resnet v2 base feature extractor for the CenterNet model.""" def __init__(self, resnet_type, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Initializes the feature extractor with a specific ResNet architecture. Args: resnet_type: A string specifying which kind of ResNet to use. Currently only `resnet_v2_50` and `resnet_v2_101` are supported. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. """ super(CenterNetResnetFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) if resnet_type == 'resnet_v2_101': self._base_model = tf.keras.applications.ResNet101V2(weights=None, include_top=False) output_layer = 'conv5_block3_out' elif resnet_type == 'resnet_v2_50': self._base_model = tf.keras.applications.ResNet50V2(weights=None, include_top=False) output_layer = 'conv5_block3_out' else: raise ValueError('Unknown Resnet Model {}'.format(resnet_type)) output_layer = self._base_model.get_layer(output_layer) self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input, outputs=output_layer.output) resnet_output = self._resnet_model(self._base_model.input) for num_filters in [256, 128, 64]: # TODO(vighneshb) This section has a few differences from the paper # Figure out how much of a performance impact they have. # 1. We use a simple convolution instead of a deformable convolution conv = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=3, strides=1, padding='same') resnet_output = conv(resnet_output) resnet_output = tf.keras.layers.BatchNormalization()(resnet_output) resnet_output = tf.keras.layers.ReLU()(resnet_output) # 2. We use the default initialization for the convolution layers # instead of initializing it to do bilinear upsampling. conv_transpose = tf.keras.layers.Conv2DTranspose(filters=num_filters, kernel_size=3, strides=2, padding='same') resnet_output = conv_transpose(resnet_output) resnet_output = tf.keras.layers.BatchNormalization()(resnet_output) resnet_output = tf.keras.layers.ReLU()(resnet_output) self._feature_extractor_model = tf.keras.models.Model( inputs=self._base_model.input, outputs=resnet_output) def preprocess(self, resized_inputs): """Preprocess input images for the ResNet model. This scales images in the range [0, 255] to the range [-1, 1] Args: resized_inputs: a [batch, height, width, channels] float32 tensor. Returns: outputs: a [batch, height, width, channels] float32 tensor. """ resized_inputs = super(CenterNetResnetFeatureExtractor, self).preprocess( resized_inputs) return tf.keras.applications.resnet_v2.preprocess_input(resized_inputs) def load_feature_extractor_weights(self, path): self._base_model.load_weights(path) def call(self, inputs): """Returns image features extracted by the backbone. Args: inputs: An image tensor of shape [batch_size, input_height, input_width, 3] Returns: features_list: A list of length 1 containing a tensor of shape [batch_size, input_height // 4, input_width // 4, 64] containing the features extracted by the ResNet. 
""" return [self._feature_extractor_model(inputs)] @property def num_feature_outputs(self): return 1 @property def out_stride(self): return 4 @property def classification_backbone(self): return self._base_model def resnet_v2_101(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v2 101 feature extractor.""" del kwargs return CenterNetResnetFeatureExtractor( resnet_type='resnet_v2_101', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering ) def resnet_v2_50(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v2 50 feature extractor.""" del kwargs return CenterNetResnetFeatureExtractor( resnet_type='resnet_v2_50', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_resnet_feature_extractor.py
center_net_resnet_feature_extractor.py
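
A minimal sketch (assuming TensorFlow 2) of the upsampling head that CenterNetResnetFeatureExtractor above appends to the ResNet v2 backbone: the backbone output has stride 32, and three stride-2 transposed convolutions bring it back to the out_stride of 4 with 64 channels. The 512x512 input size and the 2048-channel backbone output are illustrative assumptions.

import tensorflow as tf

x = tf.random.normal([1, 16, 16, 2048])  # e.g. a 512x512 image after a stride-32 backbone
for num_filters in [256, 128, 64]:
  # Plain 3x3 conv in place of the paper's deformable conv, as noted in the source.
  x = tf.keras.layers.Conv2D(num_filters, 3, strides=1, padding='same')(x)
  x = tf.keras.layers.BatchNormalization()(x)
  x = tf.keras.layers.ReLU()(x)
  x = tf.keras.layers.Conv2DTranspose(num_filters, 3, strides=2, padding='same')(x)
  x = tf.keras.layers.BatchNormalization()(x)
  x = tf.keras.layers.ReLU()(x)

print(x.shape)  # (1, 128, 128, 64)  ->  512 / 128 == out_stride of 4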
"""SSD Keras-based EfficientNet + BiFPN (EfficientDet) Feature Extractor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import logging from six.moves import range from six.moves import zip import tensorflow.compat.v2 as tf from tensorflow.python.keras import backend as keras_backend from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators from object_detection.utils import ops from object_detection.utils import shape_utils from object_detection.utils import tf_version # pylint: disable=g-import-not-at-top if tf_version.is_tf2(): from official.vision.image_classification.efficientnet import efficientnet_model _EFFICIENTNET_LEVEL_ENDPOINTS = { 1: 'stack_0/block_0/project_bn', 2: 'stack_1/block_1/add', 3: 'stack_2/block_1/add', 4: 'stack_4/block_2/add', 5: 'stack_6/block_0/project_bn', } class SSDEfficientNetBiFPNKerasFeatureExtractor( ssd_meta_arch.SSDKerasFeatureExtractor): """SSD Keras-based EfficientNetBiFPN (EfficientDet) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level, bifpn_max_level, bifpn_num_iterations, bifpn_num_filters, bifpn_combine_method, efficientnet_version, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name=None): """SSD Keras-based EfficientNetBiFPN (EfficientDet) feature extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. efficientnet_version: the EfficientNet version to use for this feature extractor's backbone. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. 
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: Whether to override the efficientnet backbone's default weight decay with the weight decay defined by `conv_hyperparams`. Note, only overriding of weight decay is currently supported. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetBiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) if depth_multiplier != 1.0: raise ValueError('EfficientNetBiFPN does not support a non-default ' 'depth_multiplier.') if use_explicit_padding: raise ValueError('EfficientNetBiFPN does not support explicit padding.') if use_depthwise: raise ValueError('EfficientNetBiFPN does not support use_depthwise.') self._bifpn_min_level = bifpn_min_level self._bifpn_max_level = bifpn_max_level self._bifpn_num_iterations = bifpn_num_iterations self._bifpn_num_filters = max(bifpn_num_filters, min_depth) self._bifpn_node_params = {'combine_method': bifpn_combine_method} self._efficientnet_version = efficientnet_version logging.info('EfficientDet EfficientNet backbone version: %s', self._efficientnet_version) logging.info('EfficientDet BiFPN num filters: %d', self._bifpn_num_filters) logging.info('EfficientDet BiFPN num iterations: %d', self._bifpn_num_iterations) self._backbone_max_level = min( max(_EFFICIENTNET_LEVEL_ENDPOINTS.keys()), bifpn_max_level) self._output_layer_names = [ _EFFICIENTNET_LEVEL_ENDPOINTS[i] for i in range(bifpn_min_level, self._backbone_max_level + 1)] self._output_layer_alias = [ 'level_{}'.format(i) for i in range(bifpn_min_level, self._backbone_max_level + 1)] # Initialize the EfficientNet backbone. # Note, this is currently done in the init method rather than in the build # method, since doing so introduces an error which is not well understood. 
efficientnet_overrides = {'rescale_input': False} if override_base_feature_extractor_hyperparams: efficientnet_overrides[ 'weight_decay'] = conv_hyperparams.get_regularizer_weight() if (conv_hyperparams.use_sync_batch_norm() and keras_backend.is_tpu_strategy(tf.distribute.get_strategy())): efficientnet_overrides['batch_norm'] = 'tpu' efficientnet_base = efficientnet_model.EfficientNet.from_name( model_name=self._efficientnet_version, overrides=efficientnet_overrides) outputs = [efficientnet_base.get_layer(output_layer_name).output for output_layer_name in self._output_layer_names] self._efficientnet = tf.keras.Model( inputs=efficientnet_base.inputs, outputs=outputs) self.classification_backbone = efficientnet_base self._bifpn_stage = None def build(self, input_shape): self._bifpn_stage = bifpn_generators.KerasBiFpnFeatureMaps( bifpn_num_iterations=self._bifpn_num_iterations, bifpn_num_filters=self._bifpn_num_filters, fpn_min_level=self._bifpn_min_level, fpn_max_level=self._bifpn_max_level, input_max_level=self._backbone_max_level, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, bifpn_node_params=self._bifpn_node_params, name='bifpn') self.built = True def preprocess(self, inputs): """SSD preprocessing. Channel-wise mean subtraction and scaling. Args: inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ if inputs.shape.as_list()[3] == 3: # Input images are expected to be in the range [0, 255]. channel_offset = [0.485, 0.456, 0.406] channel_scale = [0.229, 0.224, 0.225] return ((inputs / 255.0) - [[channel_offset]]) / [[channel_scale]] else: return inputs def _extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 129, preprocessed_inputs) base_feature_maps = self._efficientnet( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) output_feature_map_dict = self._bifpn_stage( list(zip(self._output_layer_alias, base_feature_maps))) return list(output_feature_map_dict.values()) class SSDEfficientNetB0BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=3, bifpn_num_filters=64, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D0'): """SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. 
freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB0BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b0', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB1BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=4, bifpn_num_filters=88, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D1'): """SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. 
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB1BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b1', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB2BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=5, bifpn_num_filters=112, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D2'): """SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. 
pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB2BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b2', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB3BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=6, bifpn_num_filters=160, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D3'): """SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. 
float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB3BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b3', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB4BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=7, bifpn_num_filters=224, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D4'): """SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor. 
Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. 
""" super(SSDEfficientNetB4BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b4', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB5BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=7, bifpn_num_filters=288, bifpn_combine_method='fast_attention', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D5'): """SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. 
name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB5BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b5', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB6BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=8, bifpn_num_filters=384, bifpn_combine_method='sum', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientDet-D6-D7'): """SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor. SSD Keras EfficientNet-b6 BiFPN Feature Extractor, a.k.a. EfficientDet-d6 and EfficientDet-d7. The EfficientDet-d[6,7] models use the same backbone EfficientNet-b6 and the same BiFPN architecture, and therefore have the same number of parameters. They only differ in their input resolutions. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use explicit padding when extracting features. 
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB6BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b6', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name) class SSDEfficientNetB7BiFPNKerasFeatureExtractor( SSDEfficientNetBiFPNKerasFeatureExtractor): """SSD Keras EfficientNet-b7 BiFPN Feature Extractor.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, bifpn_min_level=3, bifpn_max_level=7, bifpn_num_iterations=8, bifpn_num_filters=384, bifpn_combine_method='sum', use_explicit_padding=None, use_depthwise=None, override_base_feature_extractor_hyperparams=None, name='EfficientNet-B7_BiFPN'): """SSD Keras EfficientNet-b7 BiFPN Feature Extractor. Args: is_training: whether the network is in training mode. depth_multiplier: unsupported by EfficientNetBiFPN. float, depth multiplier for the feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. bifpn_min_level: the highest resolution feature map to use in BiFPN. The valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} respectively. bifpn_max_level: the smallest resolution feature map to use in the BiFPN. BiFPN constructions uses features maps starting from bifpn_min_level upto the bifpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of BiFPN levels. bifpn_num_iterations: number of BiFPN iterations. Overrided if efficientdet_version is provided. bifpn_num_filters: number of filters (channels) in all BiFPN layers. Overrided if efficientdet_version is provided. bifpn_combine_method: the method used to combine BiFPN nodes. use_explicit_padding: unsupported by EfficientNetBiFPN. 
Whether to use explicit padding when extracting features. use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular convolutions when inputs to a node have a differing number of channels, and use separable convolutions after combine operations. override_base_feature_extractor_hyperparams: unsupported. Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams`. name: a string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDEfficientNetB7BiFPNKerasFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, bifpn_min_level=bifpn_min_level, bifpn_max_level=bifpn_max_level, bifpn_num_iterations=bifpn_num_iterations, bifpn_num_filters=bifpn_num_filters, bifpn_combine_method=bifpn_combine_method, efficientnet_version='efficientnet-b7', use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams, name=name)
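
For reference, a summary (not library code) of the per-version defaults hard-coded in the EfficientDet subclasses above: each model pairs an EfficientNet backbone with a BiFPN whose depth and width grow with the compound-scaling coefficient, and all of them default to bifpn_min_level=3 and bifpn_max_level=7.

EFFICIENTDET_BIFPN_DEFAULTS = {
    # name: (backbone, bifpn_num_iterations, bifpn_num_filters, combine_method)
    'EfficientDet-D0': ('efficientnet-b0', 3, 64, 'fast_attention'),
    'EfficientDet-D1': ('efficientnet-b1', 4, 88, 'fast_attention'),
    'EfficientDet-D2': ('efficientnet-b2', 5, 112, 'fast_attention'),
    'EfficientDet-D3': ('efficientnet-b3', 6, 160, 'fast_attention'),
    'EfficientDet-D4': ('efficientnet-b4', 7, 224, 'fast_attention'),
    'EfficientDet-D5': ('efficientnet-b5', 7, 288, 'fast_attention'),
    'EfficientDet-D6-D7': ('efficientnet-b6', 8, 384, 'sum'),
    'EfficientNet-B7_BiFPN': ('efficientnet-b7', 8, 384, 'sum'),
}

for name, (backbone, iters, filters, combine) in EFFICIENTDET_BIFPN_DEFAULTS.items():
  print(name, backbone, iters, filters, combine)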
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_efficientnet_bifpn_feature_extractor.py
ssd_efficientnet_bifpn_feature_extractor.py
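
A minimal standalone sketch (assuming TensorFlow 2) of the preprocessing used by SSDEfficientNetBiFPNKerasFeatureExtractor.preprocess() above: inputs in [0, 255] are rescaled to [0, 1] and then normalized with the usual ImageNet channel means and standard deviations.

import tensorflow as tf

def efficientnet_bifpn_preprocess(inputs):
  channel_offset = [0.485, 0.456, 0.406]
  channel_scale = [0.229, 0.224, 0.225]
  if inputs.shape.as_list()[3] == 3:
    return ((inputs / 255.0) - [[channel_offset]]) / [[channel_scale]]
  return inputs

images = tf.random.uniform([2, 512, 512, 3], maxval=255.0)
print(efficientnet_bifpn_preprocess(images).shape)  # (2, 512, 512, 3)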
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import variables_helper from nets.nasnet import nasnet_utils try: from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top except: # pylint: disable=bare-except pass arg_scope = slim.arg_scope def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): """Defines the default arg scope for the PNASNet Large for object detection. This provides a small edit to switch batch norm training on and off. Args: is_batch_norm_training: Boolean indicating whether to train with batch norm. Returns: An `arg_scope` to use for the PNASNet Large Model. """ imagenet_scope = pnasnet.pnasnet_large_arg_scope() with arg_scope(imagenet_scope): with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: return sc def _filter_scaling(reduction_indices, start_cell_num): """Compute the expected filter scaling at given PNASNet cell start_cell_num. In the pnasnet.py code, filter_scaling starts at 1.0. We instead adapt filter scaling to depend on the starting cell. At first cells, before any reduction, filter_scalling is 1.0. With passing any reduction cell, the filter_scaling is multiplied by 2. Args: reduction_indices: list of int indices. start_cell_num: int. Returns: filter_scaling: float. """ filter_scaling = 1.0 for ind in reduction_indices: if ind < start_cell_num: filter_scaling *= 2.0 return filter_scaling # Note: This is largely a copy of _build_pnasnet_base inside pnasnet.py but # with special edits to remove instantiation of the stem and the special # ability to receive as input a pair of hidden states. It constructs only # a sub-network from the original PNASNet model, starting from the # start_cell_num cell and with modified final layer. def _build_pnasnet_base( hidden_previous, hidden, normal_cell, hparams, true_cell_num, start_cell_num): """Constructs a PNASNet image model for proposal classifier features.""" # Find where to place the reduction cells or stride normal cells reduction_indices = nasnet_utils.calc_reduction_layers( hparams.num_cells, hparams.num_reduction_layers) filter_scaling = _filter_scaling(reduction_indices, start_cell_num) # Note: The None is prepended to match the behavior of _imagenet_stem() cell_outputs = [None, hidden_previous, hidden] net = hidden # Run the cells for cell_num in range(start_cell_num, hparams.num_cells): is_reduction = cell_num in reduction_indices stride = 2 if is_reduction else 1 if is_reduction: filter_scaling *= hparams.filter_scaling_rate prev_layer = cell_outputs[-2] net = normal_cell( net, scope='cell_{}'.format(cell_num), filter_scaling=filter_scaling, stride=stride, prev_layer=prev_layer, cell_num=true_cell_num) true_cell_num += 1 cell_outputs.append(net) # Final nonlinearity. # Note that we have dropped the final pooling, dropout and softmax layers # from the default pnasnet version. with tf.variable_scope('final_layer'): net = tf.nn.relu(net) return net # TODO(shlens): Only fixed_shape_resizer is currently supported for PNASNet # featurization. The reason for this is that pnasnet.py only supports # inputs with fully known shapes. We need to update pnasnet.py to handle # shapes not known at compile time. 
class FasterRCNNPNASFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with PNASNet feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 16. """ if first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 16.') super(FasterRCNNPNASFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with PNAS preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the PNASNet network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] end_points: A dictionary mapping feature extractor tensor names to tensors Raises: ValueError: If the created network is missing the required activation. """ del scope if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(pnasnet_large_arg_scope_for_detection( is_batch_norm_training=self._train_batch_norm)): with arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], reuse=self._reuse_weights): _, end_points = pnasnet.build_pnasnet_large( preprocessed_inputs, num_classes=None, is_training=self._is_training, final_endpoint='Cell_7') # Note that both 'Cell_6' and 'Cell_7' have equal depth = 2160. # Cell_7 is the last cell before second reduction. rpn_feature_map = tf.concat([end_points['Cell_6'], end_points['Cell_7']], 3) # pnasnet.py does not maintain the batch size in the first dimension. # This work around permits us retaining the batch for below. batch = preprocessed_inputs.get_shape().as_list()[0] shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] rpn_feature_map_shape = [batch] + shape_without_batch rpn_feature_map.set_shape(rpn_feature_map_shape) return rpn_feature_map, end_points def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the PNASNet network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. 
Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ del scope # Number of used stem cells. num_stem_cells = 2 # Note that we always feed into 2 layers of equal depth # where the first N channels corresponds to previous hidden layer # and the second N channels correspond to the final hidden layer. hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) # Note that what follows is largely a copy of build_pnasnet_large() within # pnasnet.py. We are copying to minimize code pollution in slim. # TODO(shlens,skornblith): Determine the appropriate drop path schedule. # For now the schedule is the default (1.0->0.7 over 250,000 train steps). hparams = pnasnet.large_imagenet_config() if not self._is_training: hparams.set_hparam('drop_path_keep_prob', 1.0) # Calculate the total number of cells in the network total_num_cells = hparams.num_cells + num_stem_cells normal_cell = pnasnet.PNasNetNormalCell( hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps) with arg_scope([slim.dropout, nasnet_utils.drop_path], is_training=self._is_training): with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format): # This corresponds to the cell number just past 'Cell_7' used by # _extract_proposal_features(). start_cell_num = 8 true_cell_num = start_cell_num + num_stem_cells with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): net = _build_pnasnet_base( hidden_previous, hidden, normal_cell=normal_cell, hparams=hparams, true_cell_num=true_cell_num, start_cell_num=start_cell_num) proposal_classifier_features = net return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for PNASNet checkpoints. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable return variables_to_restore
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_pnas_feature_extractor.py
faster_rcnn_pnas_feature_extractor.py
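The `_filter_scaling` helper above is easy to sanity-check in isolation. The sketch below (plain Python, no TensorFlow required) reproduces its rule: every reduction cell that lies before `start_cell_num` doubles the scaling. The reduction indices used here are made up purely for illustration and are not the PNASNet-Large values.

def filter_scaling(reduction_indices, start_cell_num):
  """Mirrors _filter_scaling: double once per reduction cell already passed."""
  scaling = 1.0
  for ind in reduction_indices:
    if ind < start_cell_num:
      scaling *= 2.0
  return scaling

# Hypothetical reduction indices, chosen only to show the doubling behavior.
print(filter_scaling([4, 8], start_cell_num=8))   # 2.0 (one reduction passed)
print(filter_scaling([4, 8], start_cell_num=10))  # 4.0 (both reductions passed)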
import tensorflow.compat.v1 as tf from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor from object_detection.models.keras_models import resnet_v1 _RESNET_MODEL_OUTPUT_LAYERS = { 'resnet_v1_18': ['conv2_block2_out', 'conv3_block2_out', 'conv4_block2_out', 'conv5_block2_out'], 'resnet_v1_34': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out'], 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', 'conv5_block3_out'], 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', 'conv4_block23_out', 'conv5_block3_out'], } class CenterNetResnetV1FpnFeatureExtractor(CenterNetFeatureExtractor): """Resnet v1 FPN base feature extractor for the CenterNet model. This feature extractor uses residual skip connections and nearest neighbor upsampling to produce an output feature map of stride 4, which has precise localization information along with strong semantic information from the top of the net. This design does not exactly follow the original FPN design, specifically: - Since only one output map is necessary for heatmap prediction (stride 4 output), the top-down feature maps can have different numbers of channels. Specifically, the top down feature maps have the following sizes: [h/4, w/4, 64], [h/8, w/8, 128], [h/16, w/16, 256], [h/32, w/32, 256]. - No additional coarse features are used after conv5_x. """ def __init__(self, resnet_type, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Initializes the feature extractor with a specific ResNet architecture. Args: resnet_type: A string specifying which kind of ResNet to use. Currently only `resnet_v1_50` and `resnet_v1_101` are supported. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. """ super(CenterNetResnetV1FpnFeatureExtractor, self).__init__( channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) if resnet_type == 'resnet_v1_50': self._base_model = tf.keras.applications.ResNet50(weights=None, include_top=False) elif resnet_type == 'resnet_v1_101': self._base_model = tf.keras.applications.ResNet101(weights=None, include_top=False) elif resnet_type == 'resnet_v1_18': self._base_model = resnet_v1.resnet_v1_18(weights=None, include_top=False) elif resnet_type == 'resnet_v1_34': self._base_model = resnet_v1.resnet_v1_34(weights=None, include_top=False) else: raise ValueError('Unknown Resnet Model {}'.format(resnet_type)) output_layers = _RESNET_MODEL_OUTPUT_LAYERS[resnet_type] outputs = [self._base_model.get_layer(output_layer_name).output for output_layer_name in output_layers] self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input, outputs=outputs) resnet_outputs = self._resnet_model(self._base_model.input) # Construct the top-down feature maps. top_layer = resnet_outputs[-1] residual_op = tf.keras.layers.Conv2D(filters=256, kernel_size=1, strides=1, padding='same') top_down = residual_op(top_layer) num_filters_list = [256, 128, 64] for i, num_filters in enumerate(num_filters_list): level_ind = 2 - i # Upsample. upsample_op = tf.keras.layers.UpSampling2D(2, interpolation='nearest') top_down = upsample_op(top_down) # Residual (skip-connection) from bottom-up pathway. 
residual_op = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=1, strides=1, padding='same') residual = residual_op(resnet_outputs[level_ind]) # Merge. top_down = top_down + residual next_num_filters = num_filters_list[i+1] if i + 1 <= 2 else 64 conv = tf.keras.layers.Conv2D(filters=next_num_filters, kernel_size=3, strides=1, padding='same') top_down = conv(top_down) top_down = tf.keras.layers.BatchNormalization()(top_down) top_down = tf.keras.layers.ReLU()(top_down) self._feature_extractor_model = tf.keras.models.Model( inputs=self._base_model.input, outputs=top_down) def preprocess(self, resized_inputs): """Preprocess input images for the ResNet model. This scales images in the range [0, 255] to the range [-1, 1] Args: resized_inputs: a [batch, height, width, channels] float32 tensor. Returns: outputs: a [batch, height, width, channels] float32 tensor. """ resized_inputs = super( CenterNetResnetV1FpnFeatureExtractor, self).preprocess(resized_inputs) return tf.keras.applications.resnet.preprocess_input(resized_inputs) def load_feature_extractor_weights(self, path): self._base_model.load_weights(path) def call(self, inputs): """Returns image features extracted by the backbone. Args: inputs: An image tensor of shape [batch_size, input_height, input_width, 3] Returns: features_list: A list of length 1 containing a tensor of shape [batch_size, input_height // 4, input_width // 4, 64] containing the features extracted by the ResNet. """ return [self._feature_extractor_model(inputs)] @property def num_feature_outputs(self): return 1 @property def out_stride(self): return 4 @property def classification_backbone(self): return self._base_model def resnet_v1_101_fpn(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v1 101 FPN feature extractor.""" del kwargs return CenterNetResnetV1FpnFeatureExtractor( resnet_type='resnet_v1_101', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering ) def resnet_v1_50_fpn(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v1 50 FPN feature extractor.""" del kwargs return CenterNetResnetV1FpnFeatureExtractor( resnet_type='resnet_v1_50', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering) def resnet_v1_34_fpn(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v1 34 FPN feature extractor.""" del kwargs return CenterNetResnetV1FpnFeatureExtractor( resnet_type='resnet_v1_34', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering ) def resnet_v1_18_fpn(channel_means, channel_stds, bgr_ordering, **kwargs): """The ResNet v1 18 FPN feature extractor.""" del kwargs return CenterNetResnetV1FpnFeatureExtractor( resnet_type='resnet_v1_18', channel_means=channel_means, channel_stds=channel_stds, bgr_ordering=bgr_ordering)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py
center_net_resnet_v1_fpn_feature_extractor.py
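To make the top-down construction above concrete, here is a minimal standalone sketch of a single merge step, using random tensors with made-up shapes rather than real ResNet outputs: the coarser map is upsampled with nearest-neighbor interpolation, the lateral (bottom-up) map is projected to the same depth with a 1x1 convolution, and the two are added.

import tensorflow as tf

coarse = tf.random.normal([1, 8, 8, 256])      # stand-in for a stride-32 feature map
lateral = tf.random.normal([1, 16, 16, 1024])  # stand-in for a stride-16 feature map

top_down = tf.keras.layers.UpSampling2D(2, interpolation='nearest')(coarse)
residual = tf.keras.layers.Conv2D(filters=256, kernel_size=1, strides=1,
                                  padding='same')(lateral)
merged = top_down + residual  # both operands are now [1, 16, 16, 256]
print(merged.shape)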
"""SSDFeatureExtractor for InceptionV3 features.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import ops from object_detection.utils import shape_utils from nets import inception_v3 class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using InceptionV3 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """InceptionV3 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. Raises: ValueError: If `override_base_feature_extractor_hyperparams` is False. """ super(SSDInceptionV3FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, num_layers=num_layers, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) if not self._override_base_feature_extractor_hyperparams: raise ValueError('SSD Inception V3 feature extractor always uses' 'scope returned by `conv_hyperparams_fn` for both the ' 'base feature extractor and the additional layers ' 'added since there is no arg_scope defined for the base ' 'feature extractor.') def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    feature_map_layout = {
        'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''
                      ][:self._num_layers],
        'layer_depth': [-1, -1, -1, 512, 256, 128][:self._num_layers],
        'use_explicit_padding': self._use_explicit_padding,
        'use_depthwise': self._use_depthwise,
    }

    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope('InceptionV3', reuse=self._reuse_weights) as scope:
        _, image_features = inception_v3.inception_v3_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='Mixed_7c',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

    return list(feature_maps.values())
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_inception_v3_feature_extractor.py
ssd_inception_v3_feature_extractor.py
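The `feature_map_layout` above is truncated by `num_layers`, which is how the extractor drops the extra SSD layers when fewer than six feature maps are requested. A small stand-alone illustration (ordinary Python; the list values are copied from the layout above, the `num_layers` value is arbitrary):

num_layers = 4  # hypothetical; the extractor's default is 6
from_layer = ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''][:num_layers]
layer_depth = [-1, -1, -1, 512, 256, 128][:num_layers]
print(from_layer)   # ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '']
print(layer_depth)  # [-1, -1, -1, 512]; -1 entries come straight from InceptionV3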
import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.meta_architectures import faster_rcnn_meta_arch from nets import resnet_utils from nets import resnet_v1 class FasterRCNNResnetV1FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN Resnet V1 feature extractor implementation.""" def __init__(self, architecture, resnet_model, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, activation_fn=tf.nn.relu): """Constructor. Args: architecture: Architecture name of the Resnet V1 model. resnet_model: Definition of the Resnet V1 model. is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. activation_fn: Activaton functon to use in Resnet V1 model. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') self._architecture = architecture self._resnet_model = resnet_model self._activation_fn = activation_fn super(FasterRCNNResnetV1FeatureExtractor, self).__init__(is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN Resnet V1 preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md Note that if the number of channels is not equal to 3, the mean subtraction will be skipped and the original resized_inputs will be returned. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ if resized_inputs.shape.as_list()[3] == 3: channel_means = [123.68, 116.779, 103.939] return resized_inputs - [[channel_means]] else: return resized_inputs def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] activations: A dictionary mapping feature extractor tensor names to tensors Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. """ if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) shape_assert = tf.Assert( tf.logical_and( tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.']) with tf.control_dependencies([shape_assert]): # Disables batchnorm for fine-tuning with smaller batch sizes. # TODO(chensun): Figure out if it is needed when image # batch size is bigger. 
with slim.arg_scope( resnet_utils.resnet_arg_scope( batch_norm_epsilon=1e-5, batch_norm_scale=True, activation_fn=self._activation_fn, weight_decay=self._weight_decay)): with tf.variable_scope( self._architecture, reuse=self._reuse_weights) as var_scope: _, activations = self._resnet_model( preprocessed_inputs, num_classes=None, is_training=self._train_batch_norm, global_pool=False, output_stride=self._first_stage_features_stride, spatial_squeeze=False, scope=var_scope) handle = scope + '/%s/block3' % self._architecture return activations[handle], activations def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ with tf.variable_scope(self._architecture, reuse=self._reuse_weights): with slim.arg_scope( resnet_utils.resnet_arg_scope( batch_norm_epsilon=1e-5, batch_norm_scale=True, activation_fn=self._activation_fn, weight_decay=self._weight_decay)): with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): blocks = [ resnet_utils.Block('block4', resnet_v1.bottleneck, [{ 'depth': 2048, 'depth_bottleneck': 512, 'stride': 1 }] * 3) ] proposal_classifier_features = resnet_utils.stack_blocks_dense( proposal_feature_maps, blocks) return proposal_classifier_features class FasterRCNNResnet50FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): """Faster R-CNN Resnet 50 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, activation_fn=tf.nn.relu): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. activation_fn: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported. """ super(FasterRCNNResnet50FeatureExtractor, self).__init__('resnet_v1_50', resnet_v1.resnet_v1_50, is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay, activation_fn) class FasterRCNNResnet101FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): """Faster R-CNN Resnet 101 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, activation_fn=tf.nn.relu): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. activation_fn: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported. 
""" super(FasterRCNNResnet101FeatureExtractor, self).__init__('resnet_v1_101', resnet_v1.resnet_v1_101, is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay, activation_fn) class FasterRCNNResnet152FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): """Faster R-CNN Resnet 152 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0, activation_fn=tf.nn.relu): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. activation_fn: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported. """ super(FasterRCNNResnet152FeatureExtractor, self).__init__('resnet_v1_152', resnet_v1.resnet_v1_152, is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay, activation_fn)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py
faster_rcnn_resnet_v1_feature_extractor.py
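The VGG-style preprocessing in `FasterRCNNResnetV1FeatureExtractor.preprocess` is just a per-channel mean subtraction, applied only when the input has three channels. A minimal NumPy sketch of that arithmetic (no TensorFlow required):

import numpy as np

channel_means = [123.68, 116.779, 103.939]  # RGB means from the extractor above
resized_inputs = np.full((1, 2, 2, 3), 255.0, dtype=np.float32)

preprocessed = resized_inputs - [[channel_means]]  # broadcasts over height/width
print(preprocessed[0, 0, 0])  # approximately [131.32, 138.221, 151.061]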
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from object_detection.core import freezable_batch_norm class _LayersOverride(object): """Alternative Keras layers interface for the Keras InceptionResNetV2.""" def __init__(self, batchnorm_training, output_stride=16, align_feature_maps=False, batchnorm_scale=False, default_batchnorm_momentum=0.999, default_batchnorm_epsilon=1e-3, weight_decay=0.00004): """Alternative tf.keras.layers interface, for use by InceptionResNetV2. It is used by the Keras applications kwargs injection API to modify the Inception Resnet V2 Keras application with changes required by the Object Detection API. These injected interfaces make the following changes to the network: - Supports freezing batch norm layers - Adds support for feature map alignment (like in the Slim model) - Adds support for changing the output stride (like in the Slim model) - Adds support for overriding various batch norm hyperparameters Because the Keras inception resnet v2 application does not assign explicit names to most individual layers, the injection of output stride support works by identifying convolution layers according to their filter counts and pre-feature-map-alignment padding arguments. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. output_stride: A scalar that specifies the requested ratio of input to output spatial resolution. Only supports 8 and 16. align_feature_maps: When true, changes all the VALID paddings in the network to SAME padding so that the feature maps are aligned. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. Batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: small float added to variance to avoid dividing by zero. weight_decay: the l2 regularization weight decay for weights variables. (gets multiplied by 0.5 to map from slim l2 regularization weight to Keras l2 regularization weight). """ self._use_atrous = output_stride == 8 self._align_feature_maps = align_feature_maps self._batchnorm_training = batchnorm_training self._batchnorm_scale = batchnorm_scale self._default_batchnorm_momentum = default_batchnorm_momentum self._default_batchnorm_epsilon = default_batchnorm_epsilon self.regularizer = tf.keras.regularizers.l2(weight_decay * 0.5) def Conv2D(self, filters, kernel_size, **kwargs): """Builds a Conv2D layer according to the current Object Detection config. Overrides the Keras InceptionResnetV2 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. If feature map alignment is enabled, the padding will be forced to 'same'. If output_stride is 8, some conv2d layers will be matched according to their name or filter counts or pre-alignment padding parameters, and will have the correct 'dilation rate' or 'strides' set. Args: filters: The number of filters to use for the convolution. kernel_size: The kernel size to specify the height and width of the 2D convolution window. **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A Keras Conv2D layer specified by the Object Detection hyperparameter configurations. 
""" kwargs['kernel_regularizer'] = self.regularizer kwargs['bias_regularizer'] = self.regularizer # Because the Keras application does not set explicit names for most layers, # (instead allowing names to auto-increment), we must match individual # layers in the model according to their filter count, name, or # pre-alignment mapping. This means we can only align the feature maps # after we have applied our updates in cases where output_stride=8. if self._use_atrous and (filters == 384): kwargs['strides'] = 1 name = kwargs.get('name') if self._use_atrous and ( (name and 'block17' in name) or (filters == 128 or filters == 160 or (filters == 192 and kwargs.get('padding', '').lower() != 'valid'))): kwargs['dilation_rate'] = 2 if self._align_feature_maps: kwargs['padding'] = 'same' return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) def MaxPooling2D(self, pool_size, strides, **kwargs): """Builds a pooling layer according to the current Object Detection config. Overrides the Keras InceptionResnetV2 application's MaxPooling2D layers with ones that follow the spec specified by the Object Detection hyperparameters. If feature map alignment is enabled, the padding will be forced to 'same'. If output_stride is 8, some pooling layers will be matched according to their pre-alignment padding parameters, and will have their 'strides' argument overridden. Args: pool_size: The pool size specified by the Keras application. strides: The strides specified by the unwrapped Keras application. **kwargs: Keyword args specified by the Keras application for constructing the max pooling layer. Returns: A MaxPool2D layer specified by the Object Detection hyperparameter configurations. """ if self._use_atrous and kwargs.get('padding', '').lower() == 'valid': strides = 1 if self._align_feature_maps: kwargs['padding'] = 'same' return tf.keras.layers.MaxPool2D(pool_size, strides=strides, **kwargs) # We alias MaxPool2D because Keras has that alias MaxPool2D = MaxPooling2D # pylint: disable=invalid-name def BatchNormalization(self, **kwargs): """Builds a normalization layer. Overrides the Keras application batch norm with the norm specified by the Object Detection configuration. Args: **kwargs: Keyword arguments from the `layers.BatchNormalization` calls in the Keras application. Returns: A normalization layer specified by the Object Detection hyperparameter configurations. """ kwargs['scale'] = self._batchnorm_scale return freezable_batch_norm.FreezableBatchNorm( training=self._batchnorm_training, epsilon=self._default_batchnorm_epsilon, momentum=self._default_batchnorm_momentum, **kwargs) # Forward all non-overridden methods to the keras layers def __getattr__(self, item): return getattr(tf.keras.layers, item) # pylint: disable=invalid-name def inception_resnet_v2( batchnorm_training, output_stride=16, align_feature_maps=False, batchnorm_scale=False, weight_decay=0.00004, default_batchnorm_momentum=0.9997, default_batchnorm_epsilon=0.001, **kwargs): """Instantiates the InceptionResnetV2 architecture. 
(Modified for object detection) This wraps the InceptionResnetV2 tensorflow Keras application, but uses the Keras application's kwargs-based monkey-patching API to override the Keras architecture with the following changes: - Supports freezing batch norm layers with FreezableBatchNorms - Adds support for feature map alignment (like in the Slim model) - Adds support for changing the output stride (like in the Slim model) - Changes the default batchnorm momentum to 0.9997 - Adds support for overriding various batchnorm hyperparameters Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. output_stride: A scalar that specifies the requested ratio of input to output spatial resolution. Only supports 8 and 16. align_feature_maps: When true, changes all the VALID paddings in the network to SAME padding so that the feature maps are aligned. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. weight_decay: the l2 regularization weight decay for weights variables. (gets multiplied by 0.5 to map from slim l2 regularization weight to Keras l2 regularization weight). default_batchnorm_momentum: Float. Batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: small float added to variance to avoid dividing by zero. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.InceptionResNetV2` method that constructs the Keras model. Returns: A Keras model instance. """ if output_stride != 8 and output_stride != 16: raise ValueError('output_stride must be 8 or 16.') layers_override = _LayersOverride( batchnorm_training, output_stride, align_feature_maps=align_feature_maps, batchnorm_scale=batchnorm_scale, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, weight_decay=weight_decay) return tf.keras.applications.InceptionResNetV2( layers=layers_override, **kwargs) # pylint: enable=invalid-name
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/inception_resnet_v2.py
inception_resnet_v2.py
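The `_LayersOverride` class above relies on a simple pattern: expose the same factory methods as `tf.keras.layers`, customize a few of them, and forward everything else through `__getattr__`. The following is a stripped-down sketch of that pattern only; the class name and its single override are invented for illustration and are not part of the library.

import tensorflow as tf

class TinyLayersOverride(object):
  """Minimal illustration of the kwargs-injection 'layers' object."""

  def __init__(self, weight_decay=0.00004):
    # Follows the slim-to-Keras l2 scaling convention described above.
    self.regularizer = tf.keras.regularizers.l2(weight_decay * 0.5)

  def Conv2D(self, filters, kernel_size, **kwargs):  # pylint: disable=invalid-name
    kwargs['kernel_regularizer'] = self.regularizer
    return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs)

  def __getattr__(self, item):
    # Anything not overridden falls through to the stock Keras layers.
    return getattr(tf.keras.layers, item)

layers = TinyLayersOverride()
conv = layers.Conv2D(8, 3, padding='same')  # customized
relu = layers.ReLU()                        # forwarded unchanged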
from __future__ import print_function

import os

from absl import app
import numpy as np
import tensorflow.compat.v1 as tf

FLAGS = tf.flags.FLAGS

tf.flags.DEFINE_string('model', 'resnet_v2_101',
                       'The model to load. The following are supported: '
                       '"resnet_v1_50", "resnet_v1_101", "resnet_v2_50", '
                       '"resnet_v2_101"')
tf.flags.DEFINE_string('output_path', None,
                       'The directory to output weights in.')
tf.flags.DEFINE_boolean('verify_weights', True,
                        ('Verify the weights are loaded correctly by making '
                         'sure the predictions are the same before and after '
                         'saving.'))


def init_model(name):
  """Creates a Keras Model with the specific ResNet version."""
  if name == 'resnet_v1_50':
    model = tf.keras.applications.ResNet50(weights='imagenet')
  elif name == 'resnet_v1_101':
    model = tf.keras.applications.ResNet101(weights='imagenet')
  elif name == 'resnet_v2_50':
    model = tf.keras.applications.ResNet50V2(weights='imagenet')
  elif name == 'resnet_v2_101':
    model = tf.keras.applications.ResNet101V2(weights='imagenet')
  else:
    raise ValueError('Model {} not supported'.format(FLAGS.model))
  return model


def main(_):
  model = init_model(FLAGS.model)

  path = os.path.join(FLAGS.output_path, FLAGS.model)
  tf.gfile.MakeDirs(path)
  weights_path = os.path.join(path, 'weights')
  ckpt = tf.train.Checkpoint(feature_extractor=model)
  saved_path = ckpt.save(weights_path)

  if FLAGS.verify_weights:
    imgs = np.random.randn(1, 224, 224, 3).astype(np.float32)
    keras_preds = model(imgs)

    # Rebuild the model and wrap it in a new Checkpoint so that the restore
    # below populates the fresh model rather than the one already in memory.
    model = init_model(FLAGS.model)
    ckpt = tf.train.Checkpoint(feature_extractor=model)
    ckpt.restore(saved_path)
    loaded_weights_pred = model(imgs).numpy()

    if not np.all(np.isclose(keras_preds, loaded_weights_pred)):
      raise RuntimeError('The model was not saved correctly.')


if __name__ == '__main__':
  tf.enable_v2_behavior()
  app.run(main)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/convert_keras_models.py
convert_keras_models.py
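A standalone sketch of the save/verify round trip this script performs, using `weights=None` (so no ImageNet weights are downloaded) and a temporary output directory; both choices are placeholders, not defaults of the script:

import os

import numpy as np
import tensorflow as tf

out_dir = '/tmp/resnet_v1_50_demo'  # hypothetical output directory
os.makedirs(out_dir, exist_ok=True)

model = tf.keras.applications.ResNet50(weights=None)
ckpt = tf.train.Checkpoint(feature_extractor=model)
saved_path = ckpt.save(os.path.join(out_dir, 'weights'))

imgs = np.random.randn(1, 224, 224, 3).astype(np.float32)
before = model(imgs).numpy()

fresh = tf.keras.applications.ResNet50(weights=None)
tf.train.Checkpoint(feature_extractor=fresh).restore(saved_path)
after = fresh(imgs).numpy()
print(np.allclose(before, after))  # True once the checkpoint is restored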
import tensorflow.compat.v2 as tf BATCH_NORM_EPSILON = 1e-5 BATCH_NORM_MOMENTUM = 0.1 BATCH_NORM_FUSED = True class IdentityLayer(tf.keras.layers.Layer): """A layer which passes through the input as it is.""" def call(self, inputs): return inputs def _get_padding_for_kernel_size(kernel_size): if kernel_size == 7: return (3, 3) elif kernel_size == 3: return (1, 1) else: raise ValueError('Padding for kernel size {} not known.'.format( kernel_size)) def batchnorm(): try: return tf.keras.layers.experimental.SyncBatchNormalization( name='batchnorm', epsilon=1e-5, momentum=0.1) except AttributeError: return tf.keras.layers.BatchNormalization( name='batchnorm', epsilon=1e-5, momentum=0.1, fused=BATCH_NORM_FUSED) class ConvolutionalBlock(tf.keras.layers.Layer): """Block that aggregates Convolution + Norm layer + ReLU.""" def __init__(self, kernel_size, out_channels, stride=1, relu=True, padding='same'): """Initializes the Convolutional block. Args: kernel_size: int, convolution kernel size. out_channels: int, the desired number of output channels. stride: Integer, stride used in the convolution. relu: bool, whether to use relu at the end of the layer. padding: str, the padding scheme to use when kernel_size <= 1 """ super(ConvolutionalBlock, self).__init__() if kernel_size > 1: padding = 'valid' padding_size = _get_padding_for_kernel_size(kernel_size) # TODO(vighneshb) Explore if removing and using padding option in conv # layer works. self.pad = tf.keras.layers.ZeroPadding2D(padding_size) else: self.pad = IdentityLayer() self.conv = tf.keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, use_bias=False, strides=stride, padding=padding) self.norm = batchnorm() if relu: self.relu = tf.keras.layers.ReLU() else: self.relu = IdentityLayer() def call(self, inputs): net = self.pad(inputs) net = self.conv(net) net = self.norm(net) return self.relu(net) class SkipConvolution(ConvolutionalBlock): """The skip connection layer for a ResNet.""" def __init__(self, out_channels, stride): """Initializes the skip convolution layer. Args: out_channels: int, the desired number of output channels. stride: int, the stride for the layer. """ super(SkipConvolution, self).__init__( out_channels=out_channels, kernel_size=1, stride=stride, relu=False) class ResidualBlock(tf.keras.layers.Layer): """A Residual block.""" def __init__(self, out_channels, skip_conv=False, kernel_size=3, stride=1, padding='same'): """Initializes the Residual block. Args: out_channels: int, the desired number of output channels. skip_conv: bool, whether to use a conv layer for skip connections. kernel_size: int, convolution kernel size. stride: Integer, stride used in the convolution. padding: str, the type of padding to use. 
""" super(ResidualBlock, self).__init__() self.conv_block = ConvolutionalBlock( kernel_size=kernel_size, out_channels=out_channels, stride=stride) self.conv = tf.keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, use_bias=False, strides=1, padding=padding) self.norm = batchnorm() if skip_conv: self.skip = SkipConvolution(out_channels=out_channels, stride=stride) else: self.skip = IdentityLayer() self.relu = tf.keras.layers.ReLU() def call(self, inputs): net = self.conv_block(inputs) net = self.conv(net) net = self.norm(net) net_skip = self.skip(inputs) return self.relu(net + net_skip) class InputDownsampleBlock(tf.keras.layers.Layer): """Block for the initial feature downsampling.""" def __init__(self, out_channels_initial_conv, out_channels_residual_block): """Initializes the downsample block. Args: out_channels_initial_conv: int, the desired number of output channels in the initial conv layer. out_channels_residual_block: int, the desired number of output channels in the underlying residual block. """ super(InputDownsampleBlock, self).__init__() self.conv_block = ConvolutionalBlock( kernel_size=7, out_channels=out_channels_initial_conv, stride=2, padding='valid') self.residual_block = ResidualBlock( out_channels=out_channels_residual_block, stride=2, skip_conv=True) def call(self, inputs): return self.residual_block(self.conv_block(inputs)) class InputConvBlock(tf.keras.layers.Layer): """Block for the initial feature convolution. This block is used in the hourglass network when we don't want to downsample the input. """ def __init__(self, out_channels_initial_conv, out_channels_residual_block): """Initializes the downsample block. Args: out_channels_initial_conv: int, the desired number of output channels in the initial conv layer. out_channels_residual_block: int, the desired number of output channels in the underlying residual block. """ super(InputConvBlock, self).__init__() self.conv_block = ConvolutionalBlock( kernel_size=3, out_channels=out_channels_initial_conv, stride=1, padding='valid') self.residual_block = ResidualBlock( out_channels=out_channels_residual_block, stride=1, skip_conv=True) def call(self, inputs): return self.residual_block(self.conv_block(inputs)) def _make_repeated_residual_blocks(out_channels, num_blocks, initial_stride=1, residual_channels=None, initial_skip_conv=False): """Stack Residual blocks one after the other. Args: out_channels: int, the desired number of output channels. num_blocks: int, the number of residual blocks to be stacked. initial_stride: int, the stride of the initial residual block. residual_channels: int, the desired number of output channels in the intermediate residual blocks. If not specifed, we use out_channels. initial_skip_conv: bool, if set, the first residual block uses a skip convolution. This is useful when the number of channels in the input are not the same as residual_channels. Returns: blocks: A list of residual blocks to be applied in sequence. """ blocks = [] if residual_channels is None: residual_channels = out_channels for i in range(num_blocks - 1): # Only use the stride at the first block so we don't repeatedly downsample # the input stride = initial_stride if i == 0 else 1 # If the stide is more than 1, we cannot use an identity layer for the # skip connection and are forced to use a conv for the skip connection. 
skip_conv = stride > 1 if i == 0 and initial_skip_conv: skip_conv = True blocks.append( ResidualBlock(out_channels=residual_channels, stride=stride, skip_conv=skip_conv) ) if num_blocks == 1: # If there is only 1 block, the for loop above is not run, # therefore we honor the requested stride in the last residual block stride = initial_stride # We are forced to use a conv in the skip connection if stride > 1 skip_conv = stride > 1 else: stride = 1 skip_conv = residual_channels != out_channels blocks.append(ResidualBlock(out_channels=out_channels, skip_conv=skip_conv, stride=stride)) return blocks def _apply_blocks(inputs, blocks): net = inputs for block in blocks: net = block(net) return net class EncoderDecoderBlock(tf.keras.layers.Layer): """An encoder-decoder block which recursively defines the hourglass network.""" def __init__(self, num_stages, channel_dims, blocks_per_stage, stagewise_downsample=True, encoder_decoder_shortcut=True): """Initializes the encoder-decoder block. Args: num_stages: int, Number of stages in the network. At each stage we have 2 encoder and 1 decoder blocks. The second encoder block downsamples the input. channel_dims: int list, the output channels dimensions of stages in the network. `channel_dims[0]` is used to define the number of channels in the first encoder block and `channel_dims[1]` is used to define the number of channels in the second encoder block. The channels in the recursive inner layers are defined using `channel_dims[1:]` blocks_per_stage: int list, number of residual blocks to use at each stage. `blocks_per_stage[0]` defines the number of blocks at the current stage and `blocks_per_stage[1:]` is used at further stages. stagewise_downsample: bool, whether or not to downsample before passing inputs to the next stage. encoder_decoder_shortcut: bool, whether or not to use shortcut connections between encoder and decoder. 
""" super(EncoderDecoderBlock, self).__init__() out_channels = channel_dims[0] out_channels_downsampled = channel_dims[1] self.encoder_decoder_shortcut = encoder_decoder_shortcut if encoder_decoder_shortcut: self.merge_features = tf.keras.layers.Add() self.encoder_block1 = _make_repeated_residual_blocks( out_channels=out_channels, num_blocks=blocks_per_stage[0], initial_stride=1) initial_stride = 2 if stagewise_downsample else 1 self.encoder_block2 = _make_repeated_residual_blocks( out_channels=out_channels_downsampled, num_blocks=blocks_per_stage[0], initial_stride=initial_stride, initial_skip_conv=out_channels != out_channels_downsampled) if num_stages > 1: self.inner_block = [ EncoderDecoderBlock(num_stages - 1, channel_dims[1:], blocks_per_stage[1:], stagewise_downsample=stagewise_downsample, encoder_decoder_shortcut=encoder_decoder_shortcut) ] else: self.inner_block = _make_repeated_residual_blocks( out_channels=out_channels_downsampled, num_blocks=blocks_per_stage[1]) self.decoder_block = _make_repeated_residual_blocks( residual_channels=out_channels_downsampled, out_channels=out_channels, num_blocks=blocks_per_stage[0]) self.upsample = tf.keras.layers.UpSampling2D(initial_stride) def call(self, inputs): if self.encoder_decoder_shortcut: encoded_outputs = _apply_blocks(inputs, self.encoder_block1) encoded_downsampled_outputs = _apply_blocks(inputs, self.encoder_block2) inner_block_outputs = _apply_blocks( encoded_downsampled_outputs, self.inner_block) decoded_outputs = _apply_blocks(inner_block_outputs, self.decoder_block) upsampled_outputs = self.upsample(decoded_outputs) if self.encoder_decoder_shortcut: return self.merge_features([encoded_outputs, upsampled_outputs]) else: return upsampled_outputs class HourglassNetwork(tf.keras.Model): """The hourglass network.""" def __init__(self, num_stages, input_channel_dims, channel_dims_per_stage, blocks_per_stage, num_hourglasses, initial_downsample=True, stagewise_downsample=True, encoder_decoder_shortcut=True): """Intializes the feature extractor. Args: num_stages: int, Number of stages in the network. At each stage we have 2 encoder and 1 decoder blocks. The second encoder block downsamples the input. input_channel_dims: int, the number of channels in the input conv blocks. channel_dims_per_stage: int list, the output channel dimensions of each stage in the hourglass network. blocks_per_stage: int list, number of residual blocks to use at each stage in the hourglass network num_hourglasses: int, number of hourglas networks to stack sequentially. initial_downsample: bool, if set, downsamples the input by a factor of 4 before applying the rest of the network. Downsampling is done with a 7x7 convolution kernel, otherwise a 3x3 kernel is used. stagewise_downsample: bool, whether or not to downsample before passing inputs to the next stage. encoder_decoder_shortcut: bool, whether or not to use shortcut connections between encoder and decoder. 
""" super(HourglassNetwork, self).__init__() self.num_hourglasses = num_hourglasses self.initial_downsample = initial_downsample if initial_downsample: self.downsample_input = InputDownsampleBlock( out_channels_initial_conv=input_channel_dims, out_channels_residual_block=channel_dims_per_stage[0] ) else: self.conv_input = InputConvBlock( out_channels_initial_conv=input_channel_dims, out_channels_residual_block=channel_dims_per_stage[0] ) self.hourglass_network = [] self.output_conv = [] for _ in range(self.num_hourglasses): self.hourglass_network.append( EncoderDecoderBlock( num_stages=num_stages, channel_dims=channel_dims_per_stage, blocks_per_stage=blocks_per_stage, stagewise_downsample=stagewise_downsample, encoder_decoder_shortcut=encoder_decoder_shortcut) ) self.output_conv.append( ConvolutionalBlock(kernel_size=3, out_channels=channel_dims_per_stage[0]) ) self.intermediate_conv1 = [] self.intermediate_conv2 = [] self.intermediate_residual = [] for _ in range(self.num_hourglasses - 1): self.intermediate_conv1.append( ConvolutionalBlock( kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False) ) self.intermediate_conv2.append( ConvolutionalBlock( kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False) ) self.intermediate_residual.append( ResidualBlock(out_channels=channel_dims_per_stage[0]) ) self.intermediate_relu = tf.keras.layers.ReLU() def call(self, inputs): if self.initial_downsample: inputs = self.downsample_input(inputs) else: inputs = self.conv_input(inputs) outputs = [] for i in range(self.num_hourglasses): hourglass_output = self.hourglass_network[i](inputs) output = self.output_conv[i](hourglass_output) outputs.append(output) if i < self.num_hourglasses - 1: secondary_output = (self.intermediate_conv1[i](inputs) + self.intermediate_conv2[i](output)) secondary_output = self.intermediate_relu(secondary_output) inputs = self.intermediate_residual[i](secondary_output) return outputs @property def out_stride(self): """The stride in the output image of the network.""" return 4 @property def num_feature_outputs(self): """Ther number of feature outputs returned by the feature extractor.""" return self.num_hourglasses def _layer_depth(layer): """Compute depth of Conv/Residual blocks or lists of them.""" if isinstance(layer, list): return sum([_layer_depth(l) for l in layer]) elif isinstance(layer, ConvolutionalBlock): return 1 elif isinstance(layer, ResidualBlock): return 2 else: raise ValueError('Unknown layer - {}'.format(layer)) def _encoder_decoder_depth(network): """Helper function to compute depth of encoder-decoder blocks.""" encoder_block2_layers = _layer_depth(network.encoder_block2) decoder_block_layers = _layer_depth(network.decoder_block) if isinstance(network.inner_block[0], EncoderDecoderBlock): assert len(network.inner_block) == 1, 'Inner block is expected as length 1.' inner_block_layers = _encoder_decoder_depth(network.inner_block[0]) return inner_block_layers + encoder_block2_layers + decoder_block_layers elif isinstance(network.inner_block[0], ResidualBlock): return (encoder_block2_layers + decoder_block_layers + _layer_depth(network.inner_block)) else: raise ValueError('Unknown inner block type.') def hourglass_depth(network): """Helper function to verify depth of hourglass backbone.""" input_conv_layers = 3 # 1 ResidualBlock and 1 ConvBlock # Only intermediate_conv2 and intermediate_residual are applied before # sending inputs to the later stages. 
intermediate_layers = ( _layer_depth(network.intermediate_conv2) + _layer_depth(network.intermediate_residual) ) # network.output_conv is applied before sending input to the later stages output_layers = _layer_depth(network.output_conv) encoder_decoder_layers = sum(_encoder_decoder_depth(net) for net in network.hourglass_network) return (input_conv_layers + encoder_decoder_layers + intermediate_layers + output_layers) def hourglass_104(): """The Hourglass-104 backbone. The architecture parameters are taken from [1]. Returns: network: An HourglassNetwork object implementing the Hourglass-104 backbone. [1]: https://arxiv.org/abs/1904.07850 """ return HourglassNetwork( input_channel_dims=128, channel_dims_per_stage=[256, 256, 384, 384, 384, 512], num_hourglasses=2, num_stages=5, blocks_per_stage=[2, 2, 2, 2, 2, 4], ) def single_stage_hourglass(input_channel_dims, channel_dims_per_stage, blocks_per_stage, initial_downsample=True, stagewise_downsample=True, encoder_decoder_shortcut=True): assert len(channel_dims_per_stage) == len(blocks_per_stage) return HourglassNetwork( input_channel_dims=input_channel_dims, channel_dims_per_stage=channel_dims_per_stage, num_hourglasses=1, num_stages=len(channel_dims_per_stage) - 1, blocks_per_stage=blocks_per_stage, initial_downsample=initial_downsample, stagewise_downsample=stagewise_downsample, encoder_decoder_shortcut=encoder_decoder_shortcut ) def hourglass_10(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[1, 1], channel_dims_per_stage=[nc * 2, nc * 2]) def hourglass_20(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[1, 2, 2], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3]) def hourglass_32(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[2, 2, 2, 2], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3]) def hourglass_52(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[2, 2, 2, 2, 2, 4], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4]) def hourglass_100(num_channels, initial_downsample=True): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, initial_downsample=initial_downsample, blocks_per_stage=[4, 4, 4, 4, 4, 8], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4]) def hourglass_20_uniform_size(num_channels): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, blocks_per_stage=[1, 2, 2], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3], initial_downsample=False, stagewise_downsample=False) def hourglass_20_no_shortcut(num_channels): nc = num_channels return single_stage_hourglass( input_channel_dims=nc, blocks_per_stage=[1, 2, 2], channel_dims_per_stage=[nc * 2, nc * 2, nc * 3], initial_downsample=False, encoder_decoder_shortcut=False)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/hourglass_network.py
hourglass_network.py
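A short usage sketch for the factory functions above, assuming the `object_detection` package is importable and a TF2 runtime is active. The 128x128 input is arbitrary but chosen to divide evenly through the downsampling stages.

import tensorflow as tf
from object_detection.models.keras_models import hourglass_network

net = hourglass_network.hourglass_10(num_channels=32)
images = tf.random.normal([1, 128, 128, 3])

outputs = net(images)
print(len(outputs))      # 1 hourglass, so 1 output feature map
print(outputs[0].shape)  # (1, 32, 32, 64): stride 4, channel_dims_per_stage[0]
print(net.out_stride, net.num_feature_outputs)  # 4, 1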
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from tensorflow.python.keras.applications import resnet from object_detection.core import freezable_batch_norm from object_detection.models.keras_models import model_utils def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name """Pads the input along the spatial dimensions independently of input size. Pads the input such that if it was used in a convolution with 'VALID' padding, the output would have the same dimensions as if the unpadded input was used in a convolution with 'SAME' padding. Args: inputs: A tensor of size [batch, height_in, width_in, channels]. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. rate: An integer, rate for atrous convolution. Returns: output: A tensor of size [batch, height_out, width_out, channels] with the input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = tf.pad( inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return padded_inputs class _LayersOverride(object): """Alternative Keras layers interface for the Keras Resnet V1.""" def __init__(self, batchnorm_training, batchnorm_scale=True, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5, weight_decay=0.0001, conv_hyperparams=None, min_depth=8, depth_multiplier=1): """Alternative tf.keras.layers interface, for use by the Keras Resnet V1. The class is used by the Keras applications kwargs injection API to modify the Resnet V1 Keras application with changes required by the Object Detection API. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the epsilon. weight_decay: The weight decay to use for regularizing the model. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default resnet_v1 layer builders. min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. """ self._batchnorm_training = batchnorm_training self._batchnorm_scale = batchnorm_scale self._default_batchnorm_momentum = default_batchnorm_momentum self._default_batchnorm_epsilon = default_batchnorm_epsilon self._conv_hyperparams = conv_hyperparams self._min_depth = min_depth self._depth_multiplier = depth_multiplier self.regularizer = tf.keras.regularizers.l2(weight_decay) self.initializer = tf.variance_scaling_initializer() def _FixedPaddingLayer(self, kernel_size, rate=1): # pylint: disable=invalid-name return tf.keras.layers.Lambda( lambda x: _fixed_padding(x, kernel_size, rate)) def Conv2D(self, filters, kernel_size, **kwargs): # pylint: disable=invalid-name """Builds a Conv2D layer according to the current Object Detection config. 
Overrides the Keras Resnet application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: filters: The number of filters to use for the convolution. kernel_size: The kernel size to specify the height and width of the 2D convolution window. **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras Conv2D layer to the input argument, or that will first pad the input then apply a Conv2D layer. """ # Apply the minimum depth to the convolution layers. filters = max(int(filters * self._depth_multiplier), self._min_depth) if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) else: kwargs['kernel_regularizer'] = self.regularizer kwargs['kernel_initializer'] = self.initializer # Set use_bias as false to keep it consistent with Slim Resnet model. kwargs['use_bias'] = False kwargs['padding'] = 'same' stride = kwargs.get('strides') if stride and kernel_size and stride > 1 and kernel_size > 1: kwargs['padding'] = 'valid' def padded_conv(features): # pylint: disable=invalid-name padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.Conv2D( filters, kernel_size, **kwargs)(padded_features) return padded_conv else: return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) def Activation(self, *args, **kwargs): # pylint: disable=unused-argument,invalid-name """Builds an activation layer. Overrides the Keras application Activation layer specified by the Object Detection configuration. Args: *args: Ignored, required to match the `tf.keras.layers.Activation` interface. **kwargs: Only the name is used, required to match `tf.keras.layers.Activation` interface. Returns: An activation layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_activation_layer(name=name) else: return tf.keras.layers.Lambda(tf.nn.relu, name=name) def BatchNormalization(self, **kwargs): # pylint: disable=invalid-name """Builds a normalization layer. Overrides the Keras application batch norm with the norm specified by the Object Detection configuration. Args: **kwargs: Only the name is used, all other params ignored. Required for matching `layers.BatchNormalization` calls in the Keras application. Returns: A normalization layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_batch_norm( training=self._batchnorm_training, name=name) else: kwargs['scale'] = self._batchnorm_scale kwargs['epsilon'] = self._default_batchnorm_epsilon return freezable_batch_norm.FreezableBatchNorm( training=self._batchnorm_training, momentum=self._default_batchnorm_momentum, **kwargs) def Input(self, shape): # pylint: disable=invalid-name """Builds an Input layer. Overrides the Keras application Input layer with one that uses a tf.placeholder_with_default instead of a tf.placeholder. This is necessary to ensure the application works when run on a TPU. Args: shape: A tuple of integers representing the shape of the input, which includes both spatial share and channels, but not the batch size. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known. Returns: An input layer for the specified shape that internally uses a placeholder_with_default. 
""" default_size = 224 default_batch_size = 1 shape = list(shape) default_shape = [default_size if dim is None else dim for dim in shape] input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) placeholder_with_default = tf.placeholder_with_default( input=input_tensor, shape=[None] + shape) return model_utils.input_layer(shape, placeholder_with_default) def MaxPooling2D(self, pool_size, **kwargs): # pylint: disable=invalid-name """Builds a MaxPooling2D layer with default padding as 'SAME'. This is specified by the default resnet arg_scope in slim. Args: pool_size: The pool size specified by the Keras application. **kwargs: Ignored, required to match the Keras applications usage. Returns: A MaxPooling2D layer with default padding as 'SAME'. """ kwargs['padding'] = 'same' return tf.keras.layers.MaxPooling2D(pool_size, **kwargs) # Add alias as Keras also has it. MaxPool2D = MaxPooling2D # pylint: disable=invalid-name def ZeroPadding2D(self, padding, **kwargs): # pylint: disable=unused-argument,invalid-name """Replaces explicit padding in the Keras application with a no-op. Args: padding: The padding values for image height and width. **kwargs: Ignored, required to match the Keras applications usage. Returns: A no-op identity lambda. """ return lambda x: x # Forward all non-overridden methods to the keras layers def __getattr__(self, item): return getattr(tf.keras.layers, item) # pylint: disable=invalid-name def resnet_v1_50(batchnorm_training, batchnorm_scale=True, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5, weight_decay=0.0001, conv_hyperparams=None, min_depth=8, depth_multiplier=1, **kwargs): """Instantiates the Resnet50 architecture, modified for object detection. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the epsilon. weight_decay: The weight decay to use for regularizing the model. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default resnet_v1 layer builders. min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.Mobilenet` method that constructs the Keras model. Returns: A Keras ResnetV1-50 model instance. 
""" layers_override = _LayersOverride( batchnorm_training, batchnorm_scale=batchnorm_scale, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, conv_hyperparams=conv_hyperparams, weight_decay=weight_decay, min_depth=min_depth, depth_multiplier=depth_multiplier) return tf.keras.applications.resnet.ResNet50( layers=layers_override, **kwargs) def resnet_v1_101(batchnorm_training, batchnorm_scale=True, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5, weight_decay=0.0001, conv_hyperparams=None, min_depth=8, depth_multiplier=1, **kwargs): """Instantiates the Resnet50 architecture, modified for object detection. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the epsilon. weight_decay: The weight decay to use for regularizing the model. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default resnet_v1 layer builders. min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.Mobilenet` method that constructs the Keras model. Returns: A Keras ResnetV1-101 model instance. """ layers_override = _LayersOverride( batchnorm_training, batchnorm_scale=batchnorm_scale, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, conv_hyperparams=conv_hyperparams, weight_decay=weight_decay, min_depth=min_depth, depth_multiplier=depth_multiplier) return tf.keras.applications.resnet.ResNet101( layers=layers_override, **kwargs) def resnet_v1_152(batchnorm_training, batchnorm_scale=True, default_batchnorm_momentum=0.997, default_batchnorm_epsilon=1e-5, weight_decay=0.0001, conv_hyperparams=None, min_depth=8, depth_multiplier=1, **kwargs): """Instantiates the Resnet50 architecture, modified for object detection. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the epsilon. weight_decay: The weight decay to use for regularizing the model. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default resnet_v1 layer builders. min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. 
**kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.Mobilenet` method that constructs the Keras model. Returns: A Keras ResnetV1-152 model instance. """ layers_override = _LayersOverride( batchnorm_training, batchnorm_scale=batchnorm_scale, default_batchnorm_momentum=default_batchnorm_momentum, default_batchnorm_epsilon=default_batchnorm_epsilon, conv_hyperparams=conv_hyperparams, weight_decay=weight_decay, min_depth=min_depth, depth_multiplier=depth_multiplier) return tf.keras.applications.resnet.ResNet152( layers=layers_override, **kwargs) # pylint: enable=invalid-name # The following codes are based on the existing keras ResNet model pattern: # google3/third_party/tensorflow/python/keras/applications/resnet.py def block_basic(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None): """A residual block for ResNet18/34. Args: x: input tensor. filters: integer, filters of the bottleneck layer. kernel_size: default 3, kernel size of the bottleneck layer. stride: default 1, stride of the first layer. conv_shortcut: default False, use convolution shortcut if True, otherwise identity shortcut. name: string, block label. Returns: Output tensor for the residual block. """ layers = tf.keras.layers bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1 preact = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_preact_bn')( x) preact = layers.Activation('relu', name=name + '_preact_relu')(preact) if conv_shortcut: shortcut = layers.Conv2D( filters, 1, strides=1, name=name + '_0_conv')( preact) else: shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x x = layers.ZeroPadding2D( padding=((1, 1), (1, 1)), name=name + '_1_pad')( preact) x = layers.Conv2D( filters, kernel_size, strides=1, use_bias=False, name=name + '_1_conv')( x) x = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')( x) x = layers.Activation('relu', name=name + '_1_relu')(x) x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x) x = layers.Conv2D( filters, kernel_size, strides=stride, use_bias=False, name=name + '_2_conv')( x) x = layers.BatchNormalization( axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')( x) x = layers.Activation('relu', name=name + '_2_relu')(x) x = layers.Add(name=name + '_out')([shortcut, x]) return x def stack_basic(x, filters, blocks, stride1=2, name=None): """A set of stacked residual blocks for ResNet18/34. Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. name: string, stack label. Returns: Output tensor for the stacked blocks. 
""" x = block_basic(x, filters, conv_shortcut=True, name=name + '_block1') for i in range(2, blocks): x = block_basic(x, filters, name=name + '_block' + str(i)) x = block_basic( x, filters, stride=stride1, name=name + '_block' + str(blocks)) return x def resnet_v1_18(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax'): """Instantiates the ResNet18 architecture.""" def stack_fn(x): x = stack_basic(x, 64, 2, stride1=1, name='conv2') x = stack_basic(x, 128, 2, name='conv3') x = stack_basic(x, 256, 2, name='conv4') return stack_basic(x, 512, 2, name='conv5') return resnet.ResNet( stack_fn, True, True, 'resnet18', include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation=classifier_activation) def resnet_v1_34(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax'): """Instantiates the ResNet34 architecture.""" def stack_fn(x): x = stack_basic(x, 64, 3, stride1=1, name='conv2') x = stack_basic(x, 128, 4, name='conv3') x = stack_basic(x, 256, 6, name='conv4') return stack_basic(x, 512, 3, name='conv5') return resnet.ResNet( stack_fn, True, True, 'resnet34', include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation=classifier_activation)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/resnet_v1.py
resnet_v1.py
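For reference, a minimal usage sketch of the resnet_v1 wrappers above. This is not part of the package; the `weights` and `include_top` arguments are assumptions about what is forwarded through `**kwargs` to the underlying `tf.keras.applications` ResNet constructor, and TF1 graph mode is assumed because the overridden Input layer builds a `tf.placeholder_with_default`.

import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import resnet_v1

tf.disable_v2_behavior()  # the Input override uses placeholder_with_default

# ResNet-50 backbone with frozen batch norm and the default L2 regularizer.
# `weights=None` / `include_top=False` are assumed to pass through **kwargs.
backbone = resnet_v1.resnet_v1_50(
    batchnorm_training=False,
    weights=None,
    include_top=False)

images = tf.zeros([1, 224, 224, 3], tf.float32)
feature_map = backbone(images)  # stride-32 feature map with 2048 channels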
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from object_detection.core import freezable_batch_norm from object_detection.models.keras_models import model_utils def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name """Pads the input along the spatial dimensions independently of input size. Pads the input such that if it was used in a convolution with 'VALID' padding, the output would have the same dimensions as if the unpadded input was used in a convolution with 'SAME' padding. Args: inputs: A tensor of size [batch, height_in, width_in, channels]. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. rate: An integer, rate for atrous convolution. Returns: output: A tensor of size [batch, height_out, width_out, channels] with the input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1), kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)] pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] pad_beg = [pad_total[0] // 2, pad_total[1] // 2] pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]], [0, 0]]) return padded_inputs class _LayersOverride(object): """Alternative Keras layers interface for the Keras MobileNetV1.""" def __init__(self, batchnorm_training, default_batchnorm_momentum=0.999, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None): """Alternative tf.keras.layers interface, for use by the Keras MobileNetV1. It is used by the Keras applications kwargs injection API to modify the MobilenetV1 Keras application with changes required by the Object Detection API. These injected interfaces make the following changes to the network: - Applies the Object Detection hyperparameter configuration - Supports FreezableBatchNorms - Adds support for a min number of filters for each layer - Makes the `alpha` parameter affect the final convolution block even if it is less than 1.0 - Adds support for explicit padding of convolutions Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default mobilenet_v1 layer builders. use_explicit_padding: If True, use 'valid' padding for convolutions, but explicitly pre-pads inputs so that the output dimensions are the same as if 'same' padding were used. Off by default. alpha: The width multiplier referenced in the MobileNetV1 paper. It modifies the number of filters in each convolutional layer. It's called depth multiplier in Keras application MobilenetV1. min_depth: Minimum number of filters in the convolutional layers. conv_defs: Network layout to specify the mobilenet_v1 body. Default is `None` to use the default mobilenet_v1 network layout. 
""" self._alpha = alpha self._batchnorm_training = batchnorm_training self._default_batchnorm_momentum = default_batchnorm_momentum self._conv_hyperparams = conv_hyperparams self._use_explicit_padding = use_explicit_padding self._min_depth = min_depth self._conv_defs = conv_defs self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5) self.initializer = tf.truncated_normal_initializer(stddev=0.09) def _FixedPaddingLayer(self, kernel_size, rate=1): return tf.keras.layers.Lambda( lambda x: _fixed_padding(x, kernel_size, rate)) def Conv2D(self, filters, kernel_size, **kwargs): """Builds a Conv2D layer according to the current Object Detection config. Overrides the Keras MobileNetV1 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: filters: The number of filters to use for the convolution. kernel_size: The kernel size to specify the height and width of the 2D convolution window. In this function, the kernel size is expected to be pair of numbers and the numbers must be equal for this function. **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras Conv2D layer to the input argument, or that will first pad the input then apply a Conv2D layer. Raises: ValueError: if kernel size is not a pair of equal integers (representing a square kernel). """ if not isinstance(kernel_size, tuple): raise ValueError('kernel is expected to be a tuple.') if len(kernel_size) != 2: raise ValueError('kernel is expected to be length two.') if kernel_size[0] != kernel_size[1]: raise ValueError('kernel is expected to be square.') layer_name = kwargs['name'] if self._conv_defs: conv_filters = model_utils.get_conv_def(self._conv_defs, layer_name) if conv_filters: filters = conv_filters # Apply the width multiplier and the minimum depth to the convolution layers filters = int(filters * self._alpha) if self._min_depth and filters < self._min_depth: filters = self._min_depth if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) else: kwargs['kernel_regularizer'] = self.regularizer kwargs['kernel_initializer'] = self.initializer kwargs['padding'] = 'same' if self._use_explicit_padding and kernel_size[0] > 1: kwargs['padding'] = 'valid' def padded_conv(features): # pylint: disable=invalid-name padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.Conv2D( filters, kernel_size, **kwargs)(padded_features) return padded_conv else: return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) def DepthwiseConv2D(self, kernel_size, **kwargs): """Builds a DepthwiseConv2D according to the Object Detection config. Overrides the Keras MobileNetV2 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: kernel_size: The kernel size to specify the height and width of the 2D convolution window. **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras DepthwiseConv2D layer to the input argument, or that will first pad the input then apply the depthwise convolution. """ if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) # Both regularizer and initializaer also applies to depthwise layer in # MobilenetV1, so we remap the kernel_* to depthwise_* here. 
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] else: kwargs['depthwise_regularizer'] = self.regularizer kwargs['depthwise_initializer'] = self.initializer kwargs['padding'] = 'same' if self._use_explicit_padding: kwargs['padding'] = 'valid' def padded_depthwise_conv(features): # pylint: disable=invalid-name padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.DepthwiseConv2D( kernel_size, **kwargs)(padded_features) return padded_depthwise_conv else: return tf.keras.layers.DepthwiseConv2D(kernel_size, **kwargs) def BatchNormalization(self, **kwargs): """Builds a normalization layer. Overrides the Keras application batch norm with the norm specified by the Object Detection configuration. Args: **kwargs: Only the name is used, all other params ignored. Required for matching `layers.BatchNormalization` calls in the Keras application. Returns: A normalization layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_batch_norm( training=self._batchnorm_training, name=name) else: return freezable_batch_norm.FreezableBatchNorm( training=self._batchnorm_training, epsilon=1e-3, momentum=self._default_batchnorm_momentum, name=name) def Input(self, shape): """Builds an Input layer. Overrides the Keras application Input layer with one that uses a tf.placeholder_with_default instead of a tf.placeholder. This is necessary to ensure the application works when run on a TPU. Args: shape: The shape for the input layer to use. (Does not include a dimension for the batch size). Returns: An input layer for the specified shape that internally uses a placeholder_with_default. """ default_size = 224 default_batch_size = 1 shape = list(shape) default_shape = [default_size if dim is None else dim for dim in shape] input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) placeholder_with_default = tf.placeholder_with_default( input=input_tensor, shape=[None] + shape) return model_utils.input_layer(shape, placeholder_with_default) # pylint: disable=unused-argument def ReLU(self, *args, **kwargs): """Builds an activation layer. Overrides the Keras application ReLU with the activation specified by the Object Detection configuration. Args: *args: Ignored, required to match the `tf.keras.ReLU` interface **kwargs: Only the name is used, required to match `tf.keras.ReLU` interface Returns: An activation layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_activation_layer(name=name) else: return tf.keras.layers.Lambda(tf.nn.relu6, name=name) # pylint: enable=unused-argument # pylint: disable=unused-argument def ZeroPadding2D(self, padding, **kwargs): """Replaces explicit padding in the Keras application with a no-op. Args: padding: The padding values for image height and width. **kwargs: Ignored, required to match the Keras applications usage. Returns: A no-op identity lambda. 
""" return lambda x: x # pylint: enable=unused-argument # Forward all non-overridden methods to the keras layers def __getattr__(self, item): return getattr(tf.keras.layers, item) # pylint: disable=invalid-name def mobilenet_v1(batchnorm_training, default_batchnorm_momentum=0.9997, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None, **kwargs): """Instantiates the MobileNetV1 architecture, modified for object detection. This wraps the MobileNetV1 tensorflow Keras application, but uses the Keras application's kwargs-based monkey-patching API to override the Keras architecture with the following changes: - Changes the default batchnorm momentum to 0.9997 - Applies the Object Detection hyperparameter configuration - Supports FreezableBatchNorms - Adds support for a min number of filters for each layer - Makes the `alpha` parameter affect the final convolution block even if it is less than 1.0 - Adds support for explicit padding of convolutions - Makes the Input layer use a tf.placeholder_with_default instead of a tf.placeholder, to work on TPUs. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default mobilenet_v1 layer builders. use_explicit_padding: If True, use 'valid' padding for convolutions, but explicitly pre-pads inputs so that the output dimensions are the same as if 'same' padding were used. Off by default. alpha: The width multiplier referenced in the MobileNetV1 paper. It modifies the number of filters in each convolutional layer. min_depth: Minimum number of filters in the convolutional layers. conv_defs: Network layout to specify the mobilenet_v1 body. Default is `None` to use the default mobilenet_v1 network layout. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.Mobilenet` method that constructs the Keras model. Returns: A Keras model instance. """ layers_override = _LayersOverride( batchnorm_training, default_batchnorm_momentum=default_batchnorm_momentum, conv_hyperparams=conv_hyperparams, use_explicit_padding=use_explicit_padding, min_depth=min_depth, alpha=alpha, conv_defs=conv_defs) return tf.keras.applications.MobileNet( alpha=alpha, layers=layers_override, **kwargs) # pylint: enable=invalid-name
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/mobilenet_v1.py
mobilenet_v1.py
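A hedged usage sketch of the mobilenet_v1 wrapper above. As with the ResNet wrappers, `weights` and `include_top` are assumptions about kwargs forwarded to `tf.keras.applications.MobileNet`, and graph mode is assumed for the placeholder-based Input override.

import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import mobilenet_v1

tf.disable_v2_behavior()  # Input override builds a placeholder_with_default

# Width multiplier 0.75 with a floor of 16 filters per layer; explicit
# padding replaces 'same' convolutions with fixed padding + 'valid'.
model = mobilenet_v1.mobilenet_v1(
    batchnorm_training=False,
    alpha=0.75,
    min_depth=16,
    use_explicit_padding=True,
    weights=None,        # assumed to be forwarded via **kwargs
    include_top=False)   # assumed to be forwarded via **kwargs

feature_map = model(tf.zeros([1, 224, 224, 3], tf.float32))
# feature_map has spatial stride 32 relative to the 224x224 input.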
"""A wrapper around the MobileNet v2 models for Keras, for object detection.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from object_detection.core import freezable_batch_norm from object_detection.models.keras_models import model_utils from object_detection.utils import ops # pylint: disable=invalid-name # This method copied from the slim mobilenet base network code (same license) def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v class _LayersOverride(object): """Alternative Keras layers interface for the Keras MobileNetV2.""" def __init__(self, batchnorm_training, default_batchnorm_momentum=0.999, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None): """Alternative tf.keras.layers interface, for use by the Keras MobileNetV2. It is used by the Keras applications kwargs injection API to modify the Mobilenet v2 Keras application with changes required by the Object Detection API. These injected interfaces make the following changes to the network: - Applies the Object Detection hyperparameter configuration - Supports FreezableBatchNorms - Adds support for a min number of filters for each layer - Makes the `alpha` parameter affect the final convolution block even if it is less than 1.0 - Adds support for explicit padding of convolutions Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default mobilenet_v2 layer builders. use_explicit_padding: If True, use 'valid' padding for convolutions, but explicitly pre-pads inputs so that the output dimensions are the same as if 'same' padding were used. Off by default. alpha: The width multiplier referenced in the MobileNetV2 paper. It modifies the number of filters in each convolutional layer. min_depth: Minimum number of filters in the convolutional layers. conv_defs: Network layout to specify the mobilenet_v2 body. Default is `None` to use the default mobilenet_v2 network layout. """ self._alpha = alpha self._batchnorm_training = batchnorm_training self._default_batchnorm_momentum = default_batchnorm_momentum self._conv_hyperparams = conv_hyperparams self._use_explicit_padding = use_explicit_padding self._min_depth = min_depth self._conv_defs = conv_defs self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5) self.initializer = tf.truncated_normal_initializer(stddev=0.09) def _FixedPaddingLayer(self, kernel_size): return tf.keras.layers.Lambda(lambda x: ops.fixed_padding(x, kernel_size)) def Conv2D(self, filters, **kwargs): """Builds a Conv2D layer according to the current Object Detection config. Overrides the Keras MobileNetV2 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: filters: The number of filters to use for the convolution. **kwargs: Keyword args specified by the Keras application for constructing the convolution. 
Returns: A one-arg callable that will either directly apply a Keras Conv2D layer to the input argument, or that will first pad the input then apply a Conv2D layer. """ # Make sure 'alpha' is always applied to the last convolution block's size # (This overrides the Keras application's functionality) layer_name = kwargs.get('name') if layer_name == 'Conv_1': if self._conv_defs: filters = model_utils.get_conv_def(self._conv_defs, 'Conv_1') else: filters = 1280 if self._alpha < 1.0: filters = _make_divisible(filters * self._alpha, 8) # Apply the minimum depth to the convolution layers if (self._min_depth and (filters < self._min_depth) and not kwargs.get('name').endswith('expand')): filters = self._min_depth if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) else: kwargs['kernel_regularizer'] = self.regularizer kwargs['kernel_initializer'] = self.initializer kwargs['padding'] = 'same' kernel_size = kwargs.get('kernel_size') if self._use_explicit_padding and kernel_size > 1: kwargs['padding'] = 'valid' def padded_conv(features): padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.Conv2D(filters, **kwargs)(padded_features) return padded_conv else: return tf.keras.layers.Conv2D(filters, **kwargs) def DepthwiseConv2D(self, **kwargs): """Builds a DepthwiseConv2D according to the Object Detection config. Overrides the Keras MobileNetV2 application's convolutions with ones that follow the spec specified by the Object Detection hyperparameters. Args: **kwargs: Keyword args specified by the Keras application for constructing the convolution. Returns: A one-arg callable that will either directly apply a Keras DepthwiseConv2D layer to the input argument, or that will first pad the input then apply the depthwise convolution. """ if self._conv_hyperparams: kwargs = self._conv_hyperparams.params(**kwargs) # Both the regularizer and initializer apply to the depthwise layer in # MobilenetV1, so we remap the kernel_* to depthwise_* here. kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] else: kwargs['depthwise_regularizer'] = self.regularizer kwargs['depthwise_initializer'] = self.initializer kwargs['padding'] = 'same' kernel_size = kwargs.get('kernel_size') if self._use_explicit_padding and kernel_size > 1: kwargs['padding'] = 'valid' def padded_depthwise_conv(features): padded_features = self._FixedPaddingLayer(kernel_size)(features) return tf.keras.layers.DepthwiseConv2D(**kwargs)(padded_features) return padded_depthwise_conv else: return tf.keras.layers.DepthwiseConv2D(**kwargs) def BatchNormalization(self, **kwargs): """Builds a normalization layer. Overrides the Keras application batch norm with the norm specified by the Object Detection configuration. Args: **kwargs: Only the name is used, all other params ignored. Required for matching `layers.BatchNormalization` calls in the Keras application. Returns: A normalization layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_batch_norm( training=self._batchnorm_training, name=name) else: return freezable_batch_norm.FreezableBatchNorm( training=self._batchnorm_training, epsilon=1e-3, momentum=self._default_batchnorm_momentum, name=name) def Input(self, shape): """Builds an Input layer. Overrides the Keras application Input layer with one that uses a tf.placeholder_with_default instead of a tf.placeholder. 
This is necessary to ensure the application works when run on a TPU. Args: shape: The shape for the input layer to use. (Does not include a dimension for the batch size). Returns: An input layer for the specified shape that internally uses a placeholder_with_default. """ default_size = 224 default_batch_size = 1 shape = list(shape) default_shape = [default_size if dim is None else dim for dim in shape] input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) placeholder_with_default = tf.placeholder_with_default( input=input_tensor, shape=[None] + shape) return model_utils.input_layer(shape, placeholder_with_default) # pylint: disable=unused-argument def ReLU(self, *args, **kwargs): """Builds an activation layer. Overrides the Keras application ReLU with the activation specified by the Object Detection configuration. Args: *args: Ignored, required to match the `tf.keras.ReLU` interface **kwargs: Only the name is used, required to match `tf.keras.ReLU` interface Returns: An activation layer specified by the Object Detection hyperparameter configurations. """ name = kwargs.get('name') if self._conv_hyperparams: return self._conv_hyperparams.build_activation_layer(name=name) else: return tf.keras.layers.Lambda(tf.nn.relu6, name=name) # pylint: enable=unused-argument # pylint: disable=unused-argument def ZeroPadding2D(self, **kwargs): """Replaces explicit padding in the Keras application with a no-op. Args: **kwargs: Ignored, required to match the Keras applications usage. Returns: A no-op identity lambda. """ return lambda x: x # pylint: enable=unused-argument # Forward all non-overridden methods to the keras layers def __getattr__(self, item): return getattr(tf.keras.layers, item) def mobilenet_v2(batchnorm_training, default_batchnorm_momentum=0.9997, conv_hyperparams=None, use_explicit_padding=False, alpha=1.0, min_depth=None, conv_defs=None, **kwargs): """Instantiates the MobileNetV2 architecture, modified for object detection. This wraps the MobileNetV2 tensorflow Keras application, but uses the Keras application's kwargs-based monkey-patching API to override the Keras architecture with the following changes: - Changes the default batchnorm momentum to 0.9997 - Applies the Object Detection hyperparameter configuration - Supports FreezableBatchNorms - Adds support for a min number of filters for each layer - Makes the `alpha` parameter affect the final convolution block even if it is less than 1.0 - Adds support for explicit padding of convolutions - Makes the Input layer use a tf.placeholder_with_default instead of a tf.placeholder, to work on TPUs. Args: batchnorm_training: Bool. Assigned to Batch norm layer `training` param when constructing `freezable_batch_norm.FreezableBatchNorm` layers. default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, batch norm layers will be constructed using this value as the momentum. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. Optionally set to `None` to use default mobilenet_v2 layer builders. use_explicit_padding: If True, use 'valid' padding for convolutions, but explicitly pre-pads inputs so that the output dimensions are the same as if 'same' padding were used. Off by default. alpha: The width multiplier referenced in the MobileNetV2 paper. It modifies the number of filters in each convolutional layer. min_depth: Minimum number of filters in the convolutional layers. conv_defs: Network layout to specify the mobilenet_v2 body. 
Default is `None` to use the default mobilenet_v2 network layout. **kwargs: Keyword arguments forwarded directly to the `tf.keras.applications.MobilenetV2` method that constructs the Keras model. Returns: A Keras model instance. """ layers_override = _LayersOverride( batchnorm_training, default_batchnorm_momentum=default_batchnorm_momentum, conv_hyperparams=conv_hyperparams, use_explicit_padding=use_explicit_padding, min_depth=min_depth, alpha=alpha, conv_defs=conv_defs) return tf.keras.applications.MobileNetV2(alpha=alpha, layers=layers_override, **kwargs) # pylint: enable=invalid-name
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/keras_models/mobilenet_v2.py
mobilenet_v2.py
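A similar sketch for the mobilenet_v2 wrapper. The point of interest is that the override above also scales the final 'Conv_1' block, which the stock Keras application leaves at 1280 channels; `weights` and `include_top` are again assumed to pass through `**kwargs`.

import tensorflow.compat.v1 as tf

from object_detection.models.keras_models import mobilenet_v2

tf.disable_v2_behavior()  # Input override builds a placeholder_with_default

model = mobilenet_v2.mobilenet_v2(
    batchnorm_training=False,
    alpha=0.5,
    min_depth=8,
    weights=None,        # assumed to be forwarded via **kwargs
    include_top=False)   # assumed to be forwarded via **kwargs

# With alpha=0.5 the overridden 'Conv_1' layer should end up with
# _make_divisible(1280 * 0.5, 8) == 640 output channels instead of 1280.
features = model(tf.zeros([1, 224, 224, 3], tf.float32))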
import tensorflow.compat.v1 as tf

from object_detection.core import box_coder
from object_detection.core import box_list

EPSILON = 1e-8


class SquareBoxCoder(box_coder.BoxCoder):
  """Encodes a 3-scalar representation of a square box."""

  def __init__(self, scale_factors=None):
    """Constructor for SquareBoxCoder.

    Args:
      scale_factors: List of 3 positive scalars to scale ty, tx, and tl.
        If set to None, does not perform scaling. For faster RCNN,
        the open-source implementation recommends using [10.0, 10.0, 5.0].

    Raises:
      ValueError: If scale_factors is not length 3 or contains values less
        than or equal to 0.
    """
    if scale_factors:
      if len(scale_factors) != 3:
        raise ValueError('The argument scale_factors must be a list of length '
                         '3.')
      if any(scalar <= 0 for scalar in scale_factors):
        raise ValueError('The values in scale_factors must all be greater '
                         'than 0.')
    self._scale_factors = scale_factors

  @property
  def code_size(self):
    return 3

  def _encode(self, boxes, anchors):
    """Encodes a box collection with respect to an anchor collection.

    Args:
      boxes: BoxList holding N boxes to be encoded.
      anchors: BoxList of anchors.

    Returns:
      a tensor representing N anchor-encoded boxes of the format
      [ty, tx, tl].
    """
    # Convert anchors to the center coordinate representation.
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    la = tf.sqrt(ha * wa)
    ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
    l = tf.sqrt(h * w)
    # Avoid NaN in division and log below.
    la += EPSILON
    l += EPSILON

    tx = (xcenter - xcenter_a) / la
    ty = (ycenter - ycenter_a) / la
    tl = tf.log(l / la)
    # Scales location targets for joint training.
    if self._scale_factors:
      ty *= self._scale_factors[0]
      tx *= self._scale_factors[1]
      tl *= self._scale_factors[2]
    return tf.transpose(tf.stack([ty, tx, tl]))

  def _decode(self, rel_codes, anchors):
    """Decodes relative codes to boxes.

    Args:
      rel_codes: a tensor representing N anchor-encoded boxes.
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes.
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    la = tf.sqrt(ha * wa)

    ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      tl /= self._scale_factors[2]
    l = tf.exp(tl) * la
    ycenter = ty * la + ycenter_a
    xcenter = tx * la + xcenter_a
    ymin = ycenter - l / 2.
    xmin = xcenter - l / 2.
    ymax = ycenter + l / 2.
    xmax = xcenter + l / 2.
    return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/square_box_coder.py
square_box_coder.py
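A small worked example for SquareBoxCoder, using the public encode/decode entry points inherited from the BoxCoder base class. With an anchor of side 4 and a 2x2 box sharing the same centre, ty = tx = 0 and tl = log(2/4), which the scale factor 5.0 turns into roughly -3.47.

import tensorflow.compat.v1 as tf

from object_detection.box_coders import square_box_coder
from object_detection.core import box_list

coder = square_box_coder.SquareBoxCoder(scale_factors=[10.0, 10.0, 5.0])

# One anchor of side 4 centred at (2, 2); one 2x2 box with the same centre.
anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 4.0, 4.0]]))
boxes = box_list.BoxList(tf.constant([[1.0, 1.0, 3.0, 3.0]]))

rel_codes = coder.encode(boxes, anchors)    # approximately [[0.0, 0.0, -3.47]]
decoded = coder.decode(rel_codes, anchors)  # square box back at [1, 1, 3, 3]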
import tensorflow.compat.v1 as tf

from object_detection.core import box_coder
from object_detection.core import box_list

EPSILON = 1e-8


class FasterRcnnBoxCoder(box_coder.BoxCoder):
  """Faster RCNN box coder."""

  def __init__(self, scale_factors=None):
    """Constructor for FasterRcnnBoxCoder.

    Args:
      scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
        If set to None, does not perform scaling. For Faster RCNN,
        the open-source implementation recommends using
        [10.0, 10.0, 5.0, 5.0].
    """
    if scale_factors:
      assert len(scale_factors) == 4
      for scalar in scale_factors:
        assert scalar > 0
    self._scale_factors = scale_factors

  @property
  def code_size(self):
    return 4

  def _encode(self, boxes, anchors):
    """Encode a box collection with respect to anchor collection.

    Args:
      boxes: BoxList holding N boxes to be encoded.
      anchors: BoxList of anchors.

    Returns:
      a tensor representing N anchor-encoded boxes of the format
      [ty, tx, th, tw].
    """
    # Convert anchors to the center coordinate representation.
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
    # Avoid NaN in division and log below.
    ha += EPSILON
    wa += EPSILON
    h += EPSILON
    w += EPSILON

    tx = (xcenter - xcenter_a) / wa
    ty = (ycenter - ycenter_a) / ha
    tw = tf.log(w / wa)
    th = tf.log(h / ha)
    # Scales location targets as used in paper for joint training.
    if self._scale_factors:
      ty *= self._scale_factors[0]
      tx *= self._scale_factors[1]
      th *= self._scale_factors[2]
      tw *= self._scale_factors[3]
    return tf.transpose(tf.stack([ty, tx, th, tw]))

  def _decode(self, rel_codes, anchors):
    """Decode relative codes to boxes.

    Args:
      rel_codes: a tensor representing N anchor-encoded boxes.
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes.
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()

    ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      th /= self._scale_factors[2]
      tw /= self._scale_factors[3]
    w = tf.exp(tw) * wa
    h = tf.exp(th) * ha
    ycenter = ty * ha + ycenter_a
    xcenter = tx * wa + xcenter_a
    ymin = ycenter - h / 2.
    xmin = xcenter - w / 2.
    ymax = ycenter + h / 2.
    xmax = xcenter + w / 2.
    return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/faster_rcnn_box_coder.py
faster_rcnn_box_coder.py
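A matching worked example for FasterRcnnBoxCoder. The anchor is 2 tall by 4 wide and the ground-truth box shares its centre at half the size, so th = tw = log(0.5) before the 5.0 scale factor is applied.

import tensorflow.compat.v1 as tf

from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.core import box_list

coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
    scale_factors=[10.0, 10.0, 5.0, 5.0])

anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 2.0, 4.0]]))
boxes = box_list.BoxList(tf.constant([[0.5, 1.0, 1.5, 3.0]]))

# Encoding gives approximately [[0.0, 0.0, -3.47, -3.47]] after scaling.
rel_codes = coder.encode(boxes, anchors)
# Decoding against the same anchors recovers [0.5, 1.0, 1.5, 3.0].
roundtrip = coder.decode(rel_codes, anchors).get()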
import tensorflow.compat.v1 as tf from object_detection.core import box_coder from object_detection.core import box_list from object_detection.core import standard_fields as fields EPSILON = 1e-8 class KeypointBoxCoder(box_coder.BoxCoder): """Keypoint box coder.""" def __init__(self, num_keypoints, scale_factors=None): """Constructor for KeypointBoxCoder. Args: num_keypoints: Number of keypoints to encode/decode. scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. In addition to scaling ty and tx, the first 2 scalars are used to scale the y and x coordinates of the keypoints as well. If set to None, does not perform scaling. """ self._num_keypoints = num_keypoints if scale_factors: assert len(scale_factors) == 4 for scalar in scale_factors: assert scalar > 0 self._scale_factors = scale_factors self._keypoint_scale_factors = None if scale_factors is not None: self._keypoint_scale_factors = tf.expand_dims( tf.tile([ tf.cast(scale_factors[0], dtype=tf.float32), tf.cast(scale_factors[1], dtype=tf.float32) ], [num_keypoints]), 1) @property def code_size(self): return 4 + self._num_keypoints * 2 def _encode(self, boxes, anchors): """Encode a box and keypoint collection with respect to anchor collection. Args: boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are tensors with the shape [N, 4], and keypoints are tensors with the shape [N, num_keypoints, 2]. anchors: BoxList of anchors. Returns: a tensor representing N anchor-encoded boxes of the format [ty, tx, th, tw, tky0, tkx0, tky1, tkx1, ...] where tky0 and tkx0 represent the y and x coordinates of the first keypoint, tky1 and tkx1 represent the y and x coordinates of the second keypoint, and so on. """ # Convert anchors to the center coordinate representation. ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() keypoints = boxes.get_field(fields.BoxListFields.keypoints) keypoints = tf.transpose(tf.reshape(keypoints, [-1, self._num_keypoints * 2])) num_boxes = boxes.num_boxes() # Avoid NaN in division and log below. ha += EPSILON wa += EPSILON h += EPSILON w += EPSILON tx = (xcenter - xcenter_a) / wa ty = (ycenter - ycenter_a) / ha tw = tf.log(w / wa) th = tf.log(h / ha) tiled_anchor_centers = tf.tile( tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1]) tiled_anchor_sizes = tf.tile( tf.stack([ha, wa]), [self._num_keypoints, 1]) tkeypoints = (keypoints - tiled_anchor_centers) / tiled_anchor_sizes # Scales location targets as used in paper for joint training. if self._scale_factors: ty *= self._scale_factors[0] tx *= self._scale_factors[1] th *= self._scale_factors[2] tw *= self._scale_factors[3] tkeypoints *= tf.tile(self._keypoint_scale_factors, [1, num_boxes]) tboxes = tf.stack([ty, tx, th, tw]) return tf.transpose(tf.concat([tboxes, tkeypoints], 0)) def _decode(self, rel_codes, anchors): """Decode relative codes to boxes and keypoints. Args: rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N anchor-encoded boxes and keypoints anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes and keypoints. 
""" ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() num_codes = tf.shape(rel_codes)[0] result = tf.unstack(tf.transpose(rel_codes)) ty, tx, th, tw = result[:4] tkeypoints = result[4:] if self._scale_factors: ty /= self._scale_factors[0] tx /= self._scale_factors[1] th /= self._scale_factors[2] tw /= self._scale_factors[3] tkeypoints /= tf.tile(self._keypoint_scale_factors, [1, num_codes]) w = tf.exp(tw) * wa h = tf.exp(th) * ha ycenter = ty * ha + ycenter_a xcenter = tx * wa + xcenter_a ymin = ycenter - h / 2. xmin = xcenter - w / 2. ymax = ycenter + h / 2. xmax = xcenter + w / 2. decoded_boxes_keypoints = box_list.BoxList( tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) tiled_anchor_centers = tf.tile( tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1]) tiled_anchor_sizes = tf.tile( tf.stack([ha, wa]), [self._num_keypoints, 1]) keypoints = tkeypoints * tiled_anchor_sizes + tiled_anchor_centers keypoints = tf.reshape(tf.transpose(keypoints), [-1, self._num_keypoints, 2]) decoded_boxes_keypoints.add_field(fields.BoxListFields.keypoints, keypoints) return decoded_boxes_keypoints
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/keypoint_box_coder.py
keypoint_box_coder.py
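A sketch of KeypointBoxCoder, which extends the 4-scalar box code with two scalars per keypoint; the keypoints ride along in the BoxList's keypoints field and are normalised by the anchor centre and size, as described in the docstrings above.

import tensorflow.compat.v1 as tf

from object_detection.box_coders import keypoint_box_coder
from object_detection.core import box_list
from object_detection.core import standard_fields as fields

num_keypoints = 2
coder = keypoint_box_coder.KeypointBoxCoder(
    num_keypoints, scale_factors=[10.0, 10.0, 5.0, 5.0])

anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 2.0, 2.0]]))
boxes = box_list.BoxList(tf.constant([[0.5, 0.5, 1.5, 1.5]]))
# Absolute (y, x) keypoint coordinates, shape [N, num_keypoints, 2].
boxes.add_field(fields.BoxListFields.keypoints,
                tf.constant([[[0.5, 0.5], [1.5, 1.5]]]))

rel_codes = coder.encode(boxes, anchors)  # shape [1, 4 + 2 * num_keypoints]
decoded = coder.decode(rel_codes, anchors)
keypoints = decoded.get_field(fields.BoxListFields.keypoints)  # [1, 2, 2]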
"""Library functions for ContextRCNN.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf import tf_slim as slim # The negative value used in padding the invalid weights. _NEGATIVE_PADDING_VALUE = -100000 def filter_weight_value(weights, values, valid_mask): """Filters weights and values based on valid_mask. _NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to avoid their contribution in softmax. 0 will be set for the invalid elements in the values. Args: weights: A float Tensor of shape [batch_size, input_size, context_size]. values: A float Tensor of shape [batch_size, context_size, projected_dimension]. valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means valid and False means invalid. Returns: weights: A float Tensor of shape [batch_size, input_size, context_size]. values: A float Tensor of shape [batch_size, context_size, projected_dimension]. Raises: ValueError: If shape of doesn't match. """ w_batch_size, _, w_context_size = weights.shape v_batch_size, v_context_size, _ = values.shape m_batch_size, m_context_size = valid_mask.shape if w_batch_size != v_batch_size or v_batch_size != m_batch_size: raise ValueError("Please make sure the first dimension of the input" " tensors are the same.") if w_context_size != v_context_size: raise ValueError("Please make sure the third dimension of weights matches" " the second dimension of values.") if w_context_size != m_context_size: raise ValueError("Please make sure the third dimension of the weights" " matches the second dimension of the valid_mask.") valid_mask = valid_mask[..., tf.newaxis] # Force the invalid weights to be very negative so it won't contribute to # the softmax. very_negative_mask = tf.ones( weights.shape, dtype=weights.dtype) * _NEGATIVE_PADDING_VALUE valid_weight_mask = tf.tile(tf.transpose(valid_mask, perm=[0, 2, 1]), [1, weights.shape[1], 1]) weights = tf.where(valid_weight_mask, x=weights, y=very_negative_mask) # Force the invalid values to be 0. values *= tf.cast(valid_mask, values.dtype) return weights, values def compute_valid_mask(num_valid_elements, num_elements): """Computes mask of valid entries within padded context feature. Args: num_valid_elements: A int32 Tensor of shape [batch_size]. num_elements: An int32 Tensor. Returns: A boolean Tensor of the shape [batch_size, num_elements]. True means valid and False means invalid. """ batch_size = num_valid_elements.shape[0] element_idxs = tf.range(num_elements, dtype=tf.int32) batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1]) num_valid_elements = num_valid_elements[..., tf.newaxis] valid_mask = tf.less(batch_element_idxs, num_valid_elements) return valid_mask def project_features(features, projection_dimension, is_training, normalize): """Projects features to another feature space. Args: features: A float Tensor of shape [batch_size, features_size, num_features]. projection_dimension: A int32 Tensor. is_training: A boolean Tensor (affecting batch normalization). normalize: A boolean Tensor. If true, the output features will be l2 normalized on the last dimension. Returns: A float Tensor of shape [batch, features_size, projection_dimension]. """ # TODO(guanhangwu) Figure out a better way of specifying the batch norm # params. 
batch_norm_params = { "is_training": is_training, "decay": 0.97, "epsilon": 0.001, "center": True, "scale": True } batch_size, _, num_features = features.shape features = tf.reshape(features, [-1, num_features]) projected_features = slim.fully_connected( features, num_outputs=projection_dimension, activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params) projected_features = tf.reshape(projected_features, [batch_size, -1, projection_dimension]) if normalize: projected_features = tf.math.l2_normalize(projected_features, axis=-1) return projected_features def attention_block(input_features, context_features, bottleneck_dimension, output_dimension, attention_temperature, keys_values_valid_mask, queries_valid_mask, is_training, block_name="AttentionBlock"): """Generic attention block. Args: input_features: A float Tensor of shape [batch_size, input_size, num_input_features]. context_features: A float Tensor of shape [batch_size, context_size, num_context_features]. bottleneck_dimension: A int32 Tensor representing the bottleneck dimension for intermediate projections. output_dimension: A int32 Tensor representing the last dimension of the output feature. attention_temperature: A float Tensor. It controls the temperature of the softmax for weights calculation. The formula for calculation as follows: weights = exp(weights / temperature) / sum(exp(weights / temperature)) keys_values_valid_mask: A boolean Tensor of shape [batch_size, context_size]. queries_valid_mask: A boolean Tensor of shape [batch_size, max_num_proposals]. is_training: A boolean Tensor (affecting batch normalization). block_name: A string to specify names for different attention blocks Returns: A float Tensor of shape [batch_size, input_size, output_dimension]. """ with tf.variable_scope(block_name): queries = project_features( input_features, bottleneck_dimension, is_training, normalize=True) keys = project_features( context_features, bottleneck_dimension, is_training, normalize=True) values = project_features( context_features, bottleneck_dimension, is_training, normalize=True) # masking out any keys which are padding keys *= tf.cast(keys_values_valid_mask[..., tf.newaxis], keys.dtype) queries *= tf.cast(queries_valid_mask[..., tf.newaxis], queries.dtype) weights = tf.matmul(queries, keys, transpose_b=True) weights, values = filter_weight_value(weights, values, keys_values_valid_mask) weights = tf.identity(tf.nn.softmax(weights / attention_temperature), name=block_name+"AttentionWeights") features = tf.matmul(weights, values) output_features = project_features( features, output_dimension, is_training, normalize=False) return output_features def _compute_box_context_attention(box_features, num_proposals, context_features, valid_context_size, bottleneck_dimension, attention_temperature, is_training, max_num_proposals, use_self_attention=False, use_long_term_attention=True, self_attention_in_sequence=False, num_attention_heads=1, num_attention_layers=1): """Computes the attention feature from the context given a batch of box. Args: box_features: A float Tensor of shape [batch_size * max_num_proposals, height, width, channels]. It is pooled features from first stage proposals. num_proposals: The number of valid box proposals. context_features: A float Tensor of shape [batch_size, context_size, num_context_features]. valid_context_size: A int32 Tensor of shape [batch_size]. bottleneck_dimension: A int32 Tensor representing the bottleneck dimension for intermediate projections. 
attention_temperature: A float Tensor. It controls the temperature of the softmax for weights calculation. The formula for calculation as follows: weights = exp(weights / temperature) / sum(exp(weights / temperature)) is_training: A boolean Tensor (affecting batch normalization). max_num_proposals: The number of box proposals for each image. use_self_attention: Whether to use an attention block across the first stage predicted box features for the input image. use_long_term_attention: Whether to use an attention block into the context features. self_attention_in_sequence: Whether self-attention and long term attention should be in sequence or parallel. num_attention_heads: Number of heads for multi-headed attention. num_attention_layers: Number of heads for multi-layered attention. Returns: A float Tensor of shape [batch_size, max_num_proposals, 1, 1, channels]. """ _, context_size, _ = context_features.shape context_valid_mask = compute_valid_mask(valid_context_size, context_size) total_proposals, height, width, channels = box_features.shape batch_size = total_proposals // max_num_proposals box_features = tf.reshape( box_features, [batch_size, max_num_proposals, height, width, channels]) # Average pools over height and width dimension so that the shape of # box_features becomes [batch_size, max_num_proposals, channels]. box_features = tf.reduce_mean(box_features, [2, 3]) box_valid_mask = compute_valid_mask( num_proposals, box_features.shape[1]) if use_self_attention: self_attention_box_features = attention_block( box_features, box_features, bottleneck_dimension, channels.value, attention_temperature, keys_values_valid_mask=box_valid_mask, queries_valid_mask=box_valid_mask, is_training=is_training, block_name="SelfAttentionBlock") if use_long_term_attention: if use_self_attention and self_attention_in_sequence: input_features = tf.add(self_attention_box_features, box_features) input_features = tf.divide(input_features, 2) else: input_features = box_features original_input_features = input_features for jdx in range(num_attention_layers): layer_features = tf.zeros_like(input_features) for idx in range(num_attention_heads): block_name = "AttentionBlock" + str(idx) + "_AttentionLayer" +str(jdx) attention_features = attention_block( input_features, context_features, bottleneck_dimension, channels.value, attention_temperature, keys_values_valid_mask=context_valid_mask, queries_valid_mask=box_valid_mask, is_training=is_training, block_name=block_name) layer_features = tf.add(layer_features, attention_features) layer_features = tf.divide(layer_features, num_attention_heads) input_features = tf.add(input_features, layer_features) output_features = tf.add(input_features, original_input_features) if not self_attention_in_sequence and use_self_attention: output_features = tf.add(self_attention_box_features, output_features) elif use_self_attention: output_features = self_attention_box_features else: output_features = tf.zeros(self_attention_box_features.shape) # Expands the dimension back to match with the original feature map. output_features = output_features[:, :, tf.newaxis, tf.newaxis, :] return output_features
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_lib.py
context_rcnn_lib.py
import abc import collections import functools import tensorflow.compat.v1 as tf import tensorflow.compat.v2 as tf2 from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import keypoint_ops from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.core import target_assigner as cn_assigner from object_detection.utils import shape_utils from object_detection.utils import target_assigner_utils as ta_utils from object_detection.utils import tf_version # Number of channels needed to predict size and offsets. NUM_OFFSET_CHANNELS = 2 NUM_SIZE_CHANNELS = 2 # Error range for detecting peaks. PEAK_EPSILON = 1e-6 class CenterNetFeatureExtractor(tf.keras.Model): """Base class for feature extractors for the CenterNet meta architecture. Child classes are expected to override the _output_model property which will return 1 or more tensors predicted by the feature extractor. """ __metaclass__ = abc.ABCMeta def __init__(self, name=None, channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.), bgr_ordering=False): """Initializes a CenterNet feature extractor. Args: name: str, the name used for the underlying keras model. channel_means: A tuple of floats, denoting the mean of each channel which will be subtracted from it. If None or empty, we use 0s. channel_stds: A tuple of floats, denoting the standard deviation of each channel. Each channel will be divided by its standard deviation value. If None or empty, we use 1s. bgr_ordering: bool, if set will change the channel ordering to be in the [blue, red, green] order. """ super(CenterNetFeatureExtractor, self).__init__(name=name) if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test channel_means = [0., 0., 0.] if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test channel_stds = [1., 1., 1.] self._channel_means = channel_means self._channel_stds = channel_stds self._bgr_ordering = bgr_ordering def preprocess(self, inputs): """Converts a batch of unscaled images to a scale suitable for the model. This method normalizes the image using the given `channel_means` and `channels_stds` values at initialization time while optionally flipping the channel order if `bgr_ordering` is set. Args: inputs: a [batch, height, width, channels] float32 tensor Returns: outputs: a [batch, height, width, channels] float32 tensor """ if self._bgr_ordering: red, green, blue = tf.unstack(inputs, axis=3) inputs = tf.stack([blue, green, red], axis=3) channel_means = tf.reshape(tf.constant(self._channel_means), [1, 1, 1, -1]) channel_stds = tf.reshape(tf.constant(self._channel_stds), [1, 1, 1, -1]) return (inputs - channel_means)/channel_stds @property @abc.abstractmethod def out_stride(self): """The stride in the output image of the network.""" pass @property @abc.abstractmethod def num_feature_outputs(self): """Ther number of feature outputs returned by the feature extractor.""" pass @property def classification_backbone(self): raise NotImplementedError( 'Classification backbone not supported for {}'.format(type(self))) def make_prediction_net(num_out_channels, kernel_sizes=(3), num_filters=(256), bias_fill=None, use_depthwise=False, name=None, unit_height_conv=True): """Creates a network to predict the given number of output channels. This function is intended to make the prediction heads for the CenterNet meta architecture. Args: num_out_channels: Number of output channels. 
kernel_sizes: A list representing the sizes of the conv kernel in the intermediate layer. Note that the length of the list indicates the number of intermediate conv layers and it must be the same as the length of the num_filters. num_filters: A list representing the number of filters in the intermediate conv layer. Note that the length of the list indicates the number of intermediate conv layers. bias_fill: If not None, is used to initialize the bias in the final conv layer. use_depthwise: If true, use SeparableConv2D to construct the Sequential layers instead of Conv2D. name: Optional name for the prediction net. unit_height_conv: If True, Conv2Ds have asymmetric kernels with height=1. Returns: net: A keras module which when called on an input tensor of size [batch_size, height, width, num_in_channels] returns an output of size [batch_size, height, width, num_out_channels] """ if isinstance(kernel_sizes, int) and isinstance(num_filters, int): kernel_sizes = [kernel_sizes] num_filters = [num_filters] assert len(kernel_sizes) == len(num_filters) if use_depthwise: conv_fn = tf.keras.layers.SeparableConv2D else: conv_fn = tf.keras.layers.Conv2D # We name the convolution operations explicitly because Keras, by default, # uses different names during training and evaluation. By setting the names # here, we avoid unexpected pipeline breakage in TF1. out_conv = tf.keras.layers.Conv2D( num_out_channels, kernel_size=1, name='conv1' if tf_version.is_tf1() else None) if bias_fill is not None: out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill) layers = [] for idx, (kernel_size, num_filter) in enumerate(zip(kernel_sizes, num_filters)): layers.append( conv_fn( num_filter, kernel_size=[1, kernel_size] if unit_height_conv else kernel_size, padding='same', name='conv2_%d' % idx if tf_version.is_tf1() else None)) layers.append(tf.keras.layers.ReLU()) layers.append(out_conv) net = tf.keras.Sequential(layers, name=name) return net def _to_float32(x): return tf.cast(x, tf.float32) def _get_shape(tensor, num_dims): assert len(tensor.shape.as_list()) == num_dims return shape_utils.combined_static_and_dynamic_shape(tensor) def _flatten_spatial_dimensions(batch_images): batch_size, height, width, channels = _get_shape(batch_images, 4) return tf.reshape(batch_images, [batch_size, height * width, channels]) def _multi_range(limit, value_repetitions=1, range_repetitions=1, dtype=tf.int32): """Creates a sequence with optional value duplication and range repetition. As an example (see the Args section for more details), _multi_range(limit=2, value_repetitions=3, range_repetitions=4) returns: [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1] Args: limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive. value_repetitions: Integer. The number of times a value in the sequence is repeated. With value_repetitions=3, the result is [0, 0, 0, 1, 1, 1, ..]. range_repetitions: Integer. The number of times the range is repeated. With range_repetitions=3, the result is [0, 1, 2, .., 0, 1, 2, ..]. dtype: The type of the elements of the resulting tensor. Returns: A 1-D tensor of type `dtype` and size [`limit` * `value_repetitions` * `range_repetitions`] that contains the specified range with given repetitions. 
""" return tf.reshape( tf.tile( tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1), multiples=[range_repetitions, value_repetitions]), [-1]) def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100, per_channel=False): """Returns the top k scores and their locations in a feature map. Given a feature map, the top k values (based on activation) are returned. If `per_channel` is True, the top k values **per channel** are returned. Note that when k equals to 1, ths function uses reduce_max and argmax instead of top_k to make the logics more efficient. The `max_pool_kernel_size` argument allows for selecting local peaks in a region. This filtering is done per channel, so nothing prevents two values at the same location to be returned. Args: feature_map: [batch, height, width, channels] float32 feature map. max_pool_kernel_size: integer, the max pool kernel size to use to pull off peak score locations in a neighborhood (independently for each channel). For example, to make sure no two neighboring values (in the same channel) are returned, set max_pool_kernel_size=3. If None or 1, will not apply max pooling. k: The number of highest scoring locations to return. per_channel: If True, will return the top k scores and locations per feature map channel. If False, the top k across the entire feature map (height x width x channels) are returned. Returns: Tuple of scores: A [batch, N] float32 tensor with scores from the feature map in descending order. If per_channel is False, N = k. Otherwise, N = k * channels, and the first k elements correspond to channel 0, the second k correspond to channel 1, etc. y_indices: A [batch, N] int tensor with y indices of the top k feature map locations. If per_channel is False, N = k. Otherwise, N = k * channels. x_indices: A [batch, N] int tensor with x indices of the top k feature map locations. If per_channel is False, N = k. Otherwise, N = k * channels. channel_indices: A [batch, N] int tensor with channel indices of the top k feature map locations. If per_channel is False, N = k. Otherwise, N = k * channels. """ if not max_pool_kernel_size or max_pool_kernel_size == 1: feature_map_peaks = feature_map else: feature_map_max_pool = tf.nn.max_pool( feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME') feature_map_peak_mask = tf.math.abs( feature_map - feature_map_max_pool) < PEAK_EPSILON # Zero out everything that is not a peak. feature_map_peaks = ( feature_map * _to_float32(feature_map_peak_mask)) batch_size, _, width, num_channels = _get_shape(feature_map, 4) if per_channel: if k == 1: feature_map_flattened = tf.reshape( feature_map_peaks, [batch_size, -1, num_channels]) scores = tf.math.reduce_max(feature_map_flattened, axis=1) peak_flat_indices = tf.math.argmax( feature_map_flattened, axis=1, output_type=tf.dtypes.int32) peak_flat_indices = tf.expand_dims(peak_flat_indices, axis=-1) else: # Perform top k over batch and channels. feature_map_peaks_transposed = tf.transpose(feature_map_peaks, perm=[0, 3, 1, 2]) feature_map_peaks_transposed = tf.reshape( feature_map_peaks_transposed, [batch_size, num_channels, -1]) scores, peak_flat_indices = tf.math.top_k( feature_map_peaks_transposed, k=k) # Convert the indices such that they represent the location in the full # (flattened) feature map of size [batch, height * width * channels]. 
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis] peak_flat_indices = num_channels * peak_flat_indices + channel_idx scores = tf.reshape(scores, [batch_size, -1]) peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1]) else: if k == 1: feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1]) scores = tf.math.reduce_max(feature_map_peaks_flat, axis=1, keepdims=True) peak_flat_indices = tf.expand_dims(tf.math.argmax( feature_map_peaks_flat, axis=1, output_type=tf.dtypes.int32), axis=-1) else: feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1]) scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k) # Get x, y and channel indices corresponding to the top indices in the flat # array. y_indices, x_indices, channel_indices = ( row_col_channel_indices_from_flattened_indices( peak_flat_indices, width, num_channels)) return scores, y_indices, x_indices, channel_indices def prediction_tensors_to_boxes(y_indices, x_indices, height_width_predictions, offset_predictions): """Converts CenterNet class-center, offset and size predictions to boxes. Args: y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to object center locations (expressed in output coordinate frame). x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to object center locations (expressed in output coordinate frame). height_width_predictions: A float tensor of shape [batch_size, height, width, 2] representing the height and width of a box centered at each pixel. offset_predictions: A float tensor of shape [batch_size, height, width, 2] representing the y and x offsets of a box centered at each pixel. This helps reduce the error from downsampling. Returns: detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the the raw bounding box coordinates of boxes. """ batch_size, num_boxes = _get_shape(y_indices, 2) _, height, width, _ = _get_shape(height_width_predictions, 4) height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_boxes), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) new_height_width = tf.gather_nd(height_width_predictions, combined_indices) new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, 2]) new_offsets = tf.gather_nd(offset_predictions, combined_indices) offsets = tf.reshape(new_offsets, [batch_size, num_boxes, 2]) y_indices = _to_float32(y_indices) x_indices = _to_float32(x_indices) height_width = tf.maximum(new_height_width, 0) heights, widths = tf.unstack(height_width, axis=2) y_offsets, x_offsets = tf.unstack(offsets, axis=2) ymin = y_indices + y_offsets - heights / 2.0 xmin = x_indices + x_offsets - widths / 2.0 ymax = y_indices + y_offsets + heights / 2.0 xmax = x_indices + x_offsets + widths / 2.0 ymin = tf.clip_by_value(ymin, 0., height) xmin = tf.clip_by_value(xmin, 0., width) ymax = tf.clip_by_value(ymax, 0., height) xmax = tf.clip_by_value(xmax, 0., width) boxes = tf.stack([ymin, xmin, ymax, xmax], axis=2) return boxes def prediction_tensors_to_temporal_offsets( y_indices, x_indices, offset_predictions): """Converts CenterNet temporal offset map predictions to batched format. This function is similar to the box offset conversion function, as both temporal offsets and box offsets are size-2 vectors. 
Args: y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to object center locations (expressed in output coordinate frame). x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to object center locations (expressed in output coordinate frame). offset_predictions: A float tensor of shape [batch_size, height, width, 2] representing the y and x offsets of a box's center across adjacent frames. Returns: offsets: A tensor of shape [batch_size, num_boxes, 2] holding the the object temporal offsets of (y, x) dimensions. """ batch_size, num_boxes = _get_shape(y_indices, 2) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_boxes), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) new_offsets = tf.gather_nd(offset_predictions, combined_indices) offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1]) return offsets def prediction_tensors_to_keypoint_candidates(keypoint_heatmap_predictions, keypoint_heatmap_offsets, keypoint_score_threshold=0.1, max_pool_kernel_size=1, max_candidates=20, keypoint_depths=None): """Convert keypoint heatmap predictions and offsets to keypoint candidates. Args: keypoint_heatmap_predictions: A float tensor of shape [batch_size, height, width, num_keypoints] representing the per-keypoint heatmaps. keypoint_heatmap_offsets: A float tensor of shape [batch_size, height, width, 2] (or [batch_size, height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True) representing the per-keypoint offsets. keypoint_score_threshold: float, the threshold for considering a keypoint a candidate. max_pool_kernel_size: integer, the max pool kernel size to use to pull off peak score locations in a neighborhood. For example, to make sure no two neighboring values for the same keypoint are returned, set max_pool_kernel_size=3. If None or 1, will not apply any local filtering. max_candidates: integer, maximum number of keypoint candidates per keypoint type. keypoint_depths: (optional) A float tensor of shape [batch_size, height, width, 1] (or [batch_size, height, width, num_keypoints] if 'per_keypoint_depth' is set True) representing the per-keypoint depths. Returns: keypoint_candidates: A tensor of shape [batch_size, max_candidates, num_keypoints, 2] holding the location of keypoint candidates in [y, x] format (expressed in absolute coordinates in the output coordinate frame). keypoint_scores: A float tensor of shape [batch_size, max_candidates, num_keypoints] with the scores for each keypoint candidate. The scores come directly from the heatmap predictions. num_keypoint_candidates: An integer tensor of shape [batch_size, num_keypoints] with the number of candidates for each keypoint type, as it's possible to filter some candidates due to the score threshold. depth_candidates: A tensor of shape [batch_size, max_candidates, num_keypoints] representing the estimated depth of each keypoint candidate. Return None if the input keypoint_depths is None. """ batch_size, _, _, num_keypoints = _get_shape(keypoint_heatmap_predictions, 4) # Get x, y and channel indices corresponding to the top indices in the # keypoint heatmap predictions. # Note that the top k candidates are produced for **each keypoint type**. # Might be worth eventually trying top k in the feature map, independent of # the keypoint type. 
keypoint_scores, y_indices, x_indices, channel_indices = ( top_k_feature_map_locations(keypoint_heatmap_predictions, max_pool_kernel_size=max_pool_kernel_size, k=max_candidates, per_channel=True)) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. _, num_indices = _get_shape(y_indices, 2) combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_indices), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets, combined_indices) selected_offsets = tf.reshape(selected_offsets_flat, [batch_size, num_indices, -1]) y_indices = _to_float32(y_indices) x_indices = _to_float32(x_indices) _, _, num_channels = _get_shape(selected_offsets, 3) if num_channels > 2: # Offsets are per keypoint and the last dimension of selected_offsets # contains all those offsets, so reshape the offsets to make sure that the # last dimension contains (y_offset, x_offset) for a single keypoint. reshaped_offsets = tf.reshape(selected_offsets, [batch_size, num_indices, -1, 2]) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. In this # case, channel_indices indicates which keypoint to use the offset from. channel_combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_indices), _multi_range(num_indices, range_repetitions=batch_size), tf.reshape(channel_indices, [-1]) ], axis=1) offsets = tf.gather_nd(reshaped_offsets, channel_combined_indices) offsets = tf.reshape(offsets, [batch_size, num_indices, -1]) else: offsets = selected_offsets y_offsets, x_offsets = tf.unstack(offsets, axis=2) keypoint_candidates = tf.stack([y_indices + y_offsets, x_indices + x_offsets], axis=2) keypoint_candidates = tf.reshape( keypoint_candidates, [batch_size, num_keypoints, max_candidates, 2]) keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3]) keypoint_scores = tf.reshape( keypoint_scores, [batch_size, num_keypoints, max_candidates]) keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1]) num_candidates = tf.reduce_sum( tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1) depth_candidates = None if keypoint_depths is not None: selected_depth_flat = tf.gather_nd(keypoint_depths, combined_indices) selected_depth = tf.reshape(selected_depth_flat, [batch_size, num_indices, -1]) _, _, num_depth_channels = _get_shape(selected_depth, 3) if num_depth_channels > 1: combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_indices), _multi_range(num_indices, range_repetitions=batch_size), tf.reshape(channel_indices, [-1]) ], axis=1) depth = tf.gather_nd(selected_depth, combined_indices) depth = tf.reshape(depth, [batch_size, num_indices, -1]) else: depth = selected_depth depth_candidates = tf.reshape(depth, [batch_size, num_keypoints, max_candidates]) depth_candidates = tf.transpose(depth_candidates, [0, 2, 1]) return keypoint_candidates, keypoint_scores, num_candidates, depth_candidates def argmax_feature_map_locations(feature_map): """Returns the peak locations in the feature map.""" batch_size, _, width, num_channels = _get_shape(feature_map, 4) feature_map_flattened = tf.reshape( feature_map, [batch_size, -1, num_channels]) peak_flat_indices = tf.math.argmax( feature_map_flattened, axis=1, output_type=tf.dtypes.int32) # Get x and y indices corresponding to the top indices in the flat array. 
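  # A flat index p over [height * width] decomposes as p = y * width + x;
  # row_col_indices_from_flattened_indices below performs that decomposition.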
y_indices, x_indices = ( row_col_indices_from_flattened_indices(peak_flat_indices, width)) channel_indices = tf.tile( tf.range(num_channels)[tf.newaxis, :], [batch_size, 1]) return y_indices, x_indices, channel_indices def prediction_tensors_to_single_instance_kpts( keypoint_heatmap_predictions, keypoint_heatmap_offsets, keypoint_score_heatmap=None): """Convert keypoint heatmap predictions and offsets to keypoint candidates. Args: keypoint_heatmap_predictions: A float tensor of shape [batch_size, height, width, num_keypoints] representing the per-keypoint heatmaps which is used for finding the best keypoint candidate locations. keypoint_heatmap_offsets: A float tensor of shape [batch_size, height, width, 2] (or [batch_size, height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True) representing the per-keypoint offsets. keypoint_score_heatmap: (optional) A float tensor of shape [batch_size, height, width, num_keypoints] representing the heatmap which is used for reporting the confidence scores. If not provided, then the values in the keypoint_heatmap_predictions will be used. Returns: keypoint_candidates: A tensor of shape [batch_size, max_candidates, num_keypoints, 2] holding the location of keypoint candidates in [y, x] format (expressed in absolute coordinates in the output coordinate frame). keypoint_scores: A float tensor of shape [batch_size, max_candidates, num_keypoints] with the scores for each keypoint candidate. The scores come directly from the heatmap predictions. num_keypoint_candidates: An integer tensor of shape [batch_size, num_keypoints] with the number of candidates for each keypoint type, as it's possible to filter some candidates due to the score threshold. """ batch_size, height, width, num_keypoints = _get_shape( keypoint_heatmap_predictions, 4) # Get x, y and channel indices corresponding to the top indices in the # keypoint heatmap predictions. y_indices, x_indices, channel_indices = argmax_feature_map_locations( keypoint_heatmap_predictions) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. _, num_keypoints = _get_shape(y_indices, 2) combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_keypoints), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]), tf.reshape(channel_indices, [-1]) ], axis=1) # Reshape the offsets predictions to shape: # [batch_size, height, width, num_keypoints, 2] keypoint_heatmap_offsets = tf.reshape( keypoint_heatmap_offsets, [batch_size, height, width, num_keypoints, -1]) # shape: [num_keypoints, 2] selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets, combined_indices) y_offsets, x_offsets = tf.unstack(selected_offsets_flat, axis=1) keypoint_candidates = tf.stack([ tf.cast(y_indices, dtype=tf.float32) + tf.expand_dims(y_offsets, axis=0), tf.cast(x_indices, dtype=tf.float32) + tf.expand_dims(x_offsets, axis=0) ], axis=2) keypoint_candidates = tf.expand_dims(keypoint_candidates, axis=0) if keypoint_score_heatmap is None: keypoint_scores = tf.gather_nd( keypoint_heatmap_predictions, combined_indices) else: keypoint_scores = tf.gather_nd(keypoint_score_heatmap, combined_indices) keypoint_scores = tf.expand_dims( tf.expand_dims(keypoint_scores, axis=0), axis=0) return keypoint_candidates, keypoint_scores def _score_to_distance_map(y_grid, x_grid, heatmap, points_y, points_x, score_distance_offset): """Rescores heatmap using the distance information. 
Rescore the heatmap scores using the formula: score / (d + score_distance_offset), where the d is the distance from each pixel location to the target point location. Args: y_grid: A float tensor with shape [height, width] representing the y-coordinate of each pixel grid. x_grid: A float tensor with shape [height, width] representing the x-coordinate of each pixel grid. heatmap: A float tensor with shape [1, height, width, channel] representing the heatmap to be rescored. points_y: A float tensor with shape [channel] representing the y coordinates of the target points for each channel. points_x: A float tensor with shape [channel] representing the x coordinates of the target points for each channel. score_distance_offset: A constant used in the above formula. Returns: A float tensor with shape [1, height, width, channel] representing the rescored heatmap. """ y_diff = y_grid[:, :, tf.newaxis] - points_y x_diff = x_grid[:, :, tf.newaxis] - points_x distance = tf.math.sqrt(y_diff**2 + x_diff**2) return tf.math.divide(heatmap, distance + score_distance_offset) def prediction_to_single_instance_keypoints( object_heatmap, keypoint_heatmap, keypoint_offset, keypoint_regression, kp_params, keypoint_depths=None): """Postprocess function to predict single instance keypoints. This is a simplified postprocessing function based on the assumption that there is only one instance in the image. If there are multiple instances in the image, the model prefers to predict the one that is closest to the image center. Here is a high-level description of what this function does: 1) Object heatmap re-weighted by the distance between each pixel to the image center is used to determine the instance center. 2) Regressed keypoint locations are retrieved from the instance center. The Gaussian kernel is applied to the regressed keypoint locations to re-weight the keypoint heatmap. This is to select the keypoints that are associated with the center instance without using top_k op. 3) The keypoint locations are computed by the re-weighted keypoint heatmap and the keypoint offset. Args: object_heatmap: A float tensor of shape [1, height, width, 1] representing the heapmap of the class. keypoint_heatmap: A float tensor of shape [1, height, width, num_keypoints] representing the per-keypoint heatmaps. keypoint_offset: A float tensor of shape [1, height, width, 2] (or [1, height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True) representing the per-keypoint offsets. keypoint_regression: A float tensor of shape [1, height, width, 2 * num_keypoints] representing the joint regression prediction. kp_params: A `KeypointEstimationParams` object with parameters for a single keypoint class. keypoint_depths: (optional) A float tensor of shape [batch_size, height, width, 1] (or [batch_size, height, width, num_keypoints] if 'per_keypoint_depth' is set True) representing the per-keypoint depths. Returns: A tuple of two tensors: keypoint_candidates: A float tensor with shape [1, 1, num_keypoints, 2] representing the yx-coordinates of the keypoints in the output feature map space. keypoint_scores: A float tensor with shape [1, 1, num_keypoints] representing the keypoint prediction scores. Raises: ValueError: if the input keypoint_std_dev doesn't have valid number of elements (1 or num_keypoints). """ # TODO(yuhuic): add the keypoint depth prediction logics in the browser # postprocessing back. 
del keypoint_depths num_keypoints = len(kp_params.keypoint_std_dev) batch_size, height, width, _ = _get_shape(keypoint_heatmap, 4) # Create the image center location. image_center_y = tf.convert_to_tensor([0.5 * height], dtype=tf.float32) image_center_x = tf.convert_to_tensor([0.5 * width], dtype=tf.float32) (y_grid, x_grid) = ta_utils.image_shape_to_grids(height, width) # Rescore the object heatmap by the distnace to the image center. object_heatmap = _score_to_distance_map( y_grid, x_grid, object_heatmap, image_center_y, image_center_x, kp_params.score_distance_offset) # Pick the highest score and location of the weighted object heatmap. y_indices, x_indices, _ = argmax_feature_map_locations(object_heatmap) _, num_indices = _get_shape(y_indices, 2) combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_indices), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) # Select the regression vectors from the object center. selected_regression_flat = tf.gather_nd(keypoint_regression, combined_indices) # shape: [num_keypoints, 2] regression_offsets = tf.reshape(selected_regression_flat, [num_keypoints, -1]) (y_reg, x_reg) = tf.unstack(regression_offsets, axis=1) y_regressed = tf.cast(y_indices, dtype=tf.float32) + y_reg x_regressed = tf.cast(x_indices, dtype=tf.float32) + x_reg if kp_params.candidate_ranking_mode == 'score_distance_ratio': reweighted_keypoint_heatmap = _score_to_distance_map( y_grid, x_grid, keypoint_heatmap, y_regressed, x_regressed, kp_params.score_distance_offset) else: raise ValueError('Unsupported candidate_ranking_mode: %s' % kp_params.candidate_ranking_mode) # Get the keypoint locations/scores: # keypoint_candidates: [1, 1, num_keypoints, 2] # keypoint_scores: [1, 1, num_keypoints] # depth_candidates: [1, 1, num_keypoints] (keypoint_candidates, keypoint_scores ) = prediction_tensors_to_single_instance_kpts( reweighted_keypoint_heatmap, keypoint_offset, keypoint_score_heatmap=keypoint_heatmap) return keypoint_candidates, keypoint_scores, None def regressed_keypoints_at_object_centers(regressed_keypoint_predictions, y_indices, x_indices): """Returns the regressed keypoints at specified object centers. The original keypoint predictions are regressed relative to each feature map location. The returned keypoints are expressed in absolute coordinates in the output frame (i.e. the center offsets are added to each individual regressed set of keypoints). Args: regressed_keypoint_predictions: A float tensor of shape [batch_size, height, width, 2 * num_keypoints] holding regressed keypoints. The last dimension has keypoint coordinates ordered as follows: [y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints. y_indices: A [batch, num_instances] int tensor holding y indices for object centers. These indices correspond to locations in the output feature map. x_indices: A [batch, num_instances] int tensor holding x indices for object centers. These indices correspond to locations in the output feature map. Returns: A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where regressed keypoints are gathered at the provided locations, and converted to absolute coordinates in the output coordinate frame. """ batch_size, num_instances = _get_shape(y_indices, 2) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. 
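  # Each row of combined_indices is a (batch_index, y, x) triple, so a single
  # gather_nd call can pick the regressed keypoints at every object center
  # across the whole batch.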
combined_indices = tf.stack([ _multi_range(batch_size, value_repetitions=num_instances), tf.reshape(y_indices, [-1]), tf.reshape(x_indices, [-1]) ], axis=1) relative_regressed_keypoints = tf.gather_nd(regressed_keypoint_predictions, combined_indices) relative_regressed_keypoints = tf.reshape( relative_regressed_keypoints, [batch_size, num_instances, -1, 2]) relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack( relative_regressed_keypoints, axis=3) y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1)) x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1)) absolute_regressed_keypoints = tf.stack( [y_indices + relative_regressed_keypoints_y, x_indices + relative_regressed_keypoints_x], axis=3) return tf.reshape(absolute_regressed_keypoints, [batch_size, num_instances, -1]) def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=None, unmatched_keypoint_score=0.1, box_scale=1.2, candidate_search_scale=0.3, candidate_ranking_mode='min_distance', score_distance_offset=1e-6, keypoint_depth_candidates=None, keypoint_score_threshold=0.1): """Refines regressed keypoints by snapping to the nearest candidate keypoints. The initial regressed keypoints represent a full set of keypoints regressed from the centers of the objects. The keypoint candidates are estimated independently from heatmaps, and are not associated with any object instances. This function refines the regressed keypoints by "snapping" to the nearest/highest score/highest score-distance ratio (depending on the candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose"). If no candidates are nearby, the regressed keypoint remains unchanged. In order to snap a regressed keypoint to a candidate keypoint, the following must be satisfied: - the candidate keypoint must be of the same type as the regressed keypoint - the candidate keypoint must not lie outside the predicted boxes (or the boxes which encloses the regressed keypoints for the instance if `bboxes` is not provided). Note that the box is scaled by `regressed_box_scale` in height and width, to provide some margin around the keypoints - the distance to the closest candidate keypoint cannot exceed candidate_search_scale * max(height, width), where height and width refer to the bounding box for the instance. Note that the same candidate keypoint is allowed to snap to regressed keypoints in difference instances. Args: regressed_keypoints: A float tensor of shape [batch_size, num_instances, num_keypoints, 2] with the initial regressed keypoints. keypoint_candidates: A tensor of shape [batch_size, max_candidates, num_keypoints, 2] holding the location of keypoint candidates in [y, x] format (expressed in absolute coordinates in the output coordinate frame). keypoint_scores: A float tensor of shape [batch_size, max_candidates, num_keypoints] indicating the scores for keypoint candidates. num_keypoint_candidates: An integer tensor of shape [batch_size, num_keypoints] indicating the number of valid candidates for each keypoint type, as there may be padding (dim 1) of `keypoint_candidates` and `keypoint_scores`. bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted bounding boxes for each instance, expressed in the output coordinate frame. If not provided, boxes will be computed from regressed keypoints. unmatched_keypoint_score: float, the default score to use for regressed keypoints that are not successfully snapped to a nearby candidate. 
box_scale: float, the multiplier to expand the bounding boxes (either the provided boxes or those which tightly cover the regressed keypoints) for an instance. This scale is typically larger than 1.0 when not providing `bboxes`. candidate_search_scale: float, the scale parameter that multiplies the largest dimension of a bounding box. The resulting distance becomes a search radius for candidates in the vicinity of each regressed keypoint. candidate_ranking_mode: A string as one of ['min_distance', 'score_distance_ratio'] indicating how to select the candidate. If invalid value is provided, an ValueError will be raised. score_distance_offset: The distance offset to apply in the denominator when candidate_ranking_mode is 'score_distance_ratio'. The metric to maximize in this scenario is score / (distance + score_distance_offset). Larger values of score_distance_offset make the keypoint score gain more relative importance. keypoint_depth_candidates: (optional) A float tensor of shape [batch_size, max_candidates, num_keypoints] indicating the depths for keypoint candidates. keypoint_score_threshold: float, The heatmap score threshold for a keypoint to become a valid candidate. Returns: A tuple with: refined_keypoints: A float tensor of shape [batch_size, num_instances, num_keypoints, 2] with the final, refined keypoints. refined_scores: A float tensor of shape [batch_size, num_instances, num_keypoints] with scores associated with all instances and keypoints in `refined_keypoints`. Raises: ValueError: if provided candidate_ranking_mode is not one of ['min_distance', 'score_distance_ratio'] """ batch_size, num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(regressed_keypoints)) max_candidates = keypoint_candidates.shape[1] # Replace all invalid (i.e. padded) keypoint candidates with NaN. # This will prevent them from being considered. range_tiled = tf.tile( tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]), [batch_size, 1, num_keypoints]) num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1), [1, max_candidates, 1]) invalid_candidates = range_tiled >= num_candidates_tiled # Pairwise squared distances between regressed keypoints and candidate # keypoints (for a single keypoint type). # Shape [batch_size, num_instances, 1, num_keypoints, 2]. regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints, axis=2) # Shape [batch_size, 1, max_candidates, num_keypoints, 2]. keypoint_candidates_expanded = tf.expand_dims( keypoint_candidates, axis=1) # Use explicit tensor shape broadcasting (since the tensor dimensions are # expanded to 5D) to make it tf.lite compatible. regressed_keypoint_expanded = tf.tile( regressed_keypoint_expanded, multiples=[1, 1, max_candidates, 1, 1]) keypoint_candidates_expanded = tf.tile( keypoint_candidates_expanded, multiples=[1, num_instances, 1, 1, 1]) # Replace tf.math.squared_difference by "-" operator and tf.multiply ops since # tf.lite convert doesn't support squared_difference with undetermined # dimension. diff = regressed_keypoint_expanded - keypoint_candidates_expanded sqrd_distances = tf.math.reduce_sum(tf.multiply(diff, diff), axis=-1) distances = tf.math.sqrt(sqrd_distances) # Replace the invalid candidated with large constant (10^5) to make sure the # following reduce_min/argmin behaves properly. 
max_dist = 1e5 distances = tf.where( tf.tile( tf.expand_dims(invalid_candidates, axis=1), multiples=[1, num_instances, 1, 1]), tf.ones_like(distances) * max_dist, distances ) # Determine the candidates that have the minimum distance to the regressed # keypoints. Shape [batch_size, num_instances, num_keypoints]. min_distances = tf.math.reduce_min(distances, axis=2) if candidate_ranking_mode == 'min_distance': nearby_candidate_inds = tf.math.argmin(distances, axis=2) elif candidate_ranking_mode == 'score_distance_ratio': # tiled_keypoint_scores: # Shape [batch_size, num_instances, max_candidates, num_keypoints]. tiled_keypoint_scores = tf.tile( tf.expand_dims(keypoint_scores, axis=1), multiples=[1, num_instances, 1, 1]) ranking_scores = tiled_keypoint_scores / (distances + score_distance_offset) nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2) else: raise ValueError('Not recognized candidate_ranking_mode: %s' % candidate_ranking_mode) # Gather the coordinates and scores corresponding to the closest candidates. # Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and # [batch_size, num_instances, num_keypoints], respectively. (nearby_candidate_coords, nearby_candidate_scores, nearby_candidate_depths) = ( _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, nearby_candidate_inds, keypoint_depth_candidates)) if bboxes is None: # Filter out the chosen candidate with score lower than unmatched # keypoint score. mask = tf.cast(nearby_candidate_scores < keypoint_score_threshold, tf.int32) else: bboxes_flattened = tf.reshape(bboxes, [-1, 4]) # Scale the bounding boxes. # Shape [batch_size, num_instances, 4]. boxlist = box_list.BoxList(bboxes_flattened) boxlist_scaled = box_list_ops.scale_height_width( boxlist, box_scale, box_scale) bboxes_scaled = boxlist_scaled.get() bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4]) # Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint. # Shape [batch_size, num_instances, num_keypoints]. bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1]) ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3) # Produce a mask that indicates whether the original regressed keypoint # should be used instead of a candidate keypoint. # Shape [batch_size, num_instances, num_keypoints]. search_radius = ( tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale) mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) + tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) + tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) + tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) + # Filter out the chosen candidate with score lower than unmatched # keypoint score. tf.cast(nearby_candidate_scores < keypoint_score_threshold, tf.int32) + tf.cast(min_distances > search_radius, tf.int32)) mask = mask > 0 # Create refined keypoints where candidate keypoints replace original # regressed keypoints if they are in the vicinity of the regressed keypoints. # Shape [batch_size, num_instances, num_keypoints, 2]. refined_keypoints = tf.where( tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]), regressed_keypoints, nearby_candidate_coords) # Update keypoints scores. In the case where we use the original regressed # keypoints, we use a default score of `unmatched_keypoint_score`. # Shape [batch_size, num_instances, num_keypoints]. 
refined_scores = tf.where( mask, unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores), nearby_candidate_scores) refined_depths = None if nearby_candidate_depths is not None: refined_depths = tf.where(mask, tf.zeros_like(nearby_candidate_depths), nearby_candidate_depths) return refined_keypoints, refined_scores, refined_depths def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds, num_total_keypoints): """Scatter keypoint elements into tensors with full keypoints dimension. Args: keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32 tensor. keypoint_scores: a [batch_size, num_instances, num_keypoints] float32 tensor. keypoint_inds: a list of integers that indicate the keypoint indices for this specific keypoint class. These indices are used to scatter into tensors that have a `num_total_keypoints` dimension. num_total_keypoints: The total number of keypoints that this model predicts. Returns: A tuple with keypoint_coords_padded: a [batch_size, num_instances, num_total_keypoints,2] float32 tensor. keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints] float32 tensor. """ batch_size, num_instances, _, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3]) kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1]) kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1) kpt_coords_scattered = tf.scatter_nd( indices=kpt_inds_tensor, updates=kpt_coords_transposed, shape=[num_total_keypoints, batch_size, num_instances, 2]) kpt_scores_scattered = tf.scatter_nd( indices=kpt_inds_tensor, updates=kpt_scores_transposed, shape=[num_total_keypoints, batch_size, num_instances]) keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3]) keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0]) return keypoint_coords_padded, keypoint_scores_padded def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds, max_instances): """Scatter keypoint elements into tensors with full instance dimension. Args: keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32 tensor. keypoint_scores: a [batch_size, num_instances, num_keypoints] float32 tensor. instance_inds: a list of integers that indicate the instance indices for these keypoints. These indices are used to scatter into tensors that have a `max_instances` dimension. max_instances: The maximum number of instances detected by the model. Returns: A tuple with keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2] float32 tensor. keypoint_scores_padded: a [batch_size, max_instances, num_keypoints] float32 tensor. 
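  For instance (illustrative values): with instance_inds=[0, 2] and
  max_instances=4, the two input instances are scattered to positions 0 and 2
  along the instance dimension of the padded outputs, while positions 1 and 3
  are filled with zeros.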
""" batch_size, _, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3]) kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2]) instance_inds = tf.expand_dims(instance_inds, axis=-1) kpt_coords_scattered = tf.scatter_nd( indices=instance_inds, updates=kpt_coords_transposed, shape=[max_instances, batch_size, num_keypoints, 2]) kpt_scores_scattered = tf.scatter_nd( indices=instance_inds, updates=kpt_scores_transposed, shape=[max_instances, batch_size, num_keypoints]) keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3]) keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2]) return keypoint_coords_padded, keypoint_scores_padded def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, indices, keypoint_depth_candidates=None): """Gathers keypoint candidate coordinates and scores at indices. Args: keypoint_candidates: a float tensor of shape [batch_size, max_candidates, num_keypoints, 2] with candidate coordinates. keypoint_scores: a float tensor of shape [batch_size, max_candidates, num_keypoints] with keypoint scores. indices: an integer tensor of shape [batch_size, num_indices, num_keypoints] with indices. keypoint_depth_candidates: (optional) a float tensor of shape [batch_size, max_candidates, num_keypoints] with keypoint depths. Returns: A tuple with gathered_keypoint_candidates: a float tensor of shape [batch_size, num_indices, num_keypoints, 2] with gathered coordinates. gathered_keypoint_scores: a float tensor of shape [batch_size, num_indices, num_keypoints]. gathered_keypoint_depths: a float tensor of shape [batch_size, num_indices, num_keypoints]. Return None if the input keypoint_depth_candidates is None. """ batch_size, num_indices, num_keypoints = _get_shape(indices, 3) # Transpose tensors so that all batch dimensions are up front. keypoint_candidates_transposed = tf.transpose(keypoint_candidates, [0, 2, 1, 3]) keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1]) nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1]) # TF Lite does not support tf.gather with batch_dims > 0, so we need to use # tf_gather_nd instead and here we prepare the indices for that. 
combined_indices = tf.stack([ _multi_range( batch_size, value_repetitions=num_keypoints * num_indices, dtype=tf.int64), _multi_range( num_keypoints, value_repetitions=num_indices, range_repetitions=batch_size, dtype=tf.int64), tf.reshape(nearby_candidate_inds_transposed, [-1]) ], axis=1) nearby_candidate_coords_transposed = tf.gather_nd( keypoint_candidates_transposed, combined_indices) nearby_candidate_coords_transposed = tf.reshape( nearby_candidate_coords_transposed, [batch_size, num_keypoints, num_indices, -1]) nearby_candidate_scores_transposed = tf.gather_nd(keypoint_scores_transposed, combined_indices) nearby_candidate_scores_transposed = tf.reshape( nearby_candidate_scores_transposed, [batch_size, num_keypoints, num_indices]) gathered_keypoint_candidates = tf.transpose( nearby_candidate_coords_transposed, [0, 2, 1, 3]) gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed, [0, 2, 1]) gathered_keypoint_depths = None if keypoint_depth_candidates is not None: keypoint_depths_transposed = tf.transpose(keypoint_depth_candidates, [0, 2, 1]) nearby_candidate_depths_transposed = tf.gather_nd( keypoint_depths_transposed, combined_indices) nearby_candidate_depths_transposed = tf.reshape( nearby_candidate_depths_transposed, [batch_size, num_keypoints, num_indices]) gathered_keypoint_depths = tf.transpose(nearby_candidate_depths_transposed, [0, 2, 1]) return (gathered_keypoint_candidates, gathered_keypoint_scores, gathered_keypoint_depths) def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols): """Get the index in a flattened array given row and column indices.""" return (row_indices * num_cols) + col_indices def row_col_channel_indices_from_flattened_indices(indices, num_cols, num_channels): """Computes row, column and channel indices from flattened indices. Args: indices: An integer tensor of any shape holding the indices in the flattened space. num_cols: Number of columns in the image (width). num_channels: Number of channels in the image. Returns: row_indices: The row indices corresponding to each of the input indices. Same shape as indices. col_indices: The column indices corresponding to each of the input indices. Same shape as indices. channel_indices. The channel indices corresponding to each of the input indices. """ # Be careful with this function when running a model in float16 precision # (e.g. TF.js with WebGL) because the array indices may not be represented # accurately if they are too large, resulting in incorrect channel indices. # See: # https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_integer_values # # Avoid using mod operator to make the ops more easy to be compatible with # different environments, e.g. WASM. row_indices = (indices // num_channels) // num_cols col_indices = (indices // num_channels) - row_indices * num_cols channel_indices_temp = indices // num_channels channel_indices = indices - channel_indices_temp * num_channels return row_indices, col_indices, channel_indices def row_col_indices_from_flattened_indices(indices, num_cols): """Computes row and column indices from flattened indices. Args: indices: An integer tensor of any shape holding the indices in the flattened space. num_cols: Number of columns in the image (width). Returns: row_indices: The row indices corresponding to each of the input indices. Same shape as indices. col_indices: The column indices corresponding to each of the input indices. Same shape as indices. 
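  For example (illustrative values): with num_cols=5, a flattened index of 13
  maps to row_indices=2 and col_indices=3, since 13 // 5 == 2 and
  13 - 2 * 5 == 3.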
""" # Avoid using mod operator to make the ops more easy to be compatible with # different environments, e.g. WASM. row_indices = indices // num_cols col_indices = indices - row_indices * num_cols return row_indices, col_indices def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height, width): """Computes valid anchor weights for an image assuming pixels will be flattened. This function is useful when we only want to penalize valid areas in the image in the case when padding is used. The function assumes that the loss function will be applied after flattening the spatial dimensions and returns anchor weights accordingly. Args: true_image_shapes: An integer tensor of shape [batch_size, 3] representing the true image shape (without padding) for each sample in the batch. height: height of the prediction from the network. width: width of the prediction from the network. Returns: valid_anchor_weights: a float tensor of shape [batch_size, height * width] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. """ indices = tf.reshape(tf.range(height * width), [1, -1]) batch_size = tf.shape(true_image_shapes)[0] batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices( batch_indices, width, 1) max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1] max_x = _to_float32(tf.expand_dims(max_x, 1)) max_y = _to_float32(tf.expand_dims(max_y, 1)) x_coords = _to_float32(x_coords) y_coords = _to_float32(y_coords) valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y) return _to_float32(valid_mask) def convert_strided_predictions_to_normalized_boxes(boxes, stride, true_image_shapes): """Converts predictions in the output space to normalized boxes. Boxes falling outside the valid image boundary are clipped to be on the boundary. Args: boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw coordinates of boxes in the model's output space. stride: The stride in the output space. true_image_shapes: A tensor of shape [batch_size, 3] representing the true shape of the input not considering padding. Returns: boxes: A tensor of shape [batch_size, num_boxes, 4] representing the coordinates of the normalized boxes. """ # Note: We use tf ops instead of functions in box_list_ops to make this # function compatible with dynamic batch size. boxes = boxes * stride true_image_shapes = tf.tile(true_image_shapes[:, tf.newaxis, :2], [1, 1, 2]) boxes = boxes / tf.cast(true_image_shapes, tf.float32) boxes = tf.clip_by_value(boxes, 0.0, 1.0) return boxes def convert_strided_predictions_to_normalized_keypoints( keypoint_coords, keypoint_scores, stride, true_image_shapes, clip_out_of_frame_keypoints=False): """Converts predictions in the output space to normalized keypoints. If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside the valid image boundary are normalized but not clipped; If clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the valid image boundary are clipped to the closest image boundary and the scores will be set to 0.0. Args: keypoint_coords: A tensor of shape [batch_size, num_instances, num_keypoints, 2] holding the raw coordinates of keypoints in the model's output space. keypoint_scores: A tensor of shape [batch_size, num_instances, num_keypoints] holding the keypoint scores. stride: The stride in the output space. 
true_image_shapes: A tensor of shape [batch_size, 3] representing the true shape of the input not considering padding. clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside the image boundary should be clipped. If True, keypoint coords will be clipped to image boundary. If False, keypoints are normalized but not filtered based on their location. Returns: keypoint_coords_normalized: A tensor of shape [batch_size, num_instances, num_keypoints, 2] representing the coordinates of the normalized keypoints. keypoint_scores: A tensor of shape [batch_size, num_instances, num_keypoints] representing the updated keypoint scores. """ # Flatten keypoints and scores. batch_size, _, _, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) # Scale and normalize keypoints. true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) yscale = float(stride) / tf.cast(true_heights, tf.float32) xscale = float(stride) / tf.cast(true_widths, tf.float32) yx_scale = tf.stack([yscale, xscale], axis=1) keypoint_coords_normalized = keypoint_coords * tf.reshape( yx_scale, [batch_size, 1, 1, 2]) if clip_out_of_frame_keypoints: # Determine the keypoints that are in the true image regions. valid_indices = tf.logical_and( tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0, keypoint_coords_normalized[:, :, :, 0] <= 1.0), tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0, keypoint_coords_normalized[:, :, :, 1] <= 1.0)) batch_window = tf.tile( tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32), multiples=[batch_size, 1]) def clip_to_window(inputs): keypoints, window = inputs return keypoint_ops.clip_to_window(keypoints, window) # Specify the TensorSpec explicitly in the tf.map_fn to make it tf.lite # compatible. kpts_dims = _get_shape(keypoint_coords_normalized, 4) output_spec = tf.TensorSpec( shape=[kpts_dims[1], kpts_dims[2], kpts_dims[3]], dtype=tf.float32) keypoint_coords_normalized = tf.map_fn( clip_to_window, (keypoint_coords_normalized, batch_window), dtype=tf.float32, back_prop=False, fn_output_signature=output_spec) keypoint_scores = tf.where(valid_indices, keypoint_scores, tf.zeros_like(keypoint_scores)) return keypoint_coords_normalized, keypoint_scores def convert_strided_predictions_to_instance_masks( boxes, classes, masks, true_image_shapes, densepose_part_heatmap=None, densepose_surface_coords=None, stride=4, mask_height=256, mask_width=256, score_threshold=0.5, densepose_class_index=-1): """Converts predicted full-image masks into instance masks. For each predicted detection box: * Crop and resize the predicted mask (and optionally DensePose coordinates) based on the detected bounding box coordinates and class prediction. Uses bilinear resampling. * Binarize the mask using the provided score threshold. Args: boxes: A tensor of shape [batch, max_detections, 4] holding the predicted boxes, in normalized coordinates (relative to the true image dimensions). classes: An integer tensor of shape [batch, max_detections] containing the detected class for each box (0-indexed). masks: A [batch, output_height, output_width, num_classes] float32 tensor with class probabilities. true_image_shapes: A tensor of shape [batch, 3] representing the true shape of the inputs not considering padding. densepose_part_heatmap: (Optional) A [batch, output_height, output_width, num_parts] float32 tensor with part scores (i.e. logits). 
densepose_surface_coords: (Optional) A [batch, output_height, output_width, 2 * num_parts] float32 tensor with predicted part coordinates (in vu-format). stride: The stride in the output space. mask_height: The desired resized height for instance masks. mask_width: The desired resized width for instance masks. score_threshold: The threshold at which to convert predicted mask into foreground pixels. densepose_class_index: The class index (0-indexed) corresponding to the class which has DensePose labels (e.g. person class). Returns: A tuple of masks and surface_coords. instance_masks: A [batch_size, max_detections, mask_height, mask_width] uint8 tensor with predicted foreground mask for each instance. If DensePose tensors are provided, then each pixel value in the mask encodes the 1-indexed part. surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2] float32 tensor with (v, u) coordinates. Note that v, u coordinates are only defined on instance masks, and the coordinates at each location of the foreground mask correspond to coordinates on a local part coordinate system (the specific part can be inferred from the `instance_masks` output. If DensePose feature maps are not passed to this function, this output will be None. Raises: ValueError: If one but not both of `densepose_part_heatmap` and `densepose_surface_coords` is provided. """ batch_size, output_height, output_width, _ = ( shape_utils.combined_static_and_dynamic_shape(masks)) input_height = stride * output_height input_width = stride * output_width true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) # If necessary, create dummy DensePose tensors to simplify the map function. densepose_present = True if ((densepose_part_heatmap is not None) ^ (densepose_surface_coords is not None)): raise ValueError('To use DensePose, both `densepose_part_heatmap` and ' '`densepose_surface_coords` must be provided') if densepose_part_heatmap is None and densepose_surface_coords is None: densepose_present = False densepose_part_heatmap = tf.zeros( (batch_size, output_height, output_width, 1), dtype=tf.float32) densepose_surface_coords = tf.zeros( (batch_size, output_height, output_width, 2), dtype=tf.float32) crop_and_threshold_fn = functools.partial( crop_and_threshold_masks, input_height=input_height, input_width=input_width, mask_height=mask_height, mask_width=mask_width, score_threshold=score_threshold, densepose_class_index=densepose_class_index) instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn( crop_and_threshold_fn, elems=[boxes, classes, masks, densepose_part_heatmap, densepose_surface_coords, true_heights, true_widths], dtype=[tf.uint8, tf.float32], back_prop=False) surface_coords = surface_coords if densepose_present else None return instance_masks, surface_coords def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256, mask_width=256, score_threshold=0.5, densepose_class_index=-1): """Crops and thresholds masks based on detection boxes. Args: elems: A tuple of boxes - float32 tensor of shape [max_detections, 4] classes - int32 tensor of shape [max_detections] (0-indexed) masks - float32 tensor of shape [output_height, output_width, num_classes] part_heatmap - float32 tensor of shape [output_height, output_width, num_parts] surf_coords - float32 tensor of shape [output_height, output_width, 2 * num_parts] true_height - scalar int tensor true_width - scalar int tensor input_height: Input height to network. input_width: Input width to network. 
mask_height: Height for resizing mask crops. mask_width: Width for resizing mask crops. score_threshold: The threshold at which to convert predicted mask into foreground pixels. densepose_class_index: scalar int tensor with the class index (0-indexed) for DensePose. Returns: A tuple of all_instances: A [max_detections, mask_height, mask_width] uint8 tensor with a predicted foreground mask for each instance. Background is encoded as 0, and foreground is encoded as a positive integer. Specific part indices are encoded as 1-indexed parts (for classes that have part information). surface_coords: A [max_detections, mask_height, mask_width, 2] float32 tensor with (v, u) coordinates. for each part. """ (boxes, classes, masks, part_heatmap, surf_coords, true_height, true_width) = elems # Boxes are in normalized coordinates relative to true image shapes. Convert # coordinates to be normalized relative to input image shapes (since masks # may still have padding). boxlist = box_list.BoxList(boxes) y_scale = true_height / input_height x_scale = true_width / input_width boxlist = box_list_ops.scale(boxlist, y_scale, x_scale) boxes = boxlist.get() # Convert masks from [output_height, output_width, num_classes] to # [num_classes, output_height, output_width, 1]. num_classes = tf.shape(masks)[-1] masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis] # Tile part and surface coordinate masks for all classes. part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :], multiples=[num_classes, 1, 1, 1]) surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :], multiples=[num_classes, 1, 1, 1]) feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d], axis=-1) # The following tensor has shape # [max_detections, mask_height, mask_width, 1 + 3 * num_parts]. cropped_masks = tf2.image.crop_and_resize( feature_maps_concat, boxes=boxes, box_indices=classes, crop_size=[mask_height, mask_width], method='bilinear') # Split the cropped masks back into instance masks, part masks, and surface # coordinates. num_parts = tf.shape(part_heatmap)[-1] instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split( cropped_masks, [1, num_parts, 2 * num_parts], axis=-1) # Threshold the instance masks. Resulting tensor has shape # [max_detections, mask_height, mask_width, 1]. instance_masks_int = tf.cast( tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32) # Produce a binary mask that is 1.0 only: # - in the foreground region for an instance # - in detections corresponding to the DensePose class det_with_parts = tf.equal(classes, densepose_class_index) det_with_parts = tf.cast( tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32) instance_masks_with_parts = tf.math.multiply(instance_masks_int, det_with_parts) # Similarly, produce a binary mask that holds the foreground masks only for # instances without parts (i.e. non-DensePose classes). det_without_parts = 1 - det_with_parts instance_masks_without_parts = tf.math.multiply(instance_masks_int, det_without_parts) # Assemble a tensor that has standard instance segmentation masks for # non-DensePose classes (with values in [0, 1]), and part segmentation masks # for DensePose classes (with vaues in [0, 1, ..., num_parts]). 
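  # For example, a foreground pixel belonging to a non-DensePose detection
  # contributes instance_masks_without_parts = 1 and
  # instance_masks_with_parts = 0, so its value in `all_instances` below is 1.
  # A foreground pixel of a DensePose detection whose highest scoring part is
  # part 4 (0-indexed) contributes instance_masks_with_parts = 1 and
  # part_mask_int_one_indexed = 5, so its value is 5. Background pixels remain
  # 0 in both cases.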
part_mask_int_zero_indexed = tf.math.argmax( part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis] part_mask_int_one_indexed = part_mask_int_zero_indexed + 1 all_instances = (instance_masks_without_parts + instance_masks_with_parts * part_mask_int_one_indexed) # Gather the surface coordinates for the parts. surface_coords_cropped = tf.reshape( surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2]) surface_coords = gather_surface_coords_for_parts(surface_coords_cropped, part_mask_int_zero_indexed) surface_coords = ( surface_coords * tf.cast(instance_masks_with_parts, tf.float32)) return [tf.squeeze(all_instances, axis=3), surface_coords] def gather_surface_coords_for_parts(surface_coords_cropped, highest_scoring_part): """Gathers the (v, u) coordinates for the highest scoring DensePose parts. Args: surface_coords_cropped: A [max_detections, height, width, num_parts, 2] float32 tensor with (v, u) surface coordinates. highest_scoring_part: A [max_detections, height, width] integer tensor with the highest scoring part (0-indexed) indices for each location. Returns: A [max_detections, height, width, 2] float32 tensor with the (v, u) coordinates selected from the highest scoring parts. """ max_detections, height, width, num_parts, _ = ( shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped)) flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2]) flattened_part_ids = tf.reshape(highest_scoring_part, [-1]) # Produce lookup indices that represent the locations of the highest scoring # parts in the `flattened_surface_coords` tensor. flattened_lookup_indices = ( num_parts * tf.range(max_detections * height * width) + flattened_part_ids) vu_coords_flattened = tf.gather(flattened_surface_coords, flattened_lookup_indices, axis=0) return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2]) def predicted_embeddings_at_object_centers(embedding_predictions, y_indices, x_indices): """Returns the predicted embeddings at specified object centers. Args: embedding_predictions: A float tensor of shape [batch_size, height, width, reid_embed_size] holding predicted embeddings. y_indices: A [batch, num_instances] int tensor holding y indices for object centers. These indices correspond to locations in the output feature map. x_indices: A [batch, num_instances] int tensor holding x indices for object centers. These indices correspond to locations in the output feature map. Returns: A float tensor of shape [batch_size, num_objects, reid_embed_size] where predicted embeddings are gathered at the provided locations. """ batch_size, _, width, _ = _get_shape(embedding_predictions, 4) flattened_indices = flattened_indices_from_row_col_indices( y_indices, x_indices, width) _, num_instances = _get_shape(flattened_indices, 2) embeddings_flat = _flatten_spatial_dimensions(embedding_predictions) embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1) embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1]) return embeddings class ObjectDetectionParams( collections.namedtuple('ObjectDetectionParams', [ 'localization_loss', 'scale_loss_weight', 'offset_loss_weight', 'task_loss_weight', 'scale_head_num_filters', 'scale_head_kernel_sizes', 'offset_head_num_filters', 'offset_head_kernel_sizes' ])): """Namedtuple to host object detection related parameters. This is a wrapper class over the fields that are either the hyper-parameters or the loss functions needed for the object detection task. 
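
  Example (illustrative sketch only; `my_localization_loss` stands in for an
  object_detection.core.losses.Loss instance built elsewhere, and the weights
  are arbitrary):

    od_params = ObjectDetectionParams(
        localization_loss=my_localization_loss,
        scale_loss_weight=0.1,
        offset_loss_weight=1.0,
        task_loss_weight=1.0)
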
  The class is immutable after construction. Please see the __new__ function
  for detailed information on each field.
  """

  __slots__ = ()

  def __new__(cls,
              localization_loss,
              scale_loss_weight,
              offset_loss_weight,
              task_loss_weight=1.0,
              scale_head_num_filters=(256),
              scale_head_kernel_sizes=(3),
              offset_head_num_filters=(256),
              offset_head_kernel_sizes=(3)):
    """Constructor with default values for ObjectDetectionParams.

    Args:
      localization_loss: an object_detection.core.losses.Loss object to compute
        the loss for the center offset and height/width predictions in
        CenterNet.
      scale_loss_weight: float, The weight for localizing box size. Note that
        the scale loss is dependent on the input image size, since we penalize
        the raw height and width. This constant may need to be adjusted
        depending on the input size.
      offset_loss_weight: float, The weight for localizing center offsets.
      task_loss_weight: float, the weight of the object detection loss.
      scale_head_num_filters: filter numbers of the convolutional layers used
        by the object detection box scale prediction head.
      scale_head_kernel_sizes: kernel size of the convolutional layers used
        by the object detection box scale prediction head.
      offset_head_num_filters: filter numbers of the convolutional layers used
        by the object detection box offset prediction head.
      offset_head_kernel_sizes: kernel size of the convolutional layers used
        by the object detection box offset prediction head.

    Returns:
      An initialized ObjectDetectionParams namedtuple.
    """
    return super(ObjectDetectionParams,
                 cls).__new__(cls, localization_loss, scale_loss_weight,
                              offset_loss_weight, task_loss_weight,
                              scale_head_num_filters, scale_head_kernel_sizes,
                              offset_head_num_filters,
                              offset_head_kernel_sizes)


class KeypointEstimationParams(
    collections.namedtuple('KeypointEstimationParams', [
        'task_name', 'class_id', 'keypoint_indices', 'classification_loss',
        'localization_loss', 'keypoint_labels', 'keypoint_std_dev',
        'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',
        'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold',
        'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight',
        'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',
        'candidate_search_scale', 'candidate_ranking_mode',
        'offset_peak_radius', 'per_keypoint_offset', 'predict_depth',
        'per_keypoint_depth', 'keypoint_depth_loss_weight',
        'score_distance_offset', 'clip_out_of_frame_keypoints',
        'rescore_instances', 'heatmap_head_num_filters',
        'heatmap_head_kernel_sizes', 'offset_head_num_filters',
        'offset_head_kernel_sizes', 'regress_head_num_filters',
        'regress_head_kernel_sizes'
    ])):
  """Namedtuple to host keypoint estimation related parameters.

  This is a wrapper class over the fields that are either the hyper-parameters
  or the loss functions needed for the keypoint estimation task.

  The class is immutable after construction. Please see the __new__ function
  for detailed information on each field.
""" __slots__ = () def __new__(cls, task_name, class_id, keypoint_indices, classification_loss, localization_loss, keypoint_labels=None, keypoint_std_dev=None, keypoint_heatmap_loss_weight=1.0, keypoint_offset_loss_weight=1.0, keypoint_regression_loss_weight=1.0, keypoint_candidate_score_threshold=0.1, heatmap_bias_init=-2.19, num_candidates_per_keypoint=100, task_loss_weight=1.0, peak_max_pool_kernel_size=3, unmatched_keypoint_score=0.1, box_scale=1.2, candidate_search_scale=0.3, candidate_ranking_mode='min_distance', offset_peak_radius=0, per_keypoint_offset=False, predict_depth=False, per_keypoint_depth=False, keypoint_depth_loss_weight=1.0, score_distance_offset=1e-6, clip_out_of_frame_keypoints=False, rescore_instances=False, heatmap_head_num_filters=(256), heatmap_head_kernel_sizes=(3), offset_head_num_filters=(256), offset_head_kernel_sizes=(3), regress_head_num_filters=(256), regress_head_kernel_sizes=(3)): """Constructor with default values for KeypointEstimationParams. Args: task_name: string, the name of the task this namedtuple corresponds to. Note that it should be an unique identifier of the task. class_id: int, the ID of the class that contains the target keypoints to considered in this task. For example, if the task is human pose estimation, the class id should correspond to the "human" class. Note that the ID is 0-based, meaning that class 0 corresponds to the first non-background object class. keypoint_indices: A list of integers representing the indicies of the keypoints to be considered in this task. This is used to retrieve the subset of the keypoints from gt_keypoints that should be considered in this task. classification_loss: an object_detection.core.losses.Loss object to compute the loss for the class predictions in CenterNet. localization_loss: an object_detection.core.losses.Loss object to compute the loss for the center offset and height/width predictions in CenterNet. keypoint_labels: A list of strings representing the label text of each keypoint, e.g. "nose", 'left_shoulder". Note that the length of this list should be equal to keypoint_indices. keypoint_std_dev: A list of float represent the standard deviation of the Gaussian kernel used to generate the keypoint heatmap. It is to provide the flexibility of using different sizes of Gaussian kernel for each keypoint class. keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap. keypoint_offset_loss_weight: float, The weight for the keypoint offsets loss. keypoint_regression_loss_weight: float, The weight for keypoint regression loss. Note that the loss is dependent on the input image size, since we penalize the raw height and width. This constant may need to be adjusted depending on the input size. keypoint_candidate_score_threshold: float, The heatmap score threshold for a keypoint to become a valid candidate. heatmap_bias_init: float, the initial value of bias in the convolutional kernel of the class prediction head. If set to None, the bias is initialized with zeros. num_candidates_per_keypoint: The maximum number of candidates to retrieve for each keypoint. task_loss_weight: float, the weight of the keypoint estimation loss. peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak score locations in a neighborhood (independently for each keypoint types). unmatched_keypoint_score: The default score to use for regressed keypoints that are not successfully snapped to a nearby candidate. 
      box_scale: The multiplier to expand the bounding boxes (either the
        provided boxes or those which tightly cover the regressed keypoints).
      candidate_search_scale: The scale parameter that multiplies the largest
        dimension of a bounding box. The resulting distance becomes a search
        radius for candidates in the vicinity of each regressed keypoint.
      candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio']
        indicating how to select the keypoint candidate.
      offset_peak_radius: The radius (in the unit of output pixel) around
        groundtruth heatmap peak to assign the offset targets. If set to 0,
        then the offset target will only be assigned to the heatmap peak (same
        behavior as the original paper).
      per_keypoint_offset: A bool indicating whether to assign offsets for each
        keypoint channel separately. If set to False, the output offset target
        has the shape [batch_size, out_height, out_width, 2] (same behavior as
        the original paper). If set to True, the output offset target has the
        shape [batch_size, out_height, out_width, 2 * num_keypoints]
        (recommended when the offset_peak_radius is not zero).
      predict_depth: A bool indicating whether to predict the depth of each
        keypoint.
      per_keypoint_depth: A bool indicating whether the model predicts the
        depth of each keypoint in independent channels. Similar to
        per_keypoint_offset but for the keypoint depth.
      keypoint_depth_loss_weight: The weight of the keypoint depth loss.
      score_distance_offset: The distance offset to apply in the denominator
        when candidate_ranking_mode is 'score_distance_ratio'. The metric to
        maximize in this scenario is score / (distance + score_distance_offset).
        Larger values of score_distance_offset make the keypoint score gain
        more relative importance.
      clip_out_of_frame_keypoints: Whether keypoints outside the image frame
        should be clipped back to the image boundary. If True, the keypoints
        that are clipped have scores set to 0.0.
      rescore_instances: Whether to rescore instances based on a combination of
        detection score and keypoint scores.
      heatmap_head_num_filters: filter numbers of the convolutional layers used
        by the keypoint heatmap prediction head.
      heatmap_head_kernel_sizes: kernel size of the convolutional layers used
        by the keypoint heatmap prediction head.
      offset_head_num_filters: filter numbers of the convolutional layers used
        by the keypoint offset prediction head.
      offset_head_kernel_sizes: kernel size of the convolutional layers used by
        the keypoint offset prediction head.
      regress_head_num_filters: filter numbers of the convolutional layers used
        by the keypoint regression prediction head.
      regress_head_kernel_sizes: kernel size of the convolutional layers used
        by the keypoint regression prediction head.

    Returns:
      An initialized KeypointEstimationParams namedtuple.
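
    Example (illustrative sketch only; the loss objects stand in for
    object_detection.core.losses.Loss instances built elsewhere, and the
    three-keypoint configuration is arbitrary):

      kp_params = KeypointEstimationParams(
          task_name='human_pose',
          class_id=0,
          keypoint_indices=[0, 1, 2],
          classification_loss=my_keypoint_classification_loss,
          localization_loss=my_keypoint_localization_loss,
          keypoint_labels=['nose', 'left_eye', 'right_eye'],
          keypoint_std_dev=[1.0, 1.0, 1.0],
          candidate_ranking_mode='min_distance')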
""" return super(KeypointEstimationParams, cls).__new__( cls, task_name, class_id, keypoint_indices, classification_loss, localization_loss, keypoint_labels, keypoint_std_dev, keypoint_heatmap_loss_weight, keypoint_offset_loss_weight, keypoint_regression_loss_weight, keypoint_candidate_score_threshold, heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight, peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale, candidate_search_scale, candidate_ranking_mode, offset_peak_radius, per_keypoint_offset, predict_depth, per_keypoint_depth, keypoint_depth_loss_weight, score_distance_offset, clip_out_of_frame_keypoints, rescore_instances, heatmap_head_num_filters, heatmap_head_kernel_sizes, offset_head_num_filters, offset_head_kernel_sizes, regress_head_num_filters, regress_head_kernel_sizes) class ObjectCenterParams( collections.namedtuple('ObjectCenterParams', [ 'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init', 'min_box_overlap_iou', 'max_box_predictions', 'use_labeled_classes', 'keypoint_weights_for_center', 'center_head_num_filters', 'center_head_kernel_sizes' ])): """Namedtuple to store object center prediction related parameters.""" __slots__ = () def __new__(cls, classification_loss, object_center_loss_weight, heatmap_bias_init=-2.19, min_box_overlap_iou=0.7, max_box_predictions=100, use_labeled_classes=False, keypoint_weights_for_center=None, center_head_num_filters=(256), center_head_kernel_sizes=(3)): """Constructor with default values for ObjectCenterParams. Args: classification_loss: an object_detection.core.losses.Loss object to compute the loss for the class predictions in CenterNet. object_center_loss_weight: float, The weight for the object center loss. heatmap_bias_init: float, the initial value of bias in the convolutional kernel of the object center prediction head. If set to None, the bias is initialized with zeros. min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes need have with groundtruth boxes to not be penalized. This is used for computing the class specific center heatmaps. max_box_predictions: int, the maximum number of boxes to predict. use_labeled_classes: boolean, compute the loss only labeled classes. keypoint_weights_for_center: (optional) The keypoint weights used for calculating the location of object center. If provided, the number of weights need to be the same as the number of keypoints. The object center is calculated by the weighted mean of the keypoint locations. If not provided, the object center is determined by the center of the bounding box (default behavior). center_head_num_filters: filter numbers of the convolutional layers used by the object center prediction head. center_head_kernel_sizes: kernel size of the convolutional layers used by the object center prediction head. Returns: An initialized ObjectCenterParams namedtuple. 
""" return super(ObjectCenterParams, cls).__new__(cls, classification_loss, object_center_loss_weight, heatmap_bias_init, min_box_overlap_iou, max_box_predictions, use_labeled_classes, keypoint_weights_for_center, center_head_num_filters, center_head_kernel_sizes) class MaskParams( collections.namedtuple('MaskParams', [ 'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width', 'score_threshold', 'heatmap_bias_init', 'mask_head_num_filters', 'mask_head_kernel_sizes' ])): """Namedtuple to store mask prediction related parameters.""" __slots__ = () def __new__(cls, classification_loss, task_loss_weight=1.0, mask_height=256, mask_width=256, score_threshold=0.5, heatmap_bias_init=-2.19, mask_head_num_filters=(256), mask_head_kernel_sizes=(3)): """Constructor with default values for MaskParams. Args: classification_loss: an object_detection.core.losses.Loss object to compute the loss for the semantic segmentation predictions in CenterNet. task_loss_weight: float, The loss weight for the segmentation task. mask_height: The height of the resized instance segmentation mask. mask_width: The width of the resized instance segmentation mask. score_threshold: The threshold at which to convert predicted mask probabilities (after passing through sigmoid) into foreground pixels. heatmap_bias_init: float, the initial value of bias in the convolutional kernel of the semantic segmentation prediction head. If set to None, the bias is initialized with zeros. mask_head_num_filters: filter numbers of the convolutional layers used by the mask prediction head. mask_head_kernel_sizes: kernel size of the convolutional layers used by the mask prediction head. Returns: An initialized MaskParams namedtuple. """ return super(MaskParams, cls).__new__(cls, classification_loss, task_loss_weight, mask_height, mask_width, score_threshold, heatmap_bias_init, mask_head_num_filters, mask_head_kernel_sizes) class DensePoseParams( collections.namedtuple('DensePoseParams', [ 'class_id', 'classification_loss', 'localization_loss', 'part_loss_weight', 'coordinate_loss_weight', 'num_parts', 'task_loss_weight', 'upsample_to_input_res', 'upsample_method', 'heatmap_bias_init' ])): """Namedtuple to store DensePose prediction related parameters.""" __slots__ = () def __new__(cls, class_id, classification_loss, localization_loss, part_loss_weight=1.0, coordinate_loss_weight=1.0, num_parts=24, task_loss_weight=1.0, upsample_to_input_res=True, upsample_method='bilinear', heatmap_bias_init=-2.19): """Constructor with default values for DensePoseParams. Args: class_id: the ID of the class that contains the DensePose groundtruth. This should typically correspond to the "person" class. Note that the ID is 0-based, meaning that class 0 corresponds to the first non-background object class. classification_loss: an object_detection.core.losses.Loss object to compute the loss for the body part predictions in CenterNet. localization_loss: an object_detection.core.losses.Loss object to compute the loss for the surface coordinate regression in CenterNet. part_loss_weight: The loss weight to apply to part prediction. coordinate_loss_weight: The loss weight to apply to surface coordinate prediction. num_parts: The number of DensePose parts to predict. task_loss_weight: float, the loss weight for the DensePose task. upsample_to_input_res: Whether to upsample the DensePose feature maps to the input resolution before applying loss. Note that the prediction outputs are still at the standard CenterNet output stride. 
upsample_method: Method for upsampling DensePose feature maps. Options are either 'bilinear' or 'nearest'). This takes no effect when `upsample_to_input_res` is False. heatmap_bias_init: float, the initial value of bias in the convolutional kernel of the part prediction head. If set to None, the bias is initialized with zeros. Returns: An initialized DensePoseParams namedtuple. """ return super(DensePoseParams, cls).__new__(cls, class_id, classification_loss, localization_loss, part_loss_weight, coordinate_loss_weight, num_parts, task_loss_weight, upsample_to_input_res, upsample_method, heatmap_bias_init) class TrackParams( collections.namedtuple('TrackParams', [ 'num_track_ids', 'reid_embed_size', 'num_fc_layers', 'classification_loss', 'task_loss_weight' ])): """Namedtuple to store tracking prediction related parameters.""" __slots__ = () def __new__(cls, num_track_ids, reid_embed_size, num_fc_layers, classification_loss, task_loss_weight=1.0): """Constructor with default values for TrackParams. Args: num_track_ids: int. The maximum track ID in the dataset. Used for ReID embedding classification task. reid_embed_size: int. The embedding size for ReID task. num_fc_layers: int. The number of (fully-connected, batch-norm, relu) layers for track ID classification head. classification_loss: an object_detection.core.losses.Loss object to compute the loss for the ReID embedding in CenterNet. task_loss_weight: float, the loss weight for the tracking task. Returns: An initialized TrackParams namedtuple. """ return super(TrackParams, cls).__new__(cls, num_track_ids, reid_embed_size, num_fc_layers, classification_loss, task_loss_weight) class TemporalOffsetParams( collections.namedtuple('TemporalOffsetParams', [ 'localization_loss', 'task_loss_weight' ])): """Namedtuple to store temporal offset related parameters.""" __slots__ = () def __new__(cls, localization_loss, task_loss_weight=1.0): """Constructor with default values for TrackParams. Args: localization_loss: an object_detection.core.losses.Loss object to compute the loss for the temporal offset in CenterNet. task_loss_weight: float, the loss weight for the temporal offset task. Returns: An initialized TemporalOffsetParams namedtuple. """ return super(TemporalOffsetParams, cls).__new__(cls, localization_loss, task_loss_weight) # The following constants are used to generate the keys of the # (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch # class. DETECTION_TASK = 'detection_task' OBJECT_CENTER = 'object_center' BOX_SCALE = 'box/scale' BOX_OFFSET = 'box/offset' KEYPOINT_REGRESSION = 'keypoint/regression' KEYPOINT_HEATMAP = 'keypoint/heatmap' KEYPOINT_OFFSET = 'keypoint/offset' KEYPOINT_DEPTH = 'keypoint/depth' SEGMENTATION_TASK = 'segmentation_task' SEGMENTATION_HEATMAP = 'segmentation/heatmap' DENSEPOSE_TASK = 'densepose_task' DENSEPOSE_HEATMAP = 'densepose/heatmap' DENSEPOSE_REGRESSION = 'densepose/regression' LOSS_KEY_PREFIX = 'Loss' TRACK_TASK = 'track_task' TRACK_REID = 'track/reid' TEMPORALOFFSET_TASK = 'temporal_offset_task' TEMPORAL_OFFSET = 'track/offset' def get_keypoint_name(task_name, head_name): return '%s/%s' % (task_name, head_name) def get_num_instances_from_weights(groundtruth_weights_list): """Computes the number of instances/boxes from the weights in a batch. Args: groundtruth_weights_list: A list of float tensors with shape [max_num_instances] representing whether there is an actual instance in the image (with non-zero value) or is padded to match the max_num_instances (with value 0.0). 
The list represents the batch dimension. Returns: A scalar integer tensor incidating how many instances/boxes are in the images in the batch. Note that this function is usually used to normalize the loss so the minimum return value is 1 to avoid weird behavior. """ num_instances = tf.reduce_sum( [tf.math.count_nonzero(w) for w in groundtruth_weights_list]) num_instances = tf.maximum(num_instances, 1) return num_instances class CenterNetMetaArch(model.DetectionModel): """The CenterNet meta architecture [1]. [1]: https://arxiv.org/abs/1904.07850 """ def __init__(self, is_training, add_summaries, num_classes, feature_extractor, image_resizer_fn, object_center_params, object_detection_params=None, keypoint_params_dict=None, mask_params=None, densepose_params=None, track_params=None, temporal_offset_params=None, use_depthwise=False, compute_heatmap_sparse=False, non_max_suppression_fn=None, unit_height_conv=False): """Initializes a CenterNet model. Args: is_training: Set to True if this model is being built for training. add_summaries: Whether to add tf summaries in the model. num_classes: int, The number of classes that the model should predict. feature_extractor: A CenterNetFeatureExtractor to use to extract features from an image. image_resizer_fn: a callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions and a 1-D tensor of shape [3] indicating shape of true image within the resized image tensor as the resized image tensor could be padded. See builders/image_resizer_builder.py. object_center_params: An ObjectCenterParams namedtuple. This object holds the hyper-parameters for object center prediction. This is required by either object detection or keypoint estimation tasks. object_detection_params: An ObjectDetectionParams namedtuple. This object holds the hyper-parameters necessary for object detection. Please see the class definition for more details. keypoint_params_dict: A dictionary that maps from task name to the corresponding KeypointEstimationParams namedtuple. This object holds the hyper-parameters necessary for multiple keypoint estimations. Please see the class definition for more details. mask_params: A MaskParams namedtuple. This object holds the hyper-parameters for segmentation. Please see the class definition for more details. densepose_params: A DensePoseParams namedtuple. This object holds the hyper-parameters for DensePose prediction. Please see the class definition for more details. Note that if this is provided, it is expected that `mask_params` is also provided. track_params: A TrackParams namedtuple. This object holds the hyper-parameters for tracking. Please see the class definition for more details. temporal_offset_params: A TemporalOffsetParams namedtuple. This object holds the hyper-parameters for offset prediction based tracking. use_depthwise: If true, all task heads will be constructed using separable_conv. Otherwise, standard convoltuions will be used. compute_heatmap_sparse: bool, whether or not to use the sparse version of the Op that computes the center heatmaps. The sparse version scales better with number of channels in the heatmap, but in some cases is known to cause an OOM error. See b/170989061. non_max_suppression_fn: Optional Non Max Suppression function to apply. unit_height_conv: If True, Conv2Ds in prediction heads have asymmetric kernels with height=1. 
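
    Example (illustrative sketch only; the feature extractor, image resizer
    and loss objects are assumed to be constructed elsewhere, e.g. by the
    model builder, and the values are arbitrary):

      model = CenterNetMetaArch(
          is_training=True,
          add_summaries=True,
          num_classes=90,
          feature_extractor=my_feature_extractor,
          image_resizer_fn=my_image_resizer_fn,
          object_center_params=ObjectCenterParams(
              classification_loss=my_classification_loss,
              object_center_loss_weight=1.0),
          object_detection_params=ObjectDetectionParams(
              localization_loss=my_localization_loss,
              scale_loss_weight=0.1,
              offset_loss_weight=1.0))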
""" assert object_detection_params or keypoint_params_dict # Shorten the name for convenience and better formatting. self._is_training = is_training # The Objects as Points paper attaches loss functions to multiple # (`num_feature_outputs`) feature maps in the the backbone. E.g. # for the hourglass backbone, `num_feature_outputs` is 2. self._num_classes = num_classes self._feature_extractor = feature_extractor self._num_feature_outputs = feature_extractor.num_feature_outputs self._stride = self._feature_extractor.out_stride self._image_resizer_fn = image_resizer_fn self._center_params = object_center_params self._od_params = object_detection_params self._kp_params_dict = keypoint_params_dict self._mask_params = mask_params if densepose_params is not None and mask_params is None: raise ValueError('To run DensePose prediction, `mask_params` must also ' 'be supplied.') self._densepose_params = densepose_params self._track_params = track_params self._temporal_offset_params = temporal_offset_params self._use_depthwise = use_depthwise self._compute_heatmap_sparse = compute_heatmap_sparse # subclasses may not implement the unit_height_conv arg, so only provide it # as a kwarg if it is True. kwargs = {'unit_height_conv': unit_height_conv} if unit_height_conv else {} # Construct the prediction head nets. self._prediction_head_dict = self._construct_prediction_heads( num_classes, self._num_feature_outputs, class_prediction_bias_init=self._center_params.heatmap_bias_init, **kwargs) # Initialize the target assigners. self._target_assigner_dict = self._initialize_target_assigners( stride=self._stride, min_box_overlap_iou=self._center_params.min_box_overlap_iou) # Will be used in VOD single_frame_meta_arch for tensor reshape. self._batched_prediction_tensor_names = [] self._non_max_suppression_fn = non_max_suppression_fn super(CenterNetMetaArch, self).__init__(num_classes) @property def batched_prediction_tensor_names(self): if not self._batched_prediction_tensor_names: raise RuntimeError('Must call predict() method to get batched prediction ' 'tensor names.') return self._batched_prediction_tensor_names def _make_prediction_net_list(self, num_feature_outputs, num_out_channels, kernel_sizes=(3), num_filters=(256), bias_fill=None, name=None, unit_height_conv=False): prediction_net_list = [] for i in range(num_feature_outputs): prediction_net_list.append( make_prediction_net( num_out_channels, kernel_sizes=kernel_sizes, num_filters=num_filters, bias_fill=bias_fill, use_depthwise=self._use_depthwise, name='{}_{}'.format(name, i) if name else name, unit_height_conv=unit_height_conv)) return prediction_net_list def _construct_prediction_heads(self, num_classes, num_feature_outputs, class_prediction_bias_init, unit_height_conv=False): """Constructs the prediction heads based on the specific parameters. Args: num_classes: An integer indicating how many classes in total to predict. num_feature_outputs: An integer indicating how many feature outputs to use for calculating the loss. The Objects as Points paper attaches loss functions to multiple (`num_feature_outputs`) feature maps in the the backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2. class_prediction_bias_init: float, the initial value of bias in the convolutional kernel of the class prediction head. If set to None, the bias is initialized with zeros. unit_height_conv: If True, Conv2Ds have asymmetric kernels with height=1. Returns: A dictionary of keras modules generated by calling make_prediction_net function. 
It will also create and set a private member of the class when learning the tracking task. """ prediction_heads = {} prediction_heads[OBJECT_CENTER] = self._make_prediction_net_list( num_feature_outputs, num_classes, kernel_sizes=self._center_params.center_head_kernel_sizes, num_filters=self._center_params.center_head_num_filters, bias_fill=class_prediction_bias_init, name='center', unit_height_conv=unit_height_conv) if self._od_params is not None: prediction_heads[BOX_SCALE] = self._make_prediction_net_list( num_feature_outputs, NUM_SIZE_CHANNELS, kernel_sizes=self._od_params.scale_head_kernel_sizes, num_filters=self._od_params.scale_head_num_filters, name='box_scale', unit_height_conv=unit_height_conv) prediction_heads[BOX_OFFSET] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS, kernel_sizes=self._od_params.offset_head_kernel_sizes, num_filters=self._od_params.offset_head_num_filters, name='box_offset', unit_height_conv=unit_height_conv) if self._kp_params_dict is not None: for task_name, kp_params in self._kp_params_dict.items(): num_keypoints = len(kp_params.keypoint_indices) prediction_heads[get_keypoint_name( task_name, KEYPOINT_HEATMAP)] = self._make_prediction_net_list( num_feature_outputs, num_keypoints, kernel_sizes=kp_params.heatmap_head_kernel_sizes, num_filters=kp_params.heatmap_head_num_filters, bias_fill=kp_params.heatmap_bias_init, name='kpt_heatmap', unit_height_conv=unit_height_conv) prediction_heads[get_keypoint_name( task_name, KEYPOINT_REGRESSION)] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS * num_keypoints, kernel_sizes=kp_params.regress_head_kernel_sizes, num_filters=kp_params.regress_head_num_filters, name='kpt_regress', unit_height_conv=unit_height_conv) if kp_params.per_keypoint_offset: prediction_heads[get_keypoint_name( task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS * num_keypoints, kernel_sizes=kp_params.offset_head_kernel_sizes, num_filters=kp_params.offset_head_num_filters, name='kpt_offset', unit_height_conv=unit_height_conv) else: prediction_heads[get_keypoint_name( task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS, kernel_sizes=kp_params.offset_head_kernel_sizes, num_filters=kp_params.offset_head_num_filters, name='kpt_offset', unit_height_conv=unit_height_conv) if kp_params.predict_depth: num_depth_channel = ( num_keypoints if kp_params.per_keypoint_depth else 1) prediction_heads[get_keypoint_name( task_name, KEYPOINT_DEPTH)] = self._make_prediction_net_list( num_feature_outputs, num_depth_channel, name='kpt_depth', unit_height_conv=unit_height_conv) if self._mask_params is not None: prediction_heads[SEGMENTATION_HEATMAP] = self._make_prediction_net_list( num_feature_outputs, num_classes, kernel_sizes=self._mask_params.mask_head_kernel_sizes, num_filters=self._mask_params.mask_head_num_filters, bias_fill=self._mask_params.heatmap_bias_init, name='seg_heatmap', unit_height_conv=unit_height_conv) if self._densepose_params is not None: prediction_heads[DENSEPOSE_HEATMAP] = self._make_prediction_net_list( num_feature_outputs, self._densepose_params.num_parts, bias_fill=self._densepose_params.heatmap_bias_init, name='dense_pose_heatmap', unit_height_conv=unit_height_conv) prediction_heads[DENSEPOSE_REGRESSION] = self._make_prediction_net_list( num_feature_outputs, 2 * self._densepose_params.num_parts, name='dense_pose_regress', unit_height_conv=unit_height_conv) if self._track_params is not 
None: prediction_heads[TRACK_REID] = self._make_prediction_net_list( num_feature_outputs, self._track_params.reid_embed_size, name='track_reid', unit_height_conv=unit_height_conv) # Creates a classification network to train object embeddings by learning # a projection from embedding space to object track ID space. self.track_reid_classification_net = tf.keras.Sequential() for _ in range(self._track_params.num_fc_layers - 1): self.track_reid_classification_net.add( tf.keras.layers.Dense(self._track_params.reid_embed_size, input_shape=( self._track_params.reid_embed_size,))) self.track_reid_classification_net.add( tf.keras.layers.BatchNormalization()) self.track_reid_classification_net.add(tf.keras.layers.ReLU()) self.track_reid_classification_net.add( tf.keras.layers.Dense(self._track_params.num_track_ids, input_shape=( self._track_params.reid_embed_size,))) if self._temporal_offset_params is not None: prediction_heads[TEMPORAL_OFFSET] = self._make_prediction_net_list( num_feature_outputs, NUM_OFFSET_CHANNELS, name='temporal_offset', unit_height_conv=unit_height_conv) return prediction_heads def _initialize_target_assigners(self, stride, min_box_overlap_iou): """Initializes the target assigners and puts them in a dictionary. Args: stride: An integer indicating the stride of the image. min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes need have with groundtruth boxes to not be penalized. This is used for computing the class specific center heatmaps. Returns: A dictionary of initialized target assigners for each task. """ target_assigners = {} keypoint_weights_for_center = ( self._center_params.keypoint_weights_for_center) if not keypoint_weights_for_center: target_assigners[OBJECT_CENTER] = ( cn_assigner.CenterNetCenterHeatmapTargetAssigner( stride, min_box_overlap_iou, self._compute_heatmap_sparse)) self._center_from_keypoints = False else: # Determining the object center location by keypoint location is only # supported when there is exactly one keypoint prediction task and no # object detection task is specified. assert len(self._kp_params_dict) == 1 and self._od_params is None kp_params = next(iter(self._kp_params_dict.values())) # The number of keypoint_weights_for_center needs to be the same as the # number of keypoints. 
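      # For example, a 17-keypoint human pose task must supply exactly 17
      # center weights; keypoints with larger weights contribute more to the
      # weighted mean that defines the object center.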
assert len(keypoint_weights_for_center) == len(kp_params.keypoint_indices) target_assigners[OBJECT_CENTER] = ( cn_assigner.CenterNetCenterHeatmapTargetAssigner( stride, min_box_overlap_iou, self._compute_heatmap_sparse, keypoint_class_id=kp_params.class_id, keypoint_indices=kp_params.keypoint_indices, keypoint_weights_for_center=keypoint_weights_for_center)) self._center_from_keypoints = True if self._od_params is not None: target_assigners[DETECTION_TASK] = ( cn_assigner.CenterNetBoxTargetAssigner(stride)) if self._kp_params_dict is not None: for task_name, kp_params in self._kp_params_dict.items(): target_assigners[task_name] = ( cn_assigner.CenterNetKeypointTargetAssigner( stride=stride, class_id=kp_params.class_id, keypoint_indices=kp_params.keypoint_indices, keypoint_std_dev=kp_params.keypoint_std_dev, peak_radius=kp_params.offset_peak_radius, per_keypoint_offset=kp_params.per_keypoint_offset, compute_heatmap_sparse=self._compute_heatmap_sparse, per_keypoint_depth=kp_params.per_keypoint_depth)) if self._mask_params is not None: target_assigners[SEGMENTATION_TASK] = ( cn_assigner.CenterNetMaskTargetAssigner(stride)) if self._densepose_params is not None: dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride target_assigners[DENSEPOSE_TASK] = ( cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride)) if self._track_params is not None: target_assigners[TRACK_TASK] = ( cn_assigner.CenterNetTrackTargetAssigner( stride, self._track_params.num_track_ids)) if self._temporal_offset_params is not None: target_assigners[TEMPORALOFFSET_TASK] = ( cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride)) return target_assigners def _compute_object_center_loss(self, input_height, input_width, object_center_predictions, per_pixel_weights): """Computes the object center loss. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. object_center_predictions: A list of float tensors of shape [batch_size, out_height, out_width, num_classes] representing the object center feature maps. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: A float scalar tensor representing the object center loss per instance. """ gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) if self._center_params.use_labeled_classes: gt_labeled_classes_list = self.groundtruth_lists( fields.InputDataFields.groundtruth_labeled_classes) batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0) batch_labeled_classes_shape = tf.shape(batch_labeled_classes) batch_labeled_classes = tf.reshape( batch_labeled_classes, [batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]]) per_pixel_weights = per_pixel_weights * batch_labeled_classes # Convert the groundtruth to targets. 
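    # Depending on the configuration, the center heatmap targets are derived
    # either from the groundtruth keypoints (weighted by
    # keypoint_weights_for_center) or from the groundtruth box centers.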
assigner = self._target_assigner_dict[OBJECT_CENTER] if self._center_from_keypoints: gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) heatmap_targets = assigner.assign_center_targets_from_keypoints( height=input_height, width=input_width, gt_classes_list=gt_classes_list, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list) else: gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) heatmap_targets = assigner.assign_center_targets_from_boxes( height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list) flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets) num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) loss = 0.0 object_center_loss = self._center_params.classification_loss # Loop through each feature output head. for pred in object_center_predictions: pred = _flatten_spatial_dimensions(pred) loss += object_center_loss( pred, flattened_heatmap_targets, weights=per_pixel_weights) loss_per_instance = tf.reduce_sum(loss) / ( float(len(object_center_predictions)) * num_boxes) return loss_per_instance def _compute_object_detection_losses(self, input_height, input_width, prediction_dict, per_pixel_weights): """Computes the weighted object detection losses. This wrapper function calls the function which computes the losses for object detection task and applies corresponding weights to the losses. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. prediction_dict: A dictionary holding predicted tensors output by "predict" function. See "predict" function for more detailed description. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: A dictionary of scalar float tensors representing the weighted losses for object detection task: BOX_SCALE: the weighted scale (height/width) loss. BOX_OFFSET: the weighted object offset loss. """ od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss( scale_predictions=prediction_dict[BOX_SCALE], offset_predictions=prediction_dict[BOX_OFFSET], input_height=input_height, input_width=input_width) loss_dict = {} loss_dict[BOX_SCALE] = ( self._od_params.scale_loss_weight * od_scale_loss) loss_dict[BOX_OFFSET] = ( self._od_params.offset_loss_weight * od_offset_loss) return loss_dict def _compute_box_scale_and_offset_loss(self, input_height, input_width, scale_predictions, offset_predictions): """Computes the scale loss of the object detection task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. scale_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2] representing the prediction heads of the model for object scale (i.e height and width). offset_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2] representing the prediction heads of the model for object offset. Returns: A tuple of two losses: scale_loss: A float scalar tensor representing the object height/width loss normalized by total number of boxes. offset_loss: A float scalar tensor representing the object offset loss normalized by total number of boxes """ # TODO(vighneshb) Explore a size invariant version of scale loss. 
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) num_predictions = float(len(scale_predictions)) assigner = self._target_assigner_dict[DETECTION_TASK] (batch_indices, batch_height_width_targets, batch_offset_targets, batch_weights) = assigner.assign_size_and_offset_targets( height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_weights_list=gt_weights_list) batch_weights = tf.expand_dims(batch_weights, -1) scale_loss = 0 offset_loss = 0 localization_loss_fn = self._od_params.localization_loss for scale_pred, offset_pred in zip(scale_predictions, offset_predictions): # Compute the scale loss. scale_pred = cn_assigner.get_batch_predictions_from_indices( scale_pred, batch_indices) scale_loss += localization_loss_fn( scale_pred, batch_height_width_targets, weights=batch_weights) # Compute the offset loss. offset_pred = cn_assigner.get_batch_predictions_from_indices( offset_pred, batch_indices) offset_loss += localization_loss_fn( offset_pred, batch_offset_targets, weights=batch_weights) scale_loss = tf.reduce_sum(scale_loss) / ( num_predictions * num_boxes) offset_loss = tf.reduce_sum(offset_loss) / ( num_predictions * num_boxes) return scale_loss, offset_loss def _compute_keypoint_estimation_losses(self, task_name, input_height, input_width, prediction_dict, per_pixel_weights): """Computes the weighted keypoint losses.""" kp_params = self._kp_params_dict[task_name] heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP) offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET) regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION) depth_key = get_keypoint_name(task_name, KEYPOINT_DEPTH) heatmap_loss = self._compute_kp_heatmap_loss( input_height=input_height, input_width=input_width, task_name=task_name, heatmap_predictions=prediction_dict[heatmap_key], classification_loss_fn=kp_params.classification_loss, per_pixel_weights=per_pixel_weights) offset_loss = self._compute_kp_offset_loss( input_height=input_height, input_width=input_width, task_name=task_name, offset_predictions=prediction_dict[offset_key], localization_loss_fn=kp_params.localization_loss) reg_loss = self._compute_kp_regression_loss( input_height=input_height, input_width=input_width, task_name=task_name, regression_predictions=prediction_dict[regression_key], localization_loss_fn=kp_params.localization_loss) loss_dict = {} loss_dict[heatmap_key] = ( kp_params.keypoint_heatmap_loss_weight * heatmap_loss) loss_dict[offset_key] = ( kp_params.keypoint_offset_loss_weight * offset_loss) loss_dict[regression_key] = ( kp_params.keypoint_regression_loss_weight * reg_loss) if kp_params.predict_depth: depth_loss = self._compute_kp_depth_loss( input_height=input_height, input_width=input_width, task_name=task_name, depth_predictions=prediction_dict[depth_key], localization_loss_fn=kp_params.localization_loss) loss_dict[depth_key] = kp_params.keypoint_depth_loss_weight * depth_loss return loss_dict def _compute_kp_heatmap_loss(self, input_height, input_width, task_name, heatmap_predictions, classification_loss_fn, per_pixel_weights): """Computes the heatmap loss of the keypoint estimation task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. task_name: A string representing the name of the keypoint task. 
heatmap_predictions: A list of float tensors of shape [batch_size, out_height, out_width, num_keypoints] representing the prediction heads of the model for keypoint heatmap. classification_loss_fn: An object_detection.core.losses.Loss object to compute the loss for the class predictions in CenterNet. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: loss: A float scalar tensor representing the object keypoint heatmap loss normalized by number of instances. """ gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) assigner = self._target_assigner_dict[task_name] (keypoint_heatmap, num_instances_per_kp_type, valid_mask_batch) = assigner.assign_keypoint_heatmap_targets( height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list) flattened_valid_mask = _flatten_spatial_dimensions(valid_mask_batch) flattened_heapmap_targets = _flatten_spatial_dimensions(keypoint_heatmap) # Sum over the number of instances per keypoint types to get the total # number of keypoints. Note that this is used to normalized the loss and we # keep the minimum value to be 1 to avoid generating weird loss value when # no keypoint is in the image batch. num_instances = tf.maximum( tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32), 1.0) loss = 0.0 # Loop through each feature output head. for pred in heatmap_predictions: pred = _flatten_spatial_dimensions(pred) unweighted_loss = classification_loss_fn( pred, flattened_heapmap_targets, weights=tf.ones_like(per_pixel_weights)) # Apply the weights after the loss function to have full control over it. loss += unweighted_loss * per_pixel_weights * flattened_valid_mask loss = tf.reduce_sum(loss) / ( float(len(heatmap_predictions)) * num_instances) return loss def _compute_kp_offset_loss(self, input_height, input_width, task_name, offset_predictions, localization_loss_fn): """Computes the offset loss of the keypoint estimation task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. task_name: A string representing the name of the keypoint task. offset_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2] representing the prediction heads of the model for keypoint offset. localization_loss_fn: An object_detection.core.losses.Loss object to compute the loss for the keypoint offset predictions in CenterNet. Returns: loss: A float scalar tensor representing the keypoint offset loss normalized by number of total keypoints. """ gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) assigner = self._target_assigner_dict[task_name] (batch_indices, batch_offsets, batch_weights) = assigner.assign_keypoints_offset_targets( height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list) # Keypoint offset loss. 
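    # When per_keypoint_offset is enabled, the offset head outputs
    # 2 * num_keypoints channels, so each prediction is first reshaped to
    # [batch_size, out_height, out_width, num_keypoints, 2] before gathering
    # the predictions at the assigned target indices.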
loss = 0.0 for prediction in offset_predictions: batch_size, out_height, out_width, channels = _get_shape(prediction, 4) if channels > 2: prediction = tf.reshape( prediction, shape=[batch_size, out_height, out_width, -1, 2]) prediction = cn_assigner.get_batch_predictions_from_indices( prediction, batch_indices) # The dimensions passed are not as per the doc string but the loss # still computes the correct value. unweighted_loss = localization_loss_fn( prediction, batch_offsets, weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) # Apply the weights after the loss function to have full control over it. loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1) loss = tf.reduce_sum(loss) / ( float(len(offset_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)) return loss def _compute_kp_regression_loss(self, input_height, input_width, task_name, regression_predictions, localization_loss_fn): """Computes the keypoint regression loss of the keypoint estimation task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. task_name: A string representing the name of the keypoint task. regression_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2 * num_keypoints] representing the prediction heads of the model for keypoint regression offset. localization_loss_fn: An object_detection.core.losses.Loss object to compute the loss for the keypoint regression offset predictions in CenterNet. Returns: loss: A float scalar tensor representing the keypoint regression offset loss normalized by number of total keypoints. """ gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) # keypoint regression offset loss. assigner = self._target_assigner_dict[task_name] (batch_indices, batch_regression_offsets, batch_weights) = assigner.assign_joint_regression_targets( height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list, gt_boxes_list=gt_boxes_list) loss = 0.0 for prediction in regression_predictions: batch_size, out_height, out_width, _ = _get_shape(prediction, 4) reshaped_prediction = tf.reshape( prediction, shape=[batch_size, out_height, out_width, -1, 2]) reg_prediction = cn_assigner.get_batch_predictions_from_indices( reshaped_prediction, batch_indices) unweighted_loss = localization_loss_fn( reg_prediction, batch_regression_offsets, weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) # Apply the weights after the loss function to have full control over it. loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1) loss = tf.reduce_sum(loss) / ( float(len(regression_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)) return loss def _compute_kp_depth_loss(self, input_height, input_width, task_name, depth_predictions, localization_loss_fn): """Computes the loss of the keypoint depth estimation. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. task_name: A string representing the name of the keypoint task. 
depth_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 1 (or num_keypoints)] representing the prediction heads of the model for keypoint depth. localization_loss_fn: An object_detection.core.losses.Loss object to compute the loss for the keypoint offset predictions in CenterNet. Returns: loss: A float scalar tensor representing the keypoint depth loss normalized by number of total keypoints. """ kp_params = self._kp_params_dict[task_name] gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) gt_keypoint_depths_list = self.groundtruth_lists( fields.BoxListFields.keypoint_depths) gt_keypoint_depth_weights_list = self.groundtruth_lists( fields.BoxListFields.keypoint_depth_weights) assigner = self._target_assigner_dict[task_name] (batch_indices, batch_depths, batch_weights) = assigner.assign_keypoints_depth_targets( height=input_height, width=input_width, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_classes_list=gt_classes_list, gt_keypoint_depths_list=gt_keypoint_depths_list, gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list) # Keypoint offset loss. loss = 0.0 for prediction in depth_predictions: if kp_params.per_keypoint_depth: prediction = tf.expand_dims(prediction, axis=-1) selected_depths = cn_assigner.get_batch_predictions_from_indices( prediction, batch_indices) # The dimensions passed are not as per the doc string but the loss # still computes the correct value. unweighted_loss = localization_loss_fn( selected_depths, batch_depths, weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) # Apply the weights after the loss function to have full control over it. loss += batch_weights * tf.squeeze(unweighted_loss, axis=1) loss = tf.reduce_sum(loss) / ( float(len(depth_predictions)) * tf.maximum(tf.reduce_sum(batch_weights), 1.0)) return loss def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights): """Computes all the losses associated with segmentation. Args: prediction_dict: The dictionary returned from the predict() method. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: A dictionary with segmentation losses. """ segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP] mask_loss = self._compute_mask_loss( segmentation_heatmap, per_pixel_weights) losses = { SEGMENTATION_HEATMAP: mask_loss } return losses def _compute_mask_loss(self, segmentation_predictions, per_pixel_weights): """Computes the mask loss. Args: segmentation_predictions: A list of float32 tensors of shape [batch_size, out_height, out_width, num_classes]. per_pixel_weights: A float tensor of shape [batch_size, out_height * out_width, 1] with 1s in locations where the spatial coordinates fall within the height and width in true_image_shapes. Returns: A float scalar tensor representing the mask loss. """ gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks) gt_mask_weights_list = None if self.groundtruth_has_field(fields.BoxListFields.mask_weights): gt_mask_weights_list = self.groundtruth_lists( fields.BoxListFields.mask_weights) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) # Convert the groundtruth to targets. 
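    # The segmentation target assigner rasterizes the groundtruth instance
    # masks into per-class heatmap targets at the output resolution, together
    # with a per-pixel weight map. The loop below applies a per-pixel
    # classification loss to each prediction head and normalizes by the number
    # of valid pixels.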
assigner = self._target_assigner_dict[SEGMENTATION_TASK] heatmap_targets, heatmap_weight = assigner.assign_segmentation_targets( gt_masks_list=gt_masks_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list, gt_mask_weights_list=gt_mask_weights_list) flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets) flattened_heatmap_mask = _flatten_spatial_dimensions( heatmap_weight[:, :, :, tf.newaxis]) per_pixel_weights *= flattened_heatmap_mask loss = 0.0 mask_loss_fn = self._mask_params.classification_loss total_pixels_in_loss = tf.math.maximum( tf.reduce_sum(per_pixel_weights), 1) # Loop through each feature output head. for pred in segmentation_predictions: pred = _flatten_spatial_dimensions(pred) loss += mask_loss_fn( pred, flattened_heatmap_targets, weights=per_pixel_weights) # TODO(ronnyvotel): Consider other ways to normalize loss. total_loss = tf.reduce_sum(loss) / ( float(len(segmentation_predictions)) * total_pixels_in_loss) return total_loss def _compute_densepose_losses(self, input_height, input_width, prediction_dict): """Computes the weighted DensePose losses. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. prediction_dict: A dictionary holding predicted tensors output by the "predict" function. See the "predict" function for more detailed description. Returns: A dictionary of scalar float tensors representing the weighted losses for the DensePose task: DENSEPOSE_HEATMAP: the weighted part segmentation loss. DENSEPOSE_REGRESSION: the weighted part surface coordinate loss. """ dp_heatmap_loss, dp_regression_loss = ( self._compute_densepose_part_and_coordinate_losses( input_height=input_height, input_width=input_width, part_predictions=prediction_dict[DENSEPOSE_HEATMAP], surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION])) loss_dict = {} loss_dict[DENSEPOSE_HEATMAP] = ( self._densepose_params.part_loss_weight * dp_heatmap_loss) loss_dict[DENSEPOSE_REGRESSION] = ( self._densepose_params.coordinate_loss_weight * dp_regression_loss) return loss_dict def _compute_densepose_part_and_coordinate_losses( self, input_height, input_width, part_predictions, surface_coord_predictions): """Computes the individual losses for the DensePose task. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. part_predictions: A list of float tensors of shape [batch_size, out_height, out_width, num_parts]. surface_coord_predictions: A list of float tensors of shape [batch_size, out_height, out_width, 2 * num_parts]. Returns: A tuple with two scalar loss tensors: part_prediction_loss and surface_coord_loss. 
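      Both losses are averaged over the number of prediction heads and
      normalized by the number of DensePose points with non-zero weight
      (at least 1).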
""" gt_dp_num_points_list = self.groundtruth_lists( fields.BoxListFields.densepose_num_points) gt_dp_part_ids_list = self.groundtruth_lists( fields.BoxListFields.densepose_part_ids) gt_dp_surface_coords_list = self.groundtruth_lists( fields.BoxListFields.densepose_surface_coords) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) assigner = self._target_assigner_dict[DENSEPOSE_TASK] batch_indices, batch_part_ids, batch_surface_coords, batch_weights = ( assigner.assign_part_and_coordinate_targets( height=input_height, width=input_width, gt_dp_num_points_list=gt_dp_num_points_list, gt_dp_part_ids_list=gt_dp_part_ids_list, gt_dp_surface_coords_list=gt_dp_surface_coords_list, gt_weights_list=gt_weights_list)) part_prediction_loss = 0 surface_coord_loss = 0 classification_loss_fn = self._densepose_params.classification_loss localization_loss_fn = self._densepose_params.localization_loss num_predictions = float(len(part_predictions)) num_valid_points = tf.math.count_nonzero(batch_weights) num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32) for part_pred, surface_coord_pred in zip(part_predictions, surface_coord_predictions): # Potentially upsample the feature maps, so that better quality (i.e. # higher res) groundtruth can be applied. if self._densepose_params.upsample_to_input_res: part_pred = tf.keras.layers.UpSampling2D( self._stride, interpolation=self._densepose_params.upsample_method)( part_pred) surface_coord_pred = tf.keras.layers.UpSampling2D( self._stride, interpolation=self._densepose_params.upsample_method)( surface_coord_pred) # Compute the part prediction loss. part_pred = cn_assigner.get_batch_predictions_from_indices( part_pred, batch_indices[:, 0:3]) part_prediction_loss += classification_loss_fn( part_pred[:, tf.newaxis, :], batch_part_ids[:, tf.newaxis, :], weights=batch_weights[:, tf.newaxis, tf.newaxis]) # Compute the surface coordinate loss. batch_size, out_height, out_width, _ = _get_shape( surface_coord_pred, 4) surface_coord_pred = tf.reshape( surface_coord_pred, [batch_size, out_height, out_width, -1, 2]) surface_coord_pred = cn_assigner.get_batch_predictions_from_indices( surface_coord_pred, batch_indices) surface_coord_loss += localization_loss_fn( surface_coord_pred, batch_surface_coords, weights=batch_weights[:, tf.newaxis]) part_prediction_loss = tf.reduce_sum(part_prediction_loss) / ( num_predictions * num_valid_points) surface_coord_loss = tf.reduce_sum(surface_coord_loss) / ( num_predictions * num_valid_points) return part_prediction_loss, surface_coord_loss def _compute_track_losses(self, input_height, input_width, prediction_dict): """Computes all the losses associated with tracking. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. prediction_dict: The dictionary returned from the predict() method. Returns: A dictionary with tracking losses. """ object_reid_predictions = prediction_dict[TRACK_REID] embedding_loss = self._compute_track_embedding_loss( input_height=input_height, input_width=input_width, object_reid_predictions=object_reid_predictions) losses = { TRACK_REID: embedding_loss } return losses def _compute_track_embedding_loss(self, input_height, input_width, object_reid_predictions): """Computes the object ReID loss. The embedding is trained as a classification task where the target is the ID of each track among all tracks in the whole dataset. 
Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. object_reid_predictions: A list of float tensors of shape [batch_size, out_height, out_width, reid_embed_size] representing the object embedding feature maps. Returns: A float scalar tensor representing the object ReID loss per instance. """ gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids) gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) # Convert the groundtruth to targets. assigner = self._target_assigner_dict[TRACK_TASK] batch_indices, batch_weights, track_targets = assigner.assign_track_targets( height=input_height, width=input_width, gt_track_ids_list=gt_track_ids_list, gt_boxes_list=gt_boxes_list, gt_weights_list=gt_weights_list) batch_weights = tf.expand_dims(batch_weights, -1) loss = 0.0 object_reid_loss = self._track_params.classification_loss # Loop through each feature output head. for pred in object_reid_predictions: embedding_pred = cn_assigner.get_batch_predictions_from_indices( pred, batch_indices) reid_classification = self.track_reid_classification_net(embedding_pred) loss += object_reid_loss( reid_classification, track_targets, weights=batch_weights) loss_per_instance = tf.reduce_sum(loss) / ( float(len(object_reid_predictions)) * num_boxes) return loss_per_instance def _compute_temporal_offset_loss(self, input_height, input_width, prediction_dict): """Computes the temporal offset loss for tracking. Args: input_height: An integer scalar tensor representing input image height. input_width: An integer scalar tensor representing input image width. prediction_dict: The dictionary returned from the predict() method. Returns: A dictionary with track/temporal_offset losses. """ gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_offsets_list = self.groundtruth_lists( fields.BoxListFields.temporal_offsets) gt_match_list = self.groundtruth_lists( fields.BoxListFields.track_match_flags) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) num_boxes = tf.cast( get_num_instances_from_weights(gt_weights_list), tf.float32) offset_predictions = prediction_dict[TEMPORAL_OFFSET] num_predictions = float(len(offset_predictions)) assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK] (batch_indices, batch_offset_targets, batch_weights) = assigner.assign_temporal_offset_targets( height=input_height, width=input_width, gt_boxes_list=gt_boxes_list, gt_offsets_list=gt_offsets_list, gt_match_list=gt_match_list, gt_weights_list=gt_weights_list) batch_weights = tf.expand_dims(batch_weights, -1) offset_loss_fn = self._temporal_offset_params.localization_loss loss_dict = {} offset_loss = 0 for offset_pred in offset_predictions: offset_pred = cn_assigner.get_batch_predictions_from_indices( offset_pred, batch_indices) offset_loss += offset_loss_fn(offset_pred[:, None], batch_offset_targets[:, None], weights=batch_weights) offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes) loss_dict[TEMPORAL_OFFSET] = offset_loss return loss_dict def _should_clip_keypoints(self): """Returns a boolean indicating whether keypoint clipping should occur. If there is only one keypoint task, clipping is controlled by the field `clip_out_of_frame_keypoints`. 
If there are multiple keypoint tasks, clipping logic is defined based on unanimous agreement of keypoint parameters. If there is any ambiguity, clip_out_of_frame_keypoints is set to False (default). """ kp_params_iterator = iter(self._kp_params_dict.values()) if len(self._kp_params_dict) == 1: kp_params = next(kp_params_iterator) return kp_params.clip_out_of_frame_keypoints # Multi-task setting. kp_params = next(kp_params_iterator) should_clip = kp_params.clip_out_of_frame_keypoints for kp_params in kp_params_iterator: if kp_params.clip_out_of_frame_keypoints != should_clip: return False return should_clip def _rescore_instances(self, classes, scores, keypoint_scores): """Rescores instances based on detection and keypoint scores. Args: classes: A [batch, max_detections] int32 tensor with detection classes. scores: A [batch, max_detections] float32 tensor with detection scores. keypoint_scores: A [batch, max_detections, total_num_keypoints] float32 tensor with keypoint scores. Returns: A [batch, max_detections] float32 tensor with possibly altered detection scores. """ batch, max_detections, total_num_keypoints = ( shape_utils.combined_static_and_dynamic_shape(keypoint_scores)) classes_tiled = tf.tile(classes[:, :, tf.newaxis], multiples=[1, 1, total_num_keypoints]) # TODO(yuhuic): Investigate whether this function will create subgraphs in # tflite that will cause the model to run slower at inference. for kp_params in self._kp_params_dict.values(): if not kp_params.rescore_instances: continue class_id = kp_params.class_id keypoint_indices = kp_params.keypoint_indices kpt_mask = tf.reduce_sum( tf.one_hot(keypoint_indices, depth=total_num_keypoints), axis=0) kpt_mask_tiled = tf.tile(kpt_mask[tf.newaxis, tf.newaxis, :], multiples=[batch, max_detections, 1]) class_and_keypoint_mask = tf.math.logical_and( classes_tiled == class_id, kpt_mask_tiled == 1.0) class_and_keypoint_mask_float = tf.cast(class_and_keypoint_mask, dtype=tf.float32) visible_keypoints = tf.math.greater(keypoint_scores, 0.0) num_visible_keypoints = tf.reduce_sum( class_and_keypoint_mask_float * tf.cast(visible_keypoints, tf.float32), axis=-1) num_visible_keypoints = tf.math.maximum(num_visible_keypoints, 1.0) scores_for_class = (1./num_visible_keypoints) * ( tf.reduce_sum(class_and_keypoint_mask_float * scores[:, :, tf.newaxis] * keypoint_scores, axis=-1)) scores = tf.where(classes == class_id, scores_for_class, scores) return scores def preprocess(self, inputs): outputs = shape_utils.resize_images_and_return_shapes( inputs, self._image_resizer_fn) resized_inputs, true_image_shapes = outputs return (self._feature_extractor.preprocess(resized_inputs), true_image_shapes) def predict(self, preprocessed_inputs, _): """Predicts CenterNet prediction tensors given an input batch. Feature extractors are free to produce predictions from multiple feature maps and therefore we return a dictionary mapping strings to lists. E.g. the hourglass backbone produces two feature maps. Args: preprocessed_inputs: a [batch, height, width, channels] float32 tensor representing a batch of images. Returns: prediction_dict: a dictionary holding predicted tensors with 'preprocessed_inputs' - The input image after being resized and preprocessed by the feature extractor. 'object_center' - A list of size num_feature_outputs containing float tensors of size [batch_size, output_height, output_width, num_classes] representing the predicted object center heatmap logits. 
'box/scale' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2] representing the predicted box height and width at each output location. This field exists only when object detection task is specified. 'box/offset' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2] representing the predicted y and x offsets at each output location. '$TASK_NAME/keypoint_heatmap' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, num_keypoints] representing the predicted keypoint heatmap logits. '$TASK_NAME/keypoint_offset' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2] representing the predicted keypoint offsets at each output location. '$TASK_NAME/keypoint_regression' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2 * num_keypoints] representing the predicted keypoint regression at each output location. 'segmentation/heatmap' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, num_classes] representing the mask logits. 'densepose/heatmap' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, num_parts] representing the mask logits for each part. 'densepose/regression' - [optional] A list of size num_feature_outputs holding float tensors of size [batch_size, output_height, output_width, 2 * num_parts] representing the DensePose surface coordinate predictions. Note the $TASK_NAME is provided by the KeypointEstimation namedtuple used to differentiate between different keypoint tasks. """ features_list = self._feature_extractor(preprocessed_inputs) predictions = {} for head_name, heads in self._prediction_head_dict.items(): predictions[head_name] = [ head(feature) for (feature, head) in zip(features_list, heads) ] predictions['preprocessed_inputs'] = preprocessed_inputs self._batched_prediction_tensor_names = predictions.keys() return predictions def loss(self, prediction_dict, true_image_shapes, scope=None): """Computes scalar loss tensors with respect to provided groundtruth. This function implements the various CenterNet losses. Args: prediction_dict: a dictionary holding predicted tensors returned by "predict" function. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. scope: Optional scope name. Returns: A dictionary mapping the keys [ 'Loss/object_center', 'Loss/box/scale', (optional) 'Loss/box/offset', (optional) 'Loss/$TASK_NAME/keypoint/heatmap', (optional) 'Loss/$TASK_NAME/keypoint/offset', (optional) 'Loss/$TASK_NAME/keypoint/regression', (optional) 'Loss/segmentation/heatmap', (optional) 'Loss/densepose/heatmap', (optional) 'Loss/densepose/regression', (optional) 'Loss/track/reid'] (optional) 'Loss/track/offset'] (optional) scalar tensors corresponding to the losses for different tasks. Note the $TASK_NAME is provided by the KeypointEstimation namedtuple used to differentiate between different keypoint tasks. 
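      The task-specific losses have already been scaled by their respective
      task_loss_weight values, and every key is prefixed with LOSS_KEY_PREFIX
      so that the losses are grouped together in TensorBoard.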
""" _, input_height, input_width, _ = _get_shape( prediction_dict['preprocessed_inputs'], 4) output_height, output_width = (tf.maximum(input_height // self._stride, 1), tf.maximum(input_width // self._stride, 1)) # TODO(vighneshb) Explore whether using floor here is safe. output_true_image_shapes = tf.ceil( tf.to_float(true_image_shapes) / self._stride) valid_anchor_weights = get_valid_anchor_weights_in_flattened_image( output_true_image_shapes, output_height, output_width) valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2) object_center_loss = self._compute_object_center_loss( object_center_predictions=prediction_dict[OBJECT_CENTER], input_height=input_height, input_width=input_width, per_pixel_weights=valid_anchor_weights) losses = { OBJECT_CENTER: self._center_params.object_center_loss_weight * object_center_loss } if self._od_params is not None: od_losses = self._compute_object_detection_losses( input_height=input_height, input_width=input_width, prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights) for key in od_losses: od_losses[key] = od_losses[key] * self._od_params.task_loss_weight losses.update(od_losses) if self._kp_params_dict is not None: for task_name, params in self._kp_params_dict.items(): kp_losses = self._compute_keypoint_estimation_losses( task_name=task_name, input_height=input_height, input_width=input_width, prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights) for key in kp_losses: kp_losses[key] = kp_losses[key] * params.task_loss_weight losses.update(kp_losses) if self._mask_params is not None: seg_losses = self._compute_segmentation_losses( prediction_dict=prediction_dict, per_pixel_weights=valid_anchor_weights) for key in seg_losses: seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight losses.update(seg_losses) if self._densepose_params is not None: densepose_losses = self._compute_densepose_losses( input_height=input_height, input_width=input_width, prediction_dict=prediction_dict) for key in densepose_losses: densepose_losses[key] = ( densepose_losses[key] * self._densepose_params.task_loss_weight) losses.update(densepose_losses) if self._track_params is not None: track_losses = self._compute_track_losses( input_height=input_height, input_width=input_width, prediction_dict=prediction_dict) for key in track_losses: track_losses[key] = ( track_losses[key] * self._track_params.task_loss_weight) losses.update(track_losses) if self._temporal_offset_params is not None: offset_losses = self._compute_temporal_offset_loss( input_height=input_height, input_width=input_width, prediction_dict=prediction_dict) for key in offset_losses: offset_losses[key] = ( offset_losses[key] * self._temporal_offset_params.task_loss_weight) losses.update(offset_losses) # Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the # losses will be grouped together in Tensorboard. return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val) for key, val in losses.items()]) def postprocess(self, prediction_dict, true_image_shapes, **params): """Produces boxes given a prediction dict returned by predict(). Although predict returns a list of tensors, only the last tensor in each list is used for making box predictions. Args: prediction_dict: a dictionary holding predicted tensors from "predict" function. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. 
**params: Currently ignored. Returns: detections: a dictionary containing the following fields detection_boxes - A tensor of shape [batch, max_detections, 4] holding the predicted boxes. detection_boxes_strided: A tensor of shape [batch_size, num_detections, 4] holding the predicted boxes in absolute coordinates of the feature extractor's final layer output. detection_scores: A tensor of shape [batch, max_detections] holding the predicted score for each box. detection_multiclass_scores: A tensor of shape [batch, max_detection, num_classes] holding multiclass score for each box. detection_classes: An integer tensor of shape [batch, max_detections] containing the detected class for each box. num_detections: An integer tensor of shape [batch] containing the number of detected boxes for each sample in the batch. detection_keypoints: (Optional) A float tensor of shape [batch, max_detections, num_keypoints, 2] with normalized keypoints. Any invalid keypoints have their coordinates and scores set to 0.0. detection_keypoint_scores: (Optional) A float tensor of shape [batch, max_detection, num_keypoints] with scores for each keypoint. detection_masks: (Optional) A uint8 tensor of shape [batch, max_detections, mask_height, mask_width] with masks for each detection. Background is specified with 0, and foreground is specified with positive integers (1 for standard instance segmentation mask, and 1-indexed parts for DensePose task). detection_surface_coords: (Optional) A float32 tensor of shape [batch, max_detection, mask_height, mask_width, 2] with DensePose surface coordinates, in (v, u) format. detection_embeddings: (Optional) A float tensor of shape [batch, max_detections, reid_embed_size] containing object embeddings. """ object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1]) # Get x, y and channel indices corresponding to the top indices in the class # center predictions. detection_scores, y_indices, x_indices, channel_indices = ( top_k_feature_map_locations( object_center_prob, max_pool_kernel_size=3, k=self._center_params.max_box_predictions)) multiclass_scores = tf.gather_nd( object_center_prob, tf.stack([y_indices, x_indices], -1), batch_dims=1) num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1) postprocess_dict = { fields.DetectionResultFields.detection_scores: detection_scores, fields.DetectionResultFields.detection_multiclass_scores: multiclass_scores, fields.DetectionResultFields.detection_classes: channel_indices, fields.DetectionResultFields.num_detections: num_detections, } boxes_strided = None if self._od_params: boxes_strided = ( prediction_tensors_to_boxes(y_indices, x_indices, prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1])) boxes = convert_strided_predictions_to_normalized_boxes( boxes_strided, self._stride, true_image_shapes) postprocess_dict.update({ fields.DetectionResultFields.detection_boxes: boxes, 'detection_boxes_strided': boxes_strided }) if self._kp_params_dict: # If the model is trained to predict only one class of object and its # keypoint, we fall back to a simpler postprocessing function which uses # the ops that are supported by tf.lite on GPU. 
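      # Otherwise the general multi-class path below loops over every keypoint
      # task and class; it is more flexible but relies on ops that are not
      # supported by tf.lite.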
clip_keypoints = self._should_clip_keypoints() if len(self._kp_params_dict) == 1 and self._num_classes == 1: (keypoints, keypoint_scores, keypoint_depths) = self._postprocess_keypoints_single_class( prediction_dict, channel_indices, y_indices, x_indices, boxes_strided, num_detections) keypoints, keypoint_scores = ( convert_strided_predictions_to_normalized_keypoints( keypoints, keypoint_scores, self._stride, true_image_shapes, clip_out_of_frame_keypoints=clip_keypoints)) if keypoint_depths is not None: postprocess_dict.update({ fields.DetectionResultFields.detection_keypoint_depths: keypoint_depths }) else: # Multi-class keypoint estimation task does not support depth # estimation. assert all([ not kp_dict.predict_depth for kp_dict in self._kp_params_dict.values() ]) keypoints, keypoint_scores = self._postprocess_keypoints_multi_class( prediction_dict, channel_indices, y_indices, x_indices, None, num_detections) keypoints, keypoint_scores = ( convert_strided_predictions_to_normalized_keypoints( keypoints, keypoint_scores, self._stride, true_image_shapes, clip_out_of_frame_keypoints=clip_keypoints)) # Update instance scores based on keypoints. scores = self._rescore_instances( channel_indices, detection_scores, keypoint_scores) postprocess_dict.update({ fields.DetectionResultFields.detection_scores: scores, fields.DetectionResultFields.detection_keypoints: keypoints, fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores }) if self._od_params is None: # Still output the box prediction by enclosing the keypoints for # evaluation purpose. boxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes( keypoints, keypoints_axis=2) postprocess_dict.update({ fields.DetectionResultFields.detection_boxes: boxes, }) if self._mask_params: masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1]) densepose_part_heatmap, densepose_surface_coords = None, None densepose_class_index = 0 if self._densepose_params: densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1] densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1] densepose_class_index = self._densepose_params.class_id instance_masks, surface_coords = ( convert_strided_predictions_to_instance_masks( boxes, channel_indices, masks, true_image_shapes, densepose_part_heatmap, densepose_surface_coords, stride=self._stride, mask_height=self._mask_params.mask_height, mask_width=self._mask_params.mask_width, score_threshold=self._mask_params.score_threshold, densepose_class_index=densepose_class_index)) postprocess_dict[ fields.DetectionResultFields.detection_masks] = instance_masks if self._densepose_params: postprocess_dict[ fields.DetectionResultFields.detection_surface_coords] = ( surface_coords) if self._track_params: embeddings = self._postprocess_embeddings(prediction_dict, y_indices, x_indices) postprocess_dict.update({ fields.DetectionResultFields.detection_embeddings: embeddings }) if self._temporal_offset_params: offsets = prediction_tensors_to_temporal_offsets( y_indices, x_indices, prediction_dict[TEMPORAL_OFFSET][-1]) postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets if self._non_max_suppression_fn: boxes = tf.expand_dims( postprocess_dict.pop(fields.DetectionResultFields.detection_boxes), axis=-2) multiclass_scores = postprocess_dict[ fields.DetectionResultFields.detection_multiclass_scores] num_valid_boxes = postprocess_dict.pop( fields.DetectionResultFields.num_detections) # Remove scores and classes as NMS will compute these form multiclass # scores. 
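      # `boxes` was expanded above to [batch, max_detections, 1, 4] so that the
      # same box coordinates can be shared across classes by the multiclass NMS
      # function; whatever remains in postprocess_dict is passed through as
      # additional fields and reordered consistently with the kept boxes.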
      postprocess_dict.pop(fields.DetectionResultFields.detection_scores)
      postprocess_dict.pop(fields.DetectionResultFields.detection_classes)
      (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
       num_detections) = self._non_max_suppression_fn(
           boxes,
           multiclass_scores,
           additional_fields=postprocess_dict,
           num_valid_boxes=num_valid_boxes)
      postprocess_dict = nmsed_additional_fields
      postprocess_dict[
          fields.DetectionResultFields.detection_boxes] = nmsed_boxes
      postprocess_dict[
          fields.DetectionResultFields.detection_scores] = nmsed_scores
      postprocess_dict[
          fields.DetectionResultFields.detection_classes] = nmsed_classes
      postprocess_dict[
          fields.DetectionResultFields.num_detections] = num_detections
      postprocess_dict.update(nmsed_additional_fields)
    return postprocess_dict

  def postprocess_single_instance_keypoints(
      self, prediction_dict, true_image_shapes):
    """Postprocess for predicting single instance keypoints.

    This postprocess function is a special case of predicting the keypoint of
    a single instance in the image (the original CenterNet postprocess supports
    multi-instance prediction). Due to these simplifying assumptions, this
    postprocessing function achieves much faster inference time.
    Here is a short list of the modifications made in this function:

    1) Assume the model predicts only a single keypoint class.
    2) Assume there is only one instance in the image. If multiple instances
       appear in the image, the model tends to predict the one that is closer
       to the image center (the other ones are considered as background and
       are rejected by the model).
    3) Avoid using top_k ops in the postprocessing logic since it is slower
       than using argmax.
    4) The predictions other than the keypoints are ignored, e.g. boxes.
    5) The input batch size is assumed to be 1.

    Args:
      prediction_dict: a dictionary holding predicted tensors from "predict"
        function.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
        the form [height, width, channels] indicating the shapes of true images
        in the resized images, as resized images can be padded with zeros.

    Returns:
      detections: a dictionary containing the following fields
        detection_keypoints: A float tensor of shape [1, 1, num_keypoints, 2]
          with normalized keypoints. Any invalid keypoints have their
          coordinates and scores set to 0.0.
        detection_keypoint_scores: A float tensor of shape
          [1, 1, num_keypoints] with scores for each keypoint.
    """
    # The number of keypoint tasks is expected to be 1.
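    # The single task's heatmap, offset and regression heads are decoded
    # directly with argmax-based logic (see
    # prediction_to_single_instance_keypoints below), so no top_k or per-class
    # gather ops are required and the resulting graph stays tf.lite friendly.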
assert len(self._kp_params_dict) == 1 task_name, kp_params = next(iter(self._kp_params_dict.items())) keypoint_heatmap = tf.nn.sigmoid(prediction_dict[get_keypoint_name( task_name, KEYPOINT_HEATMAP)][-1]) keypoint_offset = prediction_dict[get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1] keypoint_regression = prediction_dict[get_keypoint_name( task_name, KEYPOINT_REGRESSION)][-1] object_heatmap = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1]) keypoint_depths = None if kp_params.predict_depth: keypoint_depths = prediction_dict[get_keypoint_name( task_name, KEYPOINT_DEPTH)][-1] keypoints, keypoint_scores, keypoint_depths = ( prediction_to_single_instance_keypoints( object_heatmap=object_heatmap, keypoint_heatmap=keypoint_heatmap, keypoint_offset=keypoint_offset, keypoint_regression=keypoint_regression, kp_params=kp_params, keypoint_depths=keypoint_depths)) keypoints, keypoint_scores = ( convert_strided_predictions_to_normalized_keypoints( keypoints, keypoint_scores, self._stride, true_image_shapes, clip_out_of_frame_keypoints=False)) postprocess_dict = { fields.DetectionResultFields.detection_keypoints: keypoints, fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores } if kp_params.predict_depth: postprocess_dict.update({ fields.DetectionResultFields.detection_keypoint_depths: keypoint_depths }) return postprocess_dict def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices): """Performs postprocessing on embedding predictions. Args: prediction_dict: a dictionary holding predicted tensors, returned from the predict() method. This dictionary should contain embedding prediction feature maps for tracking task. y_indices: A [batch_size, max_detections] int tensor with y indices for all object centers. x_indices: A [batch_size, max_detections] int tensor with x indices for all object centers. Returns: embeddings: A [batch_size, max_detection, reid_embed_size] float32 tensor with L2 normalized embeddings extracted from detection box centers. """ embedding_predictions = prediction_dict[TRACK_REID][-1] embeddings = predicted_embeddings_at_object_centers( embedding_predictions, y_indices, x_indices) embeddings, _ = tf.linalg.normalize(embeddings, axis=-1) return embeddings def _scatter_keypoints_to_batch(self, num_ind, kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example, max_detections, total_num_keypoints): """Helper function to convert scattered keypoints into batch.""" def left_fn(kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example): # Scatter into tensor where instances align with original detection # instances. New shape of keypoint coordinates and scores are # [1, max_detections, num_total_keypoints, 2] and # [1, max_detections, num_total_keypoints], respectively. return _pad_to_full_instance_dim( kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example, self._center_params.max_box_predictions) def right_fn(): kpt_coords_for_example_all_det = tf.zeros( [1, max_detections, total_num_keypoints, 2], dtype=tf.float32) kpt_scores_for_example_all_det = tf.zeros( [1, max_detections, total_num_keypoints], dtype=tf.float32) return (kpt_coords_for_example_all_det, kpt_scores_for_example_all_det) left_fn = functools.partial(left_fn, kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example) # Use dimension values instead of tf.size for tf.lite compatibility. 
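    # If at least one instance of the keypoint class was detected, scatter its
    # keypoints back into the [1, max_detections, ...] layout aligned with the
    # original detections; otherwise emit all-zero tensors of the same shape.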
return tf.cond(num_ind[0] > 0, left_fn, right_fn) def _postprocess_keypoints_multi_class(self, prediction_dict, classes, y_indices, x_indices, boxes, num_detections): """Performs postprocessing on keypoint predictions. This is the most general keypoint postprocessing function which supports multiple keypoint tasks (e.g. human and dog keypoints) and multiple object detection classes. Note that it is the most expensive postprocessing logics and is currently not tf.lite/tf.js compatible. See _postprocess_keypoints_single_class if you plan to export the model in more portable format. Args: prediction_dict: a dictionary holding predicted tensors, returned from the predict() method. This dictionary should contain keypoint prediction feature maps for each keypoint task. classes: A [batch_size, max_detections] int tensor with class indices for all detected objects. y_indices: A [batch_size, max_detections] int tensor with y indices for all object centers. x_indices: A [batch_size, max_detections] int tensor with x indices for all object centers. boxes: A [batch_size, max_detections, 4] float32 tensor with bounding boxes in (un-normalized) output space. num_detections: A [batch_size] int tensor with the number of valid detections for each image. Returns: A tuple of keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32 tensor with keypoints in the output (strided) coordinate frame. keypoint_scores: a [batch_size, max_detections, num_total_keypoints] float32 tensor with keypoint scores. """ total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict in self._kp_params_dict.values()) batch_size, max_detections = _get_shape(classes, 2) kpt_coords_for_example_list = [] kpt_scores_for_example_list = [] for ex_ind in range(batch_size): kpt_coords_for_class_list = [] kpt_scores_for_class_list = [] instance_inds_for_class_list = [] for task_name, kp_params in self._kp_params_dict.items(): keypoint_heatmap = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1] keypoint_offsets = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1] keypoint_regression = prediction_dict[ get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1] instance_inds = self._get_instance_indices( classes, num_detections, ex_ind, kp_params.class_id) num_ind = _get_shape(instance_inds, 1) def true_fn(keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params): """Logics to execute when instance_inds is not an empty set.""" # Gather the feature map locations corresponding to the object class. y_indices_for_kpt_class = tf.gather(y_indices, instance_inds, axis=1) x_indices_for_kpt_class = tf.gather(x_indices, instance_inds, axis=1) if boxes is None: boxes_for_kpt_class = None else: boxes_for_kpt_class = tf.gather(boxes, instance_inds, axis=1) # Postprocess keypoints and scores for class and single image. Shapes # are [1, num_instances_i, num_keypoints_i, 2] and # [1, num_instances_i, num_keypoints_i], respectively. Note that # num_instances_i and num_keypoints_i refers to the number of # instances and keypoints for class i, respectively. 
(kpt_coords_for_class, kpt_scores_for_class, _) = ( self._postprocess_keypoints_for_class_and_image( keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices_for_kpt_class, x_indices_for_kpt_class, boxes_for_kpt_class, ex_ind, kp_params)) # Expand keypoint dimension (with padding) so that coordinates and # scores have shape [1, num_instances_i, num_total_keypoints, 2] and # [1, num_instances_i, num_total_keypoints], respectively. kpts_coords_for_class_padded, kpt_scores_for_class_padded = ( _pad_to_full_keypoint_dim(kpt_coords_for_class, kpt_scores_for_class, kp_params.keypoint_indices, total_num_keypoints)) return kpts_coords_for_class_padded, kpt_scores_for_class_padded def false_fn(): """Logics to execute when the instance_inds is an empty set.""" return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32), tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32)) true_fn = functools.partial( true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, instance_inds, ex_ind, kp_params) # Use dimension values instead of tf.size for tf.lite compatibility. results = tf.cond(num_ind[0] > 0, true_fn, false_fn) kpt_coords_for_class_list.append(results[0]) kpt_scores_for_class_list.append(results[1]) instance_inds_for_class_list.append(instance_inds) # Concatenate all keypoints across all classes (single example). kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1) kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1) instance_inds_for_example = tf.concat(instance_inds_for_class_list, axis=0) (kpt_coords_for_example_all_det, kpt_scores_for_example_all_det) = self._scatter_keypoints_to_batch( num_ind, kpt_coords_for_example, kpt_scores_for_example, instance_inds_for_example, max_detections, total_num_keypoints) kpt_coords_for_example_list.append(kpt_coords_for_example_all_det) kpt_scores_for_example_list.append(kpt_scores_for_example_all_det) # Concatenate all keypoints and scores from all examples in the batch. # Shapes are [batch_size, max_detections, num_total_keypoints, 2] and # [batch_size, max_detections, num_total_keypoints], respectively. keypoints = tf.concat(kpt_coords_for_example_list, axis=0) keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0) return keypoints, keypoint_scores def _postprocess_keypoints_single_class(self, prediction_dict, classes, y_indices, x_indices, boxes, num_detections): """Performs postprocessing on keypoint predictions (single class only). This function handles the special case of keypoint task that the model predicts only one class of the bounding box/keypoint (e.g. person). By the assumption, the function uses only tf.lite supported ops and should run faster. Args: prediction_dict: a dictionary holding predicted tensors, returned from the predict() method. This dictionary should contain keypoint prediction feature maps for each keypoint task. classes: A [batch_size, max_detections] int tensor with class indices for all detected objects. y_indices: A [batch_size, max_detections] int tensor with y indices for all object centers. x_indices: A [batch_size, max_detections] int tensor with x indices for all object centers. boxes: A [batch_size, max_detections, 4] float32 tensor with bounding boxes in (un-normalized) output space. num_detections: A [batch_size] int tensor with the number of valid detections for each image. 
    Returns:
      A tuple of
      keypoints: a [batch_size, max_detections, num_total_keypoints, 2] float32
        tensor with keypoints in the output (strided) coordinate frame.
      keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
        float32 tensor with keypoint scores.
    """
    # This function only works when there is exactly one keypoint task and the
    # number of classes is one. For more general use cases, please use
    # _postprocess_keypoints_multi_class instead.
    assert len(self._kp_params_dict) == 1 and self._num_classes == 1
    task_name, kp_params = next(iter(self._kp_params_dict.items()))
    keypoint_heatmap = prediction_dict[
        get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
    keypoint_offsets = prediction_dict[
        get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
    keypoint_regression = prediction_dict[
        get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
    keypoint_depth_predictions = None
    if kp_params.predict_depth:
      keypoint_depth_predictions = prediction_dict[get_keypoint_name(
          task_name, KEYPOINT_DEPTH)][-1]

    batch_size, _ = _get_shape(classes, 2)
    kpt_coords_for_example_list = []
    kpt_scores_for_example_list = []
    kpt_depths_for_example_list = []
    for ex_ind in range(batch_size):
      # Postprocess keypoints and scores for class and single image. Shapes
      # are [1, max_detections, num_keypoints, 2] and
      # [1, max_detections, num_keypoints], respectively.
      (kpt_coords_for_class, kpt_scores_for_class, kpt_depths_for_class) = (
          self._postprocess_keypoints_for_class_and_image(
              keypoint_heatmap, keypoint_offsets, keypoint_regression, classes,
              y_indices, x_indices, boxes, ex_ind, kp_params,
              keypoint_depth_predictions=keypoint_depth_predictions))
      kpt_coords_for_example_list.append(kpt_coords_for_class)
      kpt_scores_for_example_list.append(kpt_scores_for_class)
      kpt_depths_for_example_list.append(kpt_depths_for_class)

    # Concatenate all keypoints and scores from all examples in the batch.
    # Shapes are [batch_size, max_detections, num_keypoints, 2] and
    # [batch_size, max_detections, num_keypoints], respectively.
    keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
    keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
    keypoint_depths = None
    if kp_params.predict_depth:
      keypoint_depths = tf.concat(kpt_depths_for_example_list, axis=0)
    return keypoints, keypoint_scores, keypoint_depths

  def _get_instance_indices(self, classes, num_detections, batch_index,
                            class_id):
    """Gets the instance indices that match the target class ID.

    Args:
      classes: A [batch_size, max_detections] int tensor with class indices for
        all detected objects.
      num_detections: A [batch_size] int tensor with the number of valid
        detections for each image.
      batch_index: An integer specifying the index for an example in the batch.
      class_id: Class id.

    Returns:
      instance_inds: A [num_instances] int32 tensor where each element indicates
        the instance location within the `classes` tensor. This is useful to
        associate the refined keypoints with the original detections (i.e.
        boxes).
    """
    classes = classes[batch_index:batch_index+1, ...]
    _, max_detections = shape_utils.combined_static_and_dynamic_shape(
        classes)
    # Get the detection indices corresponding to the target class.
    # Call tf.math.equal with matched tensor shape to make it tf.lite
    # compatible.
    valid_detections_with_kpt_class = tf.math.logical_and(
        tf.range(max_detections) < num_detections[batch_index],
        tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id)))
    instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
    # Cast the indices tensor to int32 for tf.lite compatibility.
return tf.cast(instance_inds, tf.int32) def _postprocess_keypoints_for_class_and_image( self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, y_indices, x_indices, boxes, batch_index, kp_params, keypoint_depth_predictions=None): """Postprocess keypoints for a single image and class. Args: keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32 tensor with keypoint heatmaps. keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with local offsets to keypoint centers. keypoint_regression: A [batch_size, height, width, 2 * num_keypoints] float32 tensor with regressed offsets to all keypoints. classes: A [batch_size, max_detections] int tensor with class indices for all detected objects. y_indices: A [batch_size, max_detections] int tensor with y indices for all object centers. x_indices: A [batch_size, max_detections] int tensor with x indices for all object centers. boxes: A [batch_size, max_detections, 4] float32 tensor with detected boxes in the output (strided) frame. batch_index: An integer specifying the index for an example in the batch. kp_params: A `KeypointEstimationParams` object with parameters for a single keypoint class. keypoint_depth_predictions: (optional) A [batch_size, height, width, 1] float32 tensor representing the keypoint depth prediction. Returns: A tuple of refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor with refined keypoints for a single class in a single image, expressed in the output (strided) coordinate frame. Note that `num_instances` is a dynamic dimension, and corresponds to the number of valid detections for the specific class. refined_scores: A [1, num_instances, num_keypoints] float32 tensor with keypoint scores. refined_depths: A [1, num_instances, num_keypoints] float32 tensor with keypoint depths. Return None if the input keypoint_depth_predictions is None. """ num_keypoints = len(kp_params.keypoint_indices) keypoint_heatmap = tf.nn.sigmoid( keypoint_heatmap[batch_index:batch_index+1, ...]) keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...] keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...] keypoint_depths = None if keypoint_depth_predictions is not None: keypoint_depths = keypoint_depth_predictions[batch_index:batch_index + 1, ...] y_indices = y_indices[batch_index:batch_index+1, ...] x_indices = x_indices[batch_index:batch_index+1, ...] if boxes is None: boxes_slice = None else: boxes_slice = boxes[batch_index:batch_index+1, ...] # Gather the regressed keypoints. Final tensor has shape # [1, num_instances, num_keypoints, 2]. regressed_keypoints_for_objects = regressed_keypoints_at_object_centers( keypoint_regression, y_indices, x_indices) regressed_keypoints_for_objects = tf.reshape( regressed_keypoints_for_objects, [1, -1, num_keypoints, 2]) # Get the candidate keypoints and scores. # The shape of keypoint_candidates and keypoint_scores is: # [1, num_candidates_per_keypoint, num_keypoints, 2] and # [1, num_candidates_per_keypoint, num_keypoints], respectively. 
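    # Candidates are peaks of the keypoint heatmap above the score threshold,
    # adjusted by the local offset head. refine_keypoints below then matches
    # each regressed keypoint to a nearby candidate (according to
    # candidate_ranking_mode), falling back to the regressed location with
    # unmatched_keypoint_score when no suitable candidate is found.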
(keypoint_candidates, keypoint_scores, num_keypoint_candidates, keypoint_depth_candidates) = ( prediction_tensors_to_keypoint_candidates( keypoint_heatmap, keypoint_offsets, keypoint_score_threshold=( kp_params.keypoint_candidate_score_threshold), max_pool_kernel_size=kp_params.peak_max_pool_kernel_size, max_candidates=kp_params.num_candidates_per_keypoint, keypoint_depths=keypoint_depths)) # Get the refined keypoints and scores, of shape # [1, num_instances, num_keypoints, 2] and # [1, num_instances, num_keypoints], respectively. (refined_keypoints, refined_scores, refined_depths) = refine_keypoints( regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores, num_keypoint_candidates, bboxes=boxes_slice, unmatched_keypoint_score=kp_params.unmatched_keypoint_score, box_scale=kp_params.box_scale, candidate_search_scale=kp_params.candidate_search_scale, candidate_ranking_mode=kp_params.candidate_ranking_mode, score_distance_offset=kp_params.score_distance_offset, keypoint_depth_candidates=keypoint_depth_candidates, keypoint_score_threshold=( kp_params.keypoint_candidate_score_threshold)) return refined_keypoints, refined_scores, refined_depths def regularization_losses(self): return [] def restore_map(self, fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=False): raise RuntimeError('CenterNetMetaArch not supported under TF1.x.') def restore_from_objects(self, fine_tune_checkpoint_type='detection'): """Returns a map of Trackable objects to load from a foreign checkpoint. Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module or Checkpoint). This enables the model to initialize based on weights from another task. For example, the feature extractor variables from a classification model can be used to bootstrap training of an object detector. When loading from an object detection model, the checkpoint model should have the same parameters as this detection model with exception of the num_classes parameter. Note that this function is intended to be used to restore Keras-based models when running Tensorflow 2, whereas restore_map (not implemented in CenterNet) is intended to be used to restore Slim-based models when running Tensorflow 1.x. TODO(jonathanhuang): Make this function consistent with other meta-architectures. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`, `fine_tune`. Default 'detection'. 'detection': used when loading models pre-trained on other detection tasks. With this checkpoint type the weights of the feature extractor are expected under the attribute 'feature_extractor'. 'classification': used when loading models pre-trained on an image classification task. Note that only the encoder section of the network is loaded and not the upsampling layers. With this checkpoint type, the weights of only the encoder section are expected under the attribute 'feature_extractor'. 'fine_tune': used when loading the entire CenterNet feature extractor pre-trained on other tasks. The checkpoints saved during CenterNet model training can be directly loaded using this type. With this checkpoint type, the weights of the feature extractor are expected under the attribute 'model._feature_extractor'. For more details, see the tensorflow section on Loading mechanics. 
https://www.tensorflow.org/guide/checkpoint#loading_mechanics Returns: A dict mapping keys to Trackable objects (tf.Module or Checkpoint). """ if fine_tune_checkpoint_type == 'detection': feature_extractor_model = tf.train.Checkpoint( _feature_extractor=self._feature_extractor) return {'model': feature_extractor_model} elif fine_tune_checkpoint_type == 'classification': return { 'feature_extractor': self._feature_extractor.classification_backbone } elif fine_tune_checkpoint_type == 'full': return {'model': self} elif fine_tune_checkpoint_type == 'fine_tune': raise ValueError(('"fine_tune" is no longer supported for CenterNet. ' 'Please set fine_tune_checkpoint_type to "detection"' ' which has the same functionality. If you are using' ' the ExtremeNet checkpoint, download the new version' ' from the model zoo.')) else: raise ValueError('Unknown fine tune checkpoint type {}'.format( fine_tune_checkpoint_type)) def updates(self): if tf_version.is_tf2(): raise RuntimeError('This model is intended to be used with model_lib_v2 ' 'which does not support updates()') else: update_ops = [] slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Copy the slim ops to avoid modifying the collection if slim_update_ops: update_ops.extend(slim_update_ops) return update_ops
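# Example usage sketch (not part of the library API): restoring the feature
# extractor from a pre-trained detection checkpoint before fine-tuning.
# `center_net_model` and the checkpoint path are placeholders.
#
#   restore_map = center_net_model.restore_from_objects(
#       fine_tune_checkpoint_type='detection')
#   ckpt = tf.train.Checkpoint(**restore_map)
#   ckpt.restore('/path/to/checkpoint').expect_partial()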
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/center_net_meta_arch.py
center_net_meta_arch.py
from __future__ import print_function import abc import functools import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import box_predictor from object_detection.core import losses from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.core import target_assigner from object_detection.utils import ops from object_detection.utils import shape_utils from object_detection.utils import variables_helper _UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__' class FasterRCNNFeatureExtractor(object): """Faster R-CNN Feature Extractor definition.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. first_stage_features_stride: Output stride of extracted RPN feature map. batch_norm_trainable: Whether to update batch norm parameters during training or not. When training with a relative large batch size (e.g. 8), it could be desirable to enable batch norm update. reuse_weights: Whether to reuse variables. Default is None. weight_decay: float weight decay for feature extractor (default: 0.0). """ self._is_training = is_training self._first_stage_features_stride = first_stage_features_stride self._train_batch_norm = (batch_norm_trainable and is_training) self._reuse_weights = tf.AUTO_REUSE if reuse_weights else None self._weight_decay = weight_decay @abc.abstractmethod def preprocess(self, resized_inputs): """Feature-extractor specific preprocessing (minus image resizing).""" pass def extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. This function is responsible for extracting feature maps from preprocessed images. These features are used by the region proposal network (RPN) to predict proposals. Args: preprocessed_inputs: A [batch, height, width, channels] float tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] activations: A dictionary mapping activation tensor names to tensors. """ with tf.variable_scope(scope, values=[preprocessed_inputs]): return self._extract_proposal_features(preprocessed_inputs, scope) @abc.abstractmethod def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features, to be overridden.""" pass def extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. 
""" with tf.variable_scope( scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE): return self._extract_box_classifier_features(proposal_feature_maps, scope) @abc.abstractmethod def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features, to be overridden.""" pass def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): for scope_name in [first_stage_feature_extractor_scope, second_stage_feature_extractor_scope]: if variable.op.name.startswith(scope_name): var_name = variable.op.name.replace(scope_name + '/', '') variables_to_restore[var_name] = variable return variables_to_restore class FasterRCNNKerasFeatureExtractor(object): """Keras-based Faster R-CNN Feature Extractor definition.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, weight_decay=0.0): """Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. first_stage_features_stride: Output stride of extracted RPN feature map. batch_norm_trainable: Whether to update batch norm parameters during training or not. When training with a relative large batch size (e.g. 8), it could be desirable to enable batch norm update. weight_decay: float weight decay for feature extractor (default: 0.0). 
""" self._is_training = is_training self._first_stage_features_stride = first_stage_features_stride self._train_batch_norm = (batch_norm_trainable and is_training) self._weight_decay = weight_decay @abc.abstractmethod def preprocess(self, resized_inputs): """Feature-extractor specific preprocessing (minus image resizing).""" pass @abc.abstractmethod def get_proposal_feature_extractor_model(self, name): """Get model that extracts first stage RPN features, to be overridden.""" pass @abc.abstractmethod def get_box_classifier_feature_extractor_model(self, name): """Get model that extracts second stage box classifier features.""" pass class FasterRCNNMetaArch(model.DetectionModel): """Faster R-CNN Meta-architecture definition.""" def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, initial_crop_size, maxpool_kernel_size, maxpool_stride, second_stage_target_assigner, second_stage_mask_rcnn_box_predictor, second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, second_stage_mask_prediction_loss_weight=1.0, hard_example_miner=None, parallel_iterations=16, add_summaries=True, clip_anchors_to_image=False, use_static_shapes=False, resize_masks=True, freeze_batchnorm=False, return_raw_detections_during_predict=False, output_final_box_features=False, output_final_box_rpn_features=False): """FasterRCNNMetaArch Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. num_classes: Number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). image_resizer_fn: A callable for image resizing. This callable takes a rank-3 image tensor of shape [height, width, channels] (corresponding to a single image), an optional rank-3 instance mask tensor of shape [num_masks, height, width] and returns a resized rank-3 image tensor, a resized mask tensor if one was provided in the input. In addition this callable must also return a 1-D tensor of the form [height, width, channels] containing the size of the true image, as the image resizer can perform zero padding. See protos/image_resizer.proto. feature_extractor: A FasterRCNNFeatureExtractor object. number_of_stages: An integer values taking values in {1, 2, 3}. If 1, the function will construct only the Region Proposal Network (RPN) part of the model. If 2, the function will perform box refinement and other auxiliary predictions all in the second stage. If 3, it will extract features from refined boxes and perform the auxiliary predictions on the non-maximum suppressed refined boxes. If is_training is true and the value of number_of_stages is 3, it is reduced to 2 since all the model heads are trained in parallel in second stage during training. 
first_stage_anchor_generator: An anchor_generator.AnchorGenerator object (note that currently we only support grid_anchor_generator.GridAnchorGenerator objects) first_stage_target_assigner: Target assigner to use for first stage of Faster R-CNN (RPN). first_stage_atrous_rate: A single integer indicating the atrous rate for the single convolution op which is applied to the `rpn_features_to_crop` tensor to obtain a tensor to be used for box prediction. Some feature extractors optionally allow for producing feature maps computed at denser resolutions. The atrous rate is used to compensate for the denser feature maps by using an effectively larger receptive field. (This should typically be set to 1). first_stage_box_predictor_arg_scope_fn: Either a Keras layer hyperparams object or a function to construct tf-slim arg_scope for conv2d, separable_conv2d and fully_connected ops. Used for the RPN box predictor. If it is a keras hyperparams object the RPN box predictor will be a Keras model. If it is a function to construct an arg scope it will be a tf-slim box predictor. first_stage_box_predictor_kernel_size: Kernel size to use for the convolution op just prior to RPN box predictions. first_stage_box_predictor_depth: Output depth for the convolution op just prior to RPN box predictions. first_stage_minibatch_size: The "batch size" to use for computing the objectness and location loss of the region proposal network. This "batch size" refers to the number of anchors selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. first_stage_sampler: Sampler to use for first stage loss (RPN loss). first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window`(with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`. This is used to perform non max suppression on the boxes predicted by the Region Proposal Network (RPN). See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. first_stage_max_proposals: Maximum number of boxes to retain after performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN). first_stage_localization_loss_weight: A float first_stage_objectness_loss_weight: A float crop_and_resize_fn: A differentiable resampler to use for cropping RPN proposal features. initial_crop_size: A single integer indicating the output size (width and height are set to be the same) of the initial bilinear interpolation based cropping during ROI pooling. maxpool_kernel_size: A single integer indicating the kernel size of the max pool op on the cropped feature map during ROI pooling. maxpool_stride: A single integer indicating the stride of the max pool op on the cropped feature map during ROI pooling. second_stage_target_assigner: Target assigner to use for second stage of Faster R-CNN. If the model is configured with multiple prediction heads, this target assigner is used to generate targets for all heads (with the correct `unmatched_class_label`). second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for the second stage. second_stage_batch_size: The batch size used for computing the classification and refined location loss of the box classifier. 
This "batch size" refers to the number of proposals selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. second_stage_sampler: Sampler to use for second stage loss (box classifier loss). second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores`, optional `clip_window` and optional (kwarg) `mask` inputs (with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`, and (optionally) `detection_masks`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. second_stage_score_conversion_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. second_stage_localization_loss_weight: A float indicating the scale factor for second stage localization loss. second_stage_classification_loss_weight: A float indicating the scale factor for second stage classification loss. second_stage_classification_loss: Classification loss used by the second stage classifier. Either losses.WeightedSigmoidClassificationLoss or losses.WeightedSoftmaxClassificationLoss. second_stage_mask_prediction_loss_weight: A float indicating the scale factor for second stage mask prediction loss. This is applicable only if second stage box predictor is configured to predict masks. hard_example_miner: A losses.HardExampleMiner object (can be None). parallel_iterations: (Optional) The number of iterations allowed to run in parallel for calls to tf.map_fn. add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph. clip_anchors_to_image: Normally, anchors generated for a given image size are pruned during training if they lie outside the image window. This option clips the anchors to be within the image instead of pruning. use_static_shapes: If True, uses implementation of ops with static shape guarantees. resize_masks: Indicates whether the masks presend in the groundtruth should be resized in the model with `image_resizer_fn` freeze_batchnorm: Whether to freeze batch norm parameters in the first stage box predictor during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. return_raw_detections_during_predict: Whether to return raw detection boxes in the predict() method. These are decoded boxes that have not been through postprocessing (i.e. NMS). Default False. output_final_box_features: Whether to output final box features. If true, it crops the rpn feature map and passes it through box_classifier then returns in the output dict as `detection_features`. output_final_box_rpn_features: Whether to output rpn box features. If true, it crops the rpn feature map and returns in the output dict as `detection_features`. Raises: ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at training time. ValueError: If first_stage_anchor_generator is not of type grid_anchor_generator.GridAnchorGenerator. """ # TODO(rathodv): add_summaries is currently unused. Respect that directive # in the future. 
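    # Editorial note (added comment, not in the original source): this
    # constructor is rarely invoked directly. In the Object Detection API a
    # FasterRCNNMetaArch is usually assembled from a pipeline config via
    # model_builder, roughly as follows (`pipeline_config_path` is a
    # placeholder for a faster_rcnn pipeline.config file):
    #
    #   from object_detection.builders import model_builder
    #   from object_detection.utils import config_util
    #   configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    #   detection_model = model_builder.build(configs['model'], is_training=True)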
super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes) self._is_training = is_training self._image_resizer_fn = image_resizer_fn self._resize_masks = resize_masks self._feature_extractor = feature_extractor if isinstance(feature_extractor, FasterRCNNKerasFeatureExtractor): # We delay building the feature extractor until it is used, # to avoid creating the variables when a model is built just for data # preprocessing. (This prevents a subtle bug where variable names are # mismatched across workers, causing only one worker to be able to train) self._feature_extractor_for_proposal_features = ( _UNINITIALIZED_FEATURE_EXTRACTOR) self._feature_extractor_for_box_classifier_features = ( _UNINITIALIZED_FEATURE_EXTRACTOR) else: self._feature_extractor_for_proposal_features = None self._feature_extractor_for_box_classifier_features = None self._number_of_stages = number_of_stages self._proposal_target_assigner = first_stage_target_assigner self._detector_target_assigner = second_stage_target_assigner # Both proposal and detector target assigners use the same box coder self._box_coder = self._proposal_target_assigner.box_coder # (First stage) Region proposal network parameters self._first_stage_anchor_generator = first_stage_anchor_generator self._first_stage_atrous_rate = first_stage_atrous_rate self._first_stage_box_predictor_depth = first_stage_box_predictor_depth self._first_stage_box_predictor_kernel_size = ( first_stage_box_predictor_kernel_size) self._first_stage_minibatch_size = first_stage_minibatch_size self._first_stage_sampler = first_stage_sampler if isinstance(first_stage_box_predictor_arg_scope_fn, hyperparams_builder.KerasLayerHyperparams): num_anchors_per_location = ( self._first_stage_anchor_generator.num_anchors_per_location()) conv_hyperparams = ( first_stage_box_predictor_arg_scope_fn) self._first_stage_box_predictor_first_conv = ( tf.keras.Sequential([ tf.keras.layers.Conv2D( self._first_stage_box_predictor_depth, kernel_size=[self._first_stage_box_predictor_kernel_size, self._first_stage_box_predictor_kernel_size], dilation_rate=self._first_stage_atrous_rate, padding='SAME', name='RPNConv', **conv_hyperparams.params()), conv_hyperparams.build_batch_norm( (self._is_training and not freeze_batchnorm), name='RPNBatchNorm'), tf.keras.layers.Lambda( tf.nn.relu6, name='RPNActivation') ], name='FirstStageRPNFeatures')) self._first_stage_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=self._is_training, num_classes=1, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=False, num_predictions_per_location_list=num_anchors_per_location, use_dropout=False, dropout_keep_prob=1.0, box_code_size=self._box_coder.code_size, kernel_size=1, num_layers_before_predictor=0, min_depth=0, max_depth=0, name=self.first_stage_box_predictor_scope)) else: self._first_stage_box_predictor_arg_scope_fn = ( first_stage_box_predictor_arg_scope_fn) def rpn_box_predictor_feature_extractor(single_rpn_features_to_crop): with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()): return slim.conv2d( single_rpn_features_to_crop, self._first_stage_box_predictor_depth, kernel_size=[ self._first_stage_box_predictor_kernel_size, self._first_stage_box_predictor_kernel_size ], rate=self._first_stage_atrous_rate, activation_fn=tf.nn.relu6, scope='Conv', reuse=tf.AUTO_REUSE) self._first_stage_box_predictor_first_conv = ( rpn_box_predictor_feature_extractor) self._first_stage_box_predictor = ( 
box_predictor_builder.build_convolutional_box_predictor( is_training=self._is_training, num_classes=1, conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn, use_dropout=False, dropout_keep_prob=1.0, box_code_size=self._box_coder.code_size, kernel_size=1, num_layers_before_predictor=0, min_depth=0, max_depth=0)) self._first_stage_nms_fn = first_stage_non_max_suppression_fn self._first_stage_max_proposals = first_stage_max_proposals self._use_static_shapes = use_static_shapes self._first_stage_localization_loss = ( losses.WeightedSmoothL1LocalizationLoss()) self._first_stage_objectness_loss = ( losses.WeightedSoftmaxClassificationLoss()) self._first_stage_loc_loss_weight = first_stage_localization_loss_weight self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight # Per-region cropping parameters self._crop_and_resize_fn = crop_and_resize_fn self._initial_crop_size = initial_crop_size self._maxpool_kernel_size = maxpool_kernel_size self._maxpool_stride = maxpool_stride # If max pooling is to be used, build the layer if maxpool_kernel_size: self._maxpool_layer = tf.keras.layers.MaxPooling2D( [self._maxpool_kernel_size, self._maxpool_kernel_size], strides=self._maxpool_stride, name='MaxPool2D') self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor self._second_stage_batch_size = second_stage_batch_size self._second_stage_sampler = second_stage_sampler self._second_stage_nms_fn = second_stage_non_max_suppression_fn self._second_stage_score_conversion_fn = second_stage_score_conversion_fn self._second_stage_localization_loss = ( losses.WeightedSmoothL1LocalizationLoss()) self._second_stage_classification_loss = second_stage_classification_loss self._second_stage_mask_loss = ( losses.WeightedSigmoidClassificationLoss()) self._second_stage_loc_loss_weight = second_stage_localization_loss_weight self._second_stage_cls_loss_weight = second_stage_classification_loss_weight self._second_stage_mask_loss_weight = ( second_stage_mask_prediction_loss_weight) self._hard_example_miner = hard_example_miner self._parallel_iterations = parallel_iterations self.clip_anchors_to_image = clip_anchors_to_image if self._number_of_stages <= 0 or self._number_of_stages > 3: raise ValueError('Number of stages should be a value in {1, 2, 3}.') self._batched_prediction_tensor_names = [] self._return_raw_detections_during_predict = ( return_raw_detections_during_predict) self._output_final_box_features = output_final_box_features self._output_final_box_rpn_features = output_final_box_rpn_features @property def first_stage_feature_extractor_scope(self): return 'FirstStageFeatureExtractor' @property def second_stage_feature_extractor_scope(self): return 'SecondStageFeatureExtractor' @property def first_stage_box_predictor_scope(self): return 'FirstStageBoxPredictor' @property def second_stage_box_predictor_scope(self): return 'SecondStageBoxPredictor' @property def max_num_proposals(self): """Max number of proposals (to pad to) for each image in the input batch. At training time, this is set to be the `second_stage_batch_size` if hard example miner is not configured, else it is set to `first_stage_max_proposals`. At inference time, this is always set to `first_stage_max_proposals`. Returns: A positive integer. 
""" if self._is_training and not self._hard_example_miner: return self._second_stage_batch_size return self._first_stage_max_proposals @property def anchors(self): if not self._anchors: raise RuntimeError('anchors have not been constructed yet!') if not isinstance(self._anchors, box_list.BoxList): raise RuntimeError('anchors should be a BoxList object, but is not.') return self._anchors @property def batched_prediction_tensor_names(self): if not self._batched_prediction_tensor_names: raise RuntimeError('Must call predict() method to get batched prediction ' 'tensor names.') return self._batched_prediction_tensor_names @property def feature_extractor(self): return self._feature_extractor def preprocess(self, inputs): """Feature-extractor specific preprocessing. See base class. For Faster R-CNN, we perform image resizing in the base class --- each class subclassing FasterRCNNMetaArch is responsible for any additional preprocessing (e.g., scaling pixel values to be in [-1, 1]). Args: inputs: a [batch, height_in, width_in, channels] float tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, height_out, width_out, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Raises: ValueError: if inputs tensor does not have type tf.float32 """ with tf.name_scope('Preprocessor'): (resized_inputs, true_image_shapes) = shape_utils.resize_images_and_return_shapes( inputs, self._image_resizer_fn) return (self._feature_extractor.preprocess(resized_inputs), true_image_shapes) def _compute_clip_window(self, image_shapes): """Computes clip window for non max suppression based on image shapes. This function assumes that the clip window's left top corner is at (0, 0). Args: image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing shapes of images in the batch. Each row represents [height, width, channels] of an image. Returns: A 2-D float32 tensor of shape [batch_size, 4] containing the clip window for each image in the form [ymin, xmin, ymax, xmax]. """ clip_heights = image_shapes[:, 0] clip_widths = image_shapes[:, 1] clip_window = tf.cast( tf.stack([ tf.zeros_like(clip_heights), tf.zeros_like(clip_heights), clip_heights, clip_widths ], axis=1), dtype=tf.float32) return clip_window def _proposal_postprocess(self, rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape, true_image_shapes): """Wraps over FasterRCNNMetaArch._postprocess_rpn().""" image_shape_2d = self._image_batch_shape_2d(image_shape) proposal_boxes_normalized, _, _, num_proposals, _, _ = \ self._postprocess_rpn( rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape_2d, true_image_shapes) return proposal_boxes_normalized, num_proposals def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): """Predicts unpostprocessed tensors from input tensor. This function takes an input batch of images and runs it through the forward pass of the network to yield "raw" un-postprocessed predictions. If `number_of_stages` is 1, this function only returns first stage RPN predictions (un-postprocessed). Otherwise it returns both first stage RPN predictions as well as second stage box classifier predictions. Other remarks: + Anchor pruning vs. 
clipping: following the recommendation of the Faster R-CNN paper, we prune anchors that venture outside the image window at training time and clip anchors to the image window at inference time. + Proposal padding: as described at the top of the file, proposals are padded to self._max_num_proposals and flattened so that proposals from all images within the input batch are arranged along the same batch dimension. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) rpn_box_predictor_features: A list of 4-D float32 tensor with shape [batch_size, height_i, width_j, depth] to be used for predicting proposal boxes and corresponding objectness scores. 2) rpn_features_to_crop: A list of 4-D float32 tensor with shape [batch_size, height, width, depth] representing image features to crop using the proposal boxes predicted by the RPN. 3) image_shape: a 1-D tensor of shape [4] representing the input image shape. 4) rpn_box_encodings: 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. 5) rpn_objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN (in absolute coordinates). Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. 7) feature_maps: A single element list containing a 4-D float32 tensor with shape batch_size, height, width, depth] representing the RPN features to crop. (and if number_of_stages > 1): 8) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 9) class_predictions_with_background: a 3-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 10) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 11) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 12) mask_predictions: (optional) a 4-D tensor with shape [total_num_padded_proposals, num_classes, mask_height, mask_width] containing instance mask predictions. 
13) raw_detection_boxes: (optional) a [batch_size, self.max_num_proposals, num_classes, 4] float32 tensor with detections prior to NMS in normalized coordinates. 14) raw_detection_feature_map_indices: (optional) a [batch_size, self.max_num_proposals, num_classes] int32 tensor with indices indicating which feature map each raw detection box was produced from. The indices correspond to the elements in the 'feature_maps' field. Raises: ValueError: If `predict` is called before `preprocess`. """ prediction_dict = self._predict_first_stage(preprocessed_inputs) if self._number_of_stages >= 2: prediction_dict.update( self._predict_second_stage( prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['rpn_features_to_crop'], prediction_dict['anchors'], prediction_dict['image_shape'], true_image_shapes, **side_inputs)) if self._number_of_stages == 3: prediction_dict = self._predict_third_stage(prediction_dict, true_image_shapes) self._batched_prediction_tensor_names = [ x for x in prediction_dict if x not in ('image_shape', 'anchors') ] return prediction_dict def _predict_first_stage(self, preprocessed_inputs): """First stage of prediction. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) rpn_box_predictor_features: A list of 4-D float32/bfloat16 tensor with shape [batch_size, height_i, width_j, depth] to be used for predicting proposal boxes and corresponding objectness scores. 2) rpn_features_to_crop: A list of 4-D float32/bfloat16 tensor with shape [batch_size, height, width, depth] representing image features to crop using the proposal boxes predicted by the RPN. 3) image_shape: a 1-D tensor of shape [4] representing the input image shape. 4) rpn_box_encodings: 3-D float32 tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. 5) rpn_objectness_predictions_with_background: 3-D float32 tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN (in absolute coordinates). Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. 7) feature_maps: A single element list containing a 4-D float32 tensor with shape batch_size, height, width, depth] representing the RPN features to crop. """ (rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist, image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs) (rpn_box_encodings, rpn_objectness_predictions_with_background ) = self._predict_rpn_proposals(rpn_box_predictor_features) # The Faster R-CNN paper recommends pruning anchors that venture outside # the image window at training time and clipping at inference time. 
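    # Editorial clarification (added comment): `clip_window` below is expressed
    # in absolute pixel coordinates as [ymin, xmin, ymax, xmax], i.e.
    # [0, 0, height, width]. For a padded input batch of shape [8, 640, 640, 3]
    # it evaluates to [0.0, 0.0, 640.0, 640.0]. Anchors falling outside this
    # window are pruned during training (unless clip_anchors_to_image is True,
    # in which case they are clipped instead) and are clipped at inference time.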
clip_window = tf.cast(tf.stack([0, 0, image_shape[1], image_shape[2]]), dtype=tf.float32) if self._is_training: if self.clip_anchors_to_image: anchors_boxlist = box_list_ops.clip_to_window( anchors_boxlist, clip_window, filter_nonoverlapping=False) else: (rpn_box_encodings, rpn_objectness_predictions_with_background, anchors_boxlist) = self._remove_invalid_anchors_and_predictions( rpn_box_encodings, rpn_objectness_predictions_with_background, anchors_boxlist, clip_window) else: anchors_boxlist = box_list_ops.clip_to_window( anchors_boxlist, clip_window, filter_nonoverlapping=not self._use_static_shapes) self._anchors = anchors_boxlist prediction_dict = { 'rpn_box_predictor_features': rpn_box_predictor_features, 'rpn_features_to_crop': rpn_features_to_crop, 'image_shape': image_shape, 'rpn_box_encodings': tf.cast(rpn_box_encodings, dtype=tf.float32), 'rpn_objectness_predictions_with_background': tf.cast(rpn_objectness_predictions_with_background, dtype=tf.float32), 'anchors': anchors_boxlist.data['boxes'], fields.PredictionFields.feature_maps: rpn_features_to_crop } return prediction_dict def _image_batch_shape_2d(self, image_batch_shape_1d): """Takes a 1-D image batch shape tensor and converts it to a 2-D tensor. Example: If 1-D image batch shape tensor is [2, 300, 300, 3]. The corresponding 2-D image batch tensor would be [[300, 300, 3], [300, 300, 3]] Args: image_batch_shape_1d: 1-D tensor of the form [batch_size, height, width, channels]. Returns: image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] were each row is of the form [height, width, channels]. """ return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0), [image_batch_shape_1d[0], 1]) def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors, image_shape, true_image_shapes, **side_inputs): """Predicts the output tensors from second stage of Faster R-CNN. Args: rpn_box_encodings: 3-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes. rpn_objectness_predictions_with_background: 2-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensor with shape [batch_size, height_i, width_i, depth] representing image features to crop using the proposal boxes predicted by the RPN. anchors: 2-D float tensor of shape [num_anchors, self._box_coder.code_size]. image_shape: A 1D int32 tensors of size [4] containing the image shape. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D float32 tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 
2) class_predictions_with_background: a 3-D float32 tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 5) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in normalized coordinates. Can be used to override the boxes proposed by the RPN, thus enabling one to extract features and get box classification and prediction for externally selected areas of the image. 6) box_classifier_features: a 4-D float32/bfloat16 tensor representing the features for each proposal. If self._return_raw_detections_during_predict is True, the dictionary will also contain: 7) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, num_classes, 4] in normalized coordinates. 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals, num_classes]. """ proposal_boxes_normalized, num_proposals = self._proposal_postprocess( rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape, true_image_shapes) prediction_dict = self._box_prediction(rpn_features_to_crop, proposal_boxes_normalized, image_shape, true_image_shapes, **side_inputs) prediction_dict['num_proposals'] = num_proposals return prediction_dict def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized, image_shape, true_image_shapes, **side_inputs): """Predicts the output tensors from second stage of Faster R-CNN. Args: rpn_features_to_crop: A list 4-D float32 or bfloat16 tensor with shape [batch_size, height_i, width_i, depth] representing image features to crop using the proposal boxes predicted by the RPN. proposal_boxes_normalized: A float tensor with shape [batch_size, max_num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented as normalized coordinates. image_shape: A 1D int32 tensors of size [4] containing the image shape. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D float32 tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 
2) class_predictions_with_background: a 3-D float32 tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 4) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in normalized coordinates. Can be used to override the boxes proposed by the RPN, thus enabling one to extract features and get box classification and prediction for externally selected areas of the image. 5) box_classifier_features: a 4-D float32/bfloat16 tensor representing the features for each proposal. If self._return_raw_detections_during_predict is True, the dictionary will also contain: 6) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, num_classes, 4] in normalized coordinates. 7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals, num_classes]. 8) final_anchors: a 3-D float tensor of shape [batch_size, self.max_num_proposals, 4] containing the reference anchors for raw detection boxes in normalized coordinates. """ flattened_proposal_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, proposal_boxes_normalized, image_shape, **side_inputs)) box_classifier_features = self._extract_box_classifier_features( flattened_proposal_feature_maps, **side_inputs) if self._mask_rcnn_box_predictor.is_keras_model: box_predictions = self._mask_rcnn_box_predictor( [box_classifier_features], prediction_stage=2) else: box_predictions = self._mask_rcnn_box_predictor.predict( [box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, prediction_stage=2) refined_box_encodings = tf.squeeze( box_predictions[box_predictor.BOX_ENCODINGS], axis=1, name='all_refined_box_encodings') class_predictions_with_background = tf.squeeze( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1, name='all_class_predictions_with_background') absolute_proposal_boxes = ops.normalized_to_image_coordinates( proposal_boxes_normalized, image_shape, self._parallel_iterations) prediction_dict = { 'refined_box_encodings': tf.cast(refined_box_encodings, dtype=tf.float32), 'class_predictions_with_background': tf.cast(class_predictions_with_background, dtype=tf.float32), 'proposal_boxes': absolute_proposal_boxes, 'box_classifier_features': box_classifier_features, 'proposal_boxes_normalized': proposal_boxes_normalized, 'final_anchors': proposal_boxes_normalized } if self._return_raw_detections_during_predict: prediction_dict.update(self._raw_detections_and_feature_map_inds( refined_box_encodings, absolute_proposal_boxes, true_image_shapes)) return prediction_dict def _raw_detections_and_feature_map_inds( self, refined_box_encodings, absolute_proposal_boxes, true_image_shapes): """Returns raw detections and feat map inds from where they originated. Args: refined_box_encodings: [total_num_proposals, num_classes, self._box_coder.code_size] float32 tensor. absolute_proposal_boxes: [batch_size, self.max_num_proposals, 4] float32 tensor representing decoded proposal bounding boxes in absolute coordinates. 
true_image_shapes: [batch, 3] int32 tensor where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: A dictionary with raw detection boxes, and the feature map indices from which they originated. """ box_encodings_batch = tf.reshape( refined_box_encodings, [-1, self.max_num_proposals, refined_box_encodings.shape[1], self._box_coder.code_size]) raw_detection_boxes_absolute = self._batch_decode_boxes( box_encodings_batch, absolute_proposal_boxes) raw_detection_boxes_normalized = shape_utils.static_or_dynamic_map_fn( self._normalize_and_clip_boxes, elems=[raw_detection_boxes_absolute, true_image_shapes], dtype=tf.float32) detection_feature_map_indices = tf.zeros_like( raw_detection_boxes_normalized[:, :, :, 0], dtype=tf.int32) return { fields.PredictionFields.raw_detection_boxes: raw_detection_boxes_normalized, fields.PredictionFields.raw_detection_feature_map_indices: detection_feature_map_indices } def _extract_box_classifier_features(self, flattened_feature_maps): if self._feature_extractor_for_box_classifier_features == ( _UNINITIALIZED_FEATURE_EXTRACTOR): self._feature_extractor_for_box_classifier_features = ( self._feature_extractor.get_box_classifier_feature_extractor_model( name=self.second_stage_feature_extractor_scope)) if self._feature_extractor_for_box_classifier_features: box_classifier_features = ( self._feature_extractor_for_box_classifier_features( flattened_feature_maps)) else: box_classifier_features = ( self._feature_extractor.extract_box_classifier_features( flattened_feature_maps, scope=self.second_stage_feature_extractor_scope)) return box_classifier_features def _predict_third_stage(self, prediction_dict, image_shapes): """Predicts non-box, non-class outputs using refined detections. For training, masks as predicted directly on the box_classifier_features, which are region-features from the initial anchor boxes. For inference, this happens after calling the post-processing stage, such that masks are only calculated for the top scored boxes. Args: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 2) class_predictions_with_background: a 3-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 5) box_classifier_features: a 4-D float32 tensor representing the features for each proposal. 6) image_shape: a 1-D tensor of shape [4] representing the input image shape. 
image_shapes: A 2-D int32 tensors of shape [batch_size, 3] containing shapes of images in the batch. Returns: prediction_dict: a dictionary that in addition to the input predictions does hold the following predictions as well: 1) mask_predictions: a 4-D tensor with shape [batch_size, max_detection, mask_height, mask_width] containing instance mask predictions. """ if self._is_training: curr_box_classifier_features = prediction_dict['box_classifier_features'] detection_classes = prediction_dict['class_predictions_with_background'] if self._mask_rcnn_box_predictor.is_keras_model: mask_predictions = self._mask_rcnn_box_predictor( [curr_box_classifier_features], prediction_stage=3) else: mask_predictions = self._mask_rcnn_box_predictor.predict( [curr_box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, prediction_stage=3) prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[ box_predictor.MASK_PREDICTIONS], axis=1) else: detections_dict = self._postprocess_box_classifier( prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['proposal_boxes'], prediction_dict['num_proposals'], image_shapes) prediction_dict.update(detections_dict) detection_boxes = detections_dict[ fields.DetectionResultFields.detection_boxes] detection_classes = detections_dict[ fields.DetectionResultFields.detection_classes] rpn_features_to_crop = prediction_dict['rpn_features_to_crop'] image_shape = prediction_dict['image_shape'] batch_size = tf.shape(detection_boxes)[0] max_detection = tf.shape(detection_boxes)[1] flattened_detected_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, detection_boxes, image_shape)) curr_box_classifier_features = self._extract_box_classifier_features( flattened_detected_feature_maps) if self._mask_rcnn_box_predictor.is_keras_model: mask_predictions = self._mask_rcnn_box_predictor( [curr_box_classifier_features], prediction_stage=3) else: mask_predictions = self._mask_rcnn_box_predictor.predict( [curr_box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, prediction_stage=3) detection_masks = tf.squeeze(mask_predictions[ box_predictor.MASK_PREDICTIONS], axis=1) _, num_classes, mask_height, mask_width = ( detection_masks.get_shape().as_list()) _, max_detection = detection_classes.get_shape().as_list() prediction_dict['mask_predictions'] = tf.reshape( detection_masks, [-1, num_classes, mask_height, mask_width]) if num_classes > 1: detection_masks = self._gather_instance_masks( detection_masks, detection_classes) detection_masks = tf.cast(detection_masks, tf.float32) prediction_dict[fields.DetectionResultFields.detection_masks] = ( tf.reshape(tf.sigmoid(detection_masks), [batch_size, max_detection, mask_height, mask_width])) return prediction_dict def _gather_instance_masks(self, instance_masks, classes): """Gathers the masks that correspond to classes. Args: instance_masks: A 4-D float32 tensor with shape [K, num_classes, mask_height, mask_width]. classes: A 2-D int32 tensor with shape [batch_size, max_detection]. Returns: masks: a 3-D float32 tensor with shape [K, mask_height, mask_width]. 
""" _, num_classes, height, width = instance_masks.get_shape().as_list() k = tf.shape(instance_masks)[0] instance_masks = tf.reshape(instance_masks, [-1, height, width]) classes = tf.cast(tf.reshape(classes, [-1]), dtype=tf.int32) gather_idx = tf.range(k) * num_classes + classes return tf.gather(instance_masks, gather_idx) def _extract_rpn_feature_maps(self, preprocessed_inputs): """Extracts RPN features. This function extracts two feature maps: a feature map to be directly fed to a box predictor (to predict location and objectness scores for proposals) and a feature map from which to crop regions which will then be sent to the second stage box classifier. Args: preprocessed_inputs: a [batch, height, width, channels] image tensor. Returns: rpn_box_predictor_features: A list of 4-D float32 tensor with shape [batch, height_i, width_j, depth] to be used for predicting proposal boxes and corresponding objectness scores. rpn_features_to_crop: A list of 4-D float32 tensor with shape [batch, height, width, depth] representing image features to crop using the proposals boxes. anchors: A list of BoxList representing anchors (for the RPN) in absolute coordinates. image_shape: A 1-D tensor representing the input image shape. """ image_shape = tf.shape(preprocessed_inputs) rpn_features_to_crop, self.endpoints = self._extract_proposal_features( preprocessed_inputs) # Decide if rpn_features_to_crop is a list. If not make it a list if not isinstance(rpn_features_to_crop, list): rpn_features_to_crop = [rpn_features_to_crop] feature_map_shapes = [] rpn_box_predictor_features = [] for single_rpn_features_to_crop in rpn_features_to_crop: single_shape = tf.shape(single_rpn_features_to_crop) feature_map_shapes.append((single_shape[1], single_shape[2])) single_rpn_box_predictor_features = ( self._first_stage_box_predictor_first_conv( single_rpn_features_to_crop)) rpn_box_predictor_features.append(single_rpn_box_predictor_features) anchors = box_list_ops.concatenate( self._first_stage_anchor_generator.generate(feature_map_shapes)) return (rpn_box_predictor_features, rpn_features_to_crop, anchors, image_shape) def _extract_proposal_features(self, preprocessed_inputs): if self._feature_extractor_for_proposal_features == ( _UNINITIALIZED_FEATURE_EXTRACTOR): self._feature_extractor_for_proposal_features = ( self._feature_extractor.get_proposal_feature_extractor_model( name=self.first_stage_feature_extractor_scope)) if self._feature_extractor_for_proposal_features: proposal_features = ( self._feature_extractor_for_proposal_features(preprocessed_inputs), {}) else: proposal_features = ( self._feature_extractor.extract_proposal_features( preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)) return proposal_features def _predict_rpn_proposals(self, rpn_box_predictor_features): """Adds box predictors to RPN feature map to predict proposals. Note resulting tensors will not have been postprocessed. Args: rpn_box_predictor_features: A list of 4-D float32 tensor with shape [batch, height_i, width_j, depth] to be used for predicting proposal boxes and corresponding objectness scores. Returns: box_encodings: 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 
Raises: RuntimeError: if the anchor generator generates anchors corresponding to multiple feature maps. We currently assume that a single feature map is generated for the RPN. """ num_anchors_per_location = ( self._first_stage_anchor_generator.num_anchors_per_location()) if self._first_stage_box_predictor.is_keras_model: box_predictions = self._first_stage_box_predictor( rpn_box_predictor_features) else: box_predictions = self._first_stage_box_predictor.predict( rpn_box_predictor_features, num_anchors_per_location, scope=self.first_stage_box_predictor_scope) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (tf.squeeze(box_encodings, axis=2), objectness_predictions_with_background) def _remove_invalid_anchors_and_predictions( self, box_encodings, objectness_predictions_with_background, anchors_boxlist, clip_window): """Removes anchors that (partially) fall outside an image. Also removes associated box encodings and objectness predictions. Args: box_encodings: 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN) in absolute coordinates. clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax] extent of the window to clip/prune to. Returns: box_encodings: 4-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes, where num_valid_anchors <= num_anchors objectness_predictions_with_background: 2-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors, where num_valid_anchors <= num_anchors. Note that this tensor *includes* background class predictions (at class index 0). anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in absolute coordinates. """ pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window( anchors_boxlist, clip_window) def _batch_gather_kept_indices(predictions_tensor): return shape_utils.static_or_dynamic_map_fn( functools.partial(tf.gather, indices=keep_indices), elems=predictions_tensor, dtype=tf.float32, parallel_iterations=self._parallel_iterations, back_prop=True) return (_batch_gather_kept_indices(box_encodings), _batch_gather_kept_indices(objectness_predictions_with_background), pruned_anchors_boxlist) def _flatten_first_two_dimensions(self, inputs): """Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor. Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape [A * B, ..., depth]. Args: inputs: A float tensor with shape [A, B, ..., depth]. Note that the first two and last dimensions must be statically defined. Returns: A float tensor with shape [A * B, ..., depth] (where the first and last dimension are statically defined. """ combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs) flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] + combined_shape[2:]) return tf.reshape(inputs, flattened_shape) def postprocess(self, prediction_dict, true_image_shapes): """Convert prediction tensors to final detections. 
This function converts raw predictions tensors to final detection results. See base class for output format conventions. Note also that by default, scores are to be interpreted as logits, but if a score_converter is used, then scores are remapped (and may thus have a different interpretation). If number_of_stages=1, the returned results represent proposals from the first stage RPN and are padded to have self.max_num_proposals for each image; otherwise, the results can be interpreted as multiclass detections from the full two-stage model and are padded to self._max_detections. Args: prediction_dict: a dictionary holding prediction tensors (see the documentation for the predict method. If number_of_stages=1, we expect prediction_dict to contain `rpn_box_encodings`, `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, and `anchors` fields. Otherwise we expect prediction_dict to additionally contain `refined_box_encodings`, `class_predictions_with_background`, `num_proposals`, `proposal_boxes` and, optionally, `mask_predictions` fields. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: detections: a dictionary containing the following fields detection_boxes: [batch, max_detection, 4] detection_scores: [batch, max_detections] detection_multiclass_scores: [batch, max_detections, 2] detection_anchor_indices: [batch, max_detections] detection_classes: [batch, max_detections] (this entry is only created if rpn_mode=False) num_detections: [batch] raw_detection_boxes: [batch, total_detections, 4] raw_detection_scores: [batch, total_detections, num_classes + 1] Raises: ValueError: If `predict` is called before `preprocess`. ValueError: If `_output_final_box_features` is true but rpn_features_to_crop is not in the prediction_dict. """ with tf.name_scope('FirstStagePostprocessor'): if self._number_of_stages == 1: image_shapes = self._image_batch_shape_2d( prediction_dict['image_shape']) (proposal_boxes, proposal_scores, proposal_multiclass_scores, num_proposals, raw_proposal_boxes, raw_proposal_scores) = self._postprocess_rpn( prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors'], image_shapes, true_image_shapes) return { fields.DetectionResultFields.detection_boxes: proposal_boxes, fields.DetectionResultFields.detection_scores: proposal_scores, fields.DetectionResultFields.detection_multiclass_scores: proposal_multiclass_scores, fields.DetectionResultFields.num_detections: tf.cast(num_proposals, dtype=tf.float32), fields.DetectionResultFields.raw_detection_boxes: raw_proposal_boxes, fields.DetectionResultFields.raw_detection_scores: raw_proposal_scores } # TODO(jrru): Remove mask_predictions from _post_process_box_classifier. 
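    # Editorial usage sketch (added comment, not in the original source): the
    # standard inference flow chains the three public methods of this class.
    # `detection_model` and `image_batch` below are hypothetical placeholders:
    #
    #   preprocessed, true_shapes = detection_model.preprocess(
    #       tf.cast(image_batch, tf.float32))
    #   prediction_dict = detection_model.predict(preprocessed, true_shapes)
    #   detections = detection_model.postprocess(prediction_dict, true_shapes)
    #
    # `detections` then holds the padded detection_boxes, detection_scores,
    # detection_classes and num_detections fields documented above.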
if (self._number_of_stages == 2 or (self._number_of_stages == 3 and self._is_training)): with tf.name_scope('SecondStagePostprocessor'): mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS) detections_dict = self._postprocess_box_classifier( prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['proposal_boxes'], prediction_dict['num_proposals'], true_image_shapes, mask_predictions=mask_predictions) if self._output_final_box_features: if 'rpn_features_to_crop' not in prediction_dict: raise ValueError( 'Please make sure rpn_features_to_crop is in the prediction_dict.' ) detections_dict[ 'detection_features'] = ( self._add_detection_box_boxclassifier_features_output_node( detections_dict[ fields.DetectionResultFields.detection_boxes], prediction_dict['rpn_features_to_crop'], prediction_dict['image_shape'])) if self._output_final_box_rpn_features: if 'rpn_features_to_crop' not in prediction_dict: raise ValueError( 'Please make sure rpn_features_to_crop is in the prediction_dict.' ) detections_dict['cropped_rpn_box_features'] = ( self._add_detection_box_rpn_features_output_node( detections_dict[fields.DetectionResultFields.detection_boxes], prediction_dict['rpn_features_to_crop'], prediction_dict['image_shape'])) return detections_dict if self._number_of_stages == 3: # Post processing is already performed in 3rd stage. We need to transfer # postprocessed tensors from `prediction_dict` to `detections_dict`. # Remove any items from the prediction dictionary if they are not pure # Tensors. non_tensor_predictions = [ k for k, v in prediction_dict.items() if not isinstance(v, tf.Tensor)] for k in non_tensor_predictions: tf.logging.info('Removing {0} from prediction_dict'.format(k)) prediction_dict.pop(k) return prediction_dict def _add_detection_box_boxclassifier_features_output_node( self, detection_boxes, rpn_features_to_crop, image_shape): """Add detection features to outputs. This function extracts box features for each box in rpn_features_to_crop. It returns the extracted box features, reshaped to [batch size, max_detections, height, width, depth], and average pools the extracted features across the spatial dimensions and adds a graph node to the pooled features named 'pooled_detection_features' Args: detection_boxes: a 3-D float32 tensor of shape [batch_size, max_detections, 4] which represents the bounding boxes. rpn_features_to_crop: A list of 4-D float32 tensor with shape [batch, height, width, depth] representing image features to crop using the proposals boxes. image_shape: a 1-D tensor of shape [4] representing the image shape. 
Returns: detection_features: a 4-D float32 tensor of shape [batch size, max_detections, height, width, depth] representing cropped image features """ with tf.name_scope('SecondStageDetectionFeaturesExtract'): flattened_detected_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, detection_boxes, image_shape)) detection_features_unpooled = self._extract_box_classifier_features( flattened_detected_feature_maps) batch_size = tf.shape(detection_boxes)[0] max_detections = tf.shape(detection_boxes)[1] detection_features_pool = tf.reduce_mean( detection_features_unpooled, axis=[1, 2]) reshaped_detection_features_pool = tf.reshape( detection_features_pool, [batch_size, max_detections, tf.shape(detection_features_pool)[-1]]) reshaped_detection_features_pool = tf.identity( reshaped_detection_features_pool, 'pooled_detection_features') # TODO(sbeery) add node to extract rpn features here!! reshaped_detection_features = tf.reshape( detection_features_unpooled, [batch_size, max_detections, tf.shape(detection_features_unpooled)[1], tf.shape(detection_features_unpooled)[2], tf.shape(detection_features_unpooled)[3]]) return reshaped_detection_features def _add_detection_box_rpn_features_output_node(self, detection_boxes, rpn_features_to_crop, image_shape): """Add detection features to outputs. This function extracts box features for each box in rpn_features_to_crop. It returns the extracted box features, reshaped to [batch size, max_detections, height, width, depth] Args: detection_boxes: a 3-D float32 tensor of shape [batch_size, max_detections, 4] which represents the bounding boxes. rpn_features_to_crop: A list of 4-D float32 tensor with shape [batch, height, width, depth] representing image features to crop using the proposals boxes. image_shape: a 1-D tensor of shape [4] representing the image shape. Returns: detection_features: a 4-D float32 tensor of shape [batch size, max_detections, height, width, depth] representing cropped image features """ with tf.name_scope('FirstStageDetectionFeaturesExtract'): flattened_detected_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, detection_boxes, image_shape)) batch_size = tf.shape(detection_boxes)[0] max_detections = tf.shape(detection_boxes)[1] reshaped_detection_features = tf.reshape( flattened_detected_feature_maps, [batch_size, max_detections, tf.shape(flattened_detected_feature_maps)[1], tf.shape(flattened_detected_feature_maps)[2], tf.shape(flattened_detected_feature_maps)[3]]) return reshaped_detection_features def _postprocess_rpn(self, rpn_box_encodings_batch, rpn_objectness_predictions_with_background_batch, anchors, image_shapes, true_image_shapes): """Converts first stage prediction tensors from the RPN to proposals. This function decodes the raw RPN predictions, runs non-max suppression on the result. Note that the behavior of this function is slightly modified during training --- specifically, we stop the gradient from passing through the proposal boxes and we only return a balanced sampled subset of proposals with size `second_stage_batch_size`. Args: rpn_box_encodings_batch: A 3-D float32 tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted proposal box encodings. rpn_objectness_predictions_with_background_batch: A 3-D float tensor of shape [batch_size, num_anchors, 2] containing objectness predictions (logits) for each of the anchors with 0 corresponding to background and 1 corresponding to object. 
anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN. Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of images in the batch. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: proposal_boxes: A float tensor with shape [batch_size, max_num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented as normalized coordinates. proposal_scores: A float tensor with shape [batch_size, max_num_proposals] representing the (potentially zero padded) proposal objectness scores for all images in the batch. proposal_multiclass_scores: A float tensor with shape [batch_size, max_num_proposals, 2] representing the (potentially zero padded) proposal multiclass scores for all images in the batch. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. raw_detection_boxes: [batch, total_detections, 4] tensor with decoded proposal boxes before Non-Max Suppression. raw_detection_scores: [batch, total_detections, num_classes_with_background] tensor of multi-class scores for raw proposal boxes. """ rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2) rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape( rpn_box_encodings_batch) tiled_anchor_boxes = tf.tile( tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1]) proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch, tiled_anchor_boxes) raw_proposal_boxes = tf.squeeze(proposal_boxes, axis=2) rpn_objectness_softmax = tf.nn.softmax( rpn_objectness_predictions_with_background_batch) rpn_objectness_softmax_without_background = rpn_objectness_softmax[:, :, 1] clip_window = self._compute_clip_window(true_image_shapes) additional_fields = {'multiclass_scores': rpn_objectness_softmax} (proposal_boxes, proposal_scores, _, _, nmsed_additional_fields, num_proposals) = self._first_stage_nms_fn( tf.expand_dims(raw_proposal_boxes, axis=2), tf.expand_dims(rpn_objectness_softmax_without_background, axis=2), additional_fields=additional_fields, clip_window=clip_window) if self._is_training: proposal_boxes = tf.stop_gradient(proposal_boxes) if not self._hard_example_miner: (groundtruth_boxlists, groundtruth_classes_with_background_list, _, groundtruth_weights_list ) = self._format_groundtruth_data(image_shapes) (proposal_boxes, proposal_scores, num_proposals) = self._sample_box_classifier_batch( proposal_boxes, proposal_scores, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list) # normalize proposal boxes def normalize_boxes(args): proposal_boxes_per_image = args[0] image_shape = args[1] normalized_boxes_per_image = box_list_ops.to_normalized_coordinates( box_list.BoxList(proposal_boxes_per_image), image_shape[0], image_shape[1], check_range=False).get() return normalized_boxes_per_image normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn( normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32) raw_normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn( normalize_boxes, elems=[raw_proposal_boxes, image_shapes], dtype=tf.float32) 
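    # Note that both the NMS-ed proposals and the raw (pre-NMS) proposals are
    # converted to coordinates normalized with respect to the (possibly
    # padded) image shapes, so downstream consumers see a consistent
    # coordinate frame.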
proposal_multiclass_scores = ( nmsed_additional_fields.get('multiclass_scores') if nmsed_additional_fields else None) return (normalized_proposal_boxes, proposal_scores, proposal_multiclass_scores, num_proposals, raw_normalized_proposal_boxes, rpn_objectness_softmax) def _sample_box_classifier_batch( self, proposal_boxes, proposal_scores, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list): """Samples a minibatch for second stage. Args: proposal_boxes: A float tensor with shape [batch_size, num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented in absolute coordinates. proposal_scores: A float tensor with shape [batch_size, num_proposals] representing the (potentially zero padded) proposal objectness scores for all images in the batch. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: A list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the class targets with the 0th index assumed to map to the background class. groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes] indicating the weight associated with the groundtruth boxes. Returns: proposal_boxes: A float tensor with shape [batch_size, second_stage_batch_size, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented in absolute coordinates. proposal_scores: A float tensor with shape [batch_size, second_stage_batch_size] representing the (potentially zero padded) proposal objectness scores for all images in the batch. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. 
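
    As an illustrative example (shapes assumed here, not prescribed by the
    implementation): with `second_stage_batch_size=64` and 300 padded input
    proposals per image, the returned `proposal_boxes` has shape
    [batch_size, 64, 4], `proposal_scores` has shape [batch_size, 64], and
    every entry of `num_proposals` is at most 64.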
""" single_image_proposal_box_sample = [] single_image_proposal_score_sample = [] single_image_num_proposals_sample = [] for (single_image_proposal_boxes, single_image_proposal_scores, single_image_num_proposals, single_image_groundtruth_boxlist, single_image_groundtruth_classes_with_background, single_image_groundtruth_weights) in zip( tf.unstack(proposal_boxes), tf.unstack(proposal_scores), tf.unstack(num_proposals), groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list): single_image_boxlist = box_list.BoxList(single_image_proposal_boxes) single_image_boxlist.add_field(fields.BoxListFields.scores, single_image_proposal_scores) sampled_boxlist = self._sample_box_classifier_minibatch_single_image( single_image_boxlist, single_image_num_proposals, single_image_groundtruth_boxlist, single_image_groundtruth_classes_with_background, single_image_groundtruth_weights) sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list( sampled_boxlist, num_boxes=self._second_stage_batch_size) single_image_num_proposals_sample.append(tf.minimum( sampled_boxlist.num_boxes(), self._second_stage_batch_size)) bb = sampled_padded_boxlist.get() single_image_proposal_box_sample.append(bb) single_image_proposal_score_sample.append( sampled_padded_boxlist.get_field(fields.BoxListFields.scores)) return (tf.stack(single_image_proposal_box_sample), tf.stack(single_image_proposal_score_sample), tf.stack(single_image_num_proposals_sample)) def _format_groundtruth_data(self, image_shapes): """Helper function for preparing groundtruth data for target assignment. In order to be consistent with the model.DetectionModel interface, groundtruth boxes are specified in normalized coordinates and classes are specified as label indices with no assumed background category. To prepare for target assignment, we: 1) convert boxes to absolute coordinates, 2) add a background class at class index 0 3) groundtruth instance masks, if available, are resized to match image_shape. Args: image_shapes: a 2-D int32 tensor of shape [batch_size, 3] containing shapes of input image in the batch. Returns: groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: A list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the class targets with the 0th index assumed to map to the background class. groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of shape [num_boxes, image_height, image_width] containing instance masks. This is set to None if no masks exist in the provided groundtruth. """ # pylint: disable=g-complex-comprehension groundtruth_boxlists = [ box_list_ops.to_absolute_coordinates( box_list.BoxList(boxes), image_shapes[i, 0], image_shapes[i, 1]) for i, boxes in enumerate( self.groundtruth_lists(fields.BoxListFields.boxes)) ] groundtruth_classes_with_background_list = [] for one_hot_encoding in self.groundtruth_lists( fields.BoxListFields.classes): groundtruth_classes_with_background_list.append( tf.cast( tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'), dtype=tf.float32)) groundtruth_masks_list = self._groundtruth_lists.get( fields.BoxListFields.masks) # TODO(rathodv): Remove mask resizing once the legacy pipeline is deleted. if groundtruth_masks_list is not None and self._resize_masks: resized_masks_list = [] for mask in groundtruth_masks_list: _, resized_mask, _ = self._image_resizer_fn( # Reuse the given `image_resizer_fn` to resize groundtruth masks. 
# `mask` tensor for an image is of the shape [num_masks, # image_height, image_width]. Below we create a dummy image of the # the shape [image_height, image_width, 1] to use with # `image_resizer_fn`. image=tf.zeros(tf.stack([tf.shape(mask)[1], tf.shape(mask)[2], 1])), masks=mask) resized_masks_list.append(resized_mask) groundtruth_masks_list = resized_masks_list # Masks could be set to bfloat16 in the input pipeline for performance # reasons. Convert masks back to floating point space here since the rest of # this module assumes groundtruth to be of float32 type. float_groundtruth_masks_list = [] if groundtruth_masks_list: for mask in groundtruth_masks_list: float_groundtruth_masks_list.append(tf.cast(mask, tf.float32)) groundtruth_masks_list = float_groundtruth_masks_list if self.groundtruth_has_field(fields.BoxListFields.weights): groundtruth_weights_list = self.groundtruth_lists( fields.BoxListFields.weights) else: # Set weights for all batch elements equally to 1.0 groundtruth_weights_list = [] for groundtruth_classes in groundtruth_classes_with_background_list: num_gt = tf.shape(groundtruth_classes)[0] groundtruth_weights = tf.ones(num_gt) groundtruth_weights_list.append(groundtruth_weights) return (groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_masks_list, groundtruth_weights_list) def _sample_box_classifier_minibatch_single_image( self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist, groundtruth_classes_with_background, groundtruth_weights): """Samples a mini-batch of proposals to be sent to the box classifier. Helper function for self._postprocess_rpn. Args: proposal_boxlist: A BoxList containing K proposal boxes in absolute coordinates. num_valid_proposals: Number of valid proposals in the proposal boxlist. groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in absolute coordinates. groundtruth_classes_with_background: A tensor with shape `[N, self.num_classes + 1]` representing groundtruth classes. The classes are assumed to be k-hot encoded, and include background as the zero-th class. groundtruth_weights: Weights attached to the groundtruth_boxes. Returns: a BoxList contained sampled proposals. """ (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign( proposal_boxlist, groundtruth_boxlist, groundtruth_classes_with_background, unmatched_class_label=tf.constant( [1] + self._num_classes * [0], dtype=tf.float32), groundtruth_weights=groundtruth_weights) # Selects all boxes as candidates if none of them is selected according # to cls_weights. This could happen as boxes within certain IOU ranges # are ignored. If triggered, the selected boxes will still be ignored # during loss computation. cls_weights = tf.reduce_mean(cls_weights, axis=-1) positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0) valid_indicator = tf.logical_and( tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals, cls_weights > 0 ) selected_positions = self._second_stage_sampler.subsample( valid_indicator, self._second_stage_batch_size, positive_indicator) return box_list_ops.boolean_mask( proposal_boxlist, selected_positions, use_static_shapes=self._use_static_shapes, indicator_sum=(self._second_stage_batch_size if self._use_static_shapes else None)) def _compute_second_stage_input_feature_maps(self, features_to_crop, proposal_boxes_normalized, image_shape, **side_inputs): """Crops to a set of proposals from the feature map for a batch of images. Helper function for self._postprocess_rpn. 
This function calls `tf.image.crop_and_resize` to create the feature map to be passed to the second stage box classifier for each proposal. Args: features_to_crop: A float32 tensor with shape [batch_size, height, width, depth] proposal_boxes_normalized: A float32 tensor with shape [batch_size, num_proposals, box_code_size] containing proposal boxes in normalized coordinates. image_shape: A 1D int32 tensors of size [4] containing the image shape. **side_inputs: additional tensors that are required by the network. Returns: A float32 tensor with shape [K, new_height, new_width, depth]. """ num_levels = len(features_to_crop) box_levels = None if num_levels != 1: # If there are multiple levels to select, get the box levels # unit_scale_index: num_levels-2 is chosen based on section 4.2 of # https://arxiv.org/pdf/1612.03144.pdf and works best for Resnet based # feature extractor. box_levels = ops.fpn_feature_levels( num_levels, num_levels - 2, tf.sqrt(tf.cast(image_shape[1] * image_shape[2], tf.float32)) / 224.0, proposal_boxes_normalized) cropped_regions = self._flatten_first_two_dimensions( self._crop_and_resize_fn( features_to_crop, proposal_boxes_normalized, box_levels, [self._initial_crop_size, self._initial_crop_size])) return self._maxpool_layer(cropped_regions) def _postprocess_box_classifier(self, refined_box_encodings, class_predictions_with_background, proposal_boxes, num_proposals, image_shapes, mask_predictions=None): """Converts predictions from the second stage box classifier to detections. Args: refined_box_encodings: a 3-D float tensor with shape [total_num_padded_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings. If using a shared box across classes the shape will instead be [total_num_padded_proposals, 1, 4] class_predictions_with_background: a 2-D tensor float with shape [total_num_padded_proposals, num_classes + 1] containing class predictions (logits) for each of the proposals. Note that this tensor *includes* background class predictions (at class index 0). proposal_boxes: a 3-D float tensor with shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. num_proposals: a 1-D int32 tensor of shape [batch] representing the number of proposals predicted for each image in the batch. image_shapes: a 2-D int32 tensor containing shapes of input image in the batch. mask_predictions: (optional) a 4-D float tensor with shape [total_num_padded_proposals, num_classes, mask_height, mask_width] containing instance mask prediction logits. Returns: A dictionary containing: `detection_boxes`: [batch, max_detection, 4] in normalized co-ordinates. `detection_scores`: [batch, max_detections] `detection_multiclass_scores`: [batch, max_detections, num_classes_with_background] tensor with class score distribution for post-processed detection boxes including background class if any. `detection_anchor_indices`: [batch, max_detections] with anchor indices. `detection_classes`: [batch, max_detections] `num_detections`: [batch] `detection_masks`: (optional) [batch, max_detections, mask_height, mask_width]. Note that a pixel-wise sigmoid score converter is applied to the detection masks. `raw_detection_boxes`: [batch, total_detections, 4] tensor with decoded detection boxes in normalized coordinates, before Non-Max Suppression. The value total_detections is the number of second stage anchors (i.e. the total number of boxes before NMS). 
`raw_detection_scores`: [batch, total_detections, num_classes_with_background] tensor of multi-class scores for raw detection boxes. The value total_detections is the number of second stage anchors (i.e. the total number of boxes before NMS). """ refined_box_encodings_batch = tf.reshape( refined_box_encodings, [-1, self.max_num_proposals, refined_box_encodings.shape[1], self._box_coder.code_size]) class_predictions_with_background_batch = tf.reshape( class_predictions_with_background, [-1, self.max_num_proposals, self.num_classes + 1] ) refined_decoded_boxes_batch = self._batch_decode_boxes( refined_box_encodings_batch, proposal_boxes) class_predictions_with_background_batch_normalized = ( self._second_stage_score_conversion_fn( class_predictions_with_background_batch)) class_predictions_batch = tf.reshape( tf.slice(class_predictions_with_background_batch_normalized, [0, 0, 1], [-1, -1, -1]), [-1, self.max_num_proposals, self.num_classes]) clip_window = self._compute_clip_window(image_shapes) mask_predictions_batch = None if mask_predictions is not None: mask_height = shape_utils.get_dim_as_int(mask_predictions.shape[2]) mask_width = shape_utils.get_dim_as_int(mask_predictions.shape[3]) mask_predictions = tf.sigmoid(mask_predictions) mask_predictions_batch = tf.reshape( mask_predictions, [-1, self.max_num_proposals, self.num_classes, mask_height, mask_width]) batch_size = shape_utils.combined_static_and_dynamic_shape( refined_box_encodings_batch)[0] batch_anchor_indices = tf.tile( tf.expand_dims(tf.range(self.max_num_proposals), 0), multiples=[batch_size, 1]) additional_fields = { 'multiclass_scores': class_predictions_with_background_batch_normalized, 'anchor_indices': tf.cast(batch_anchor_indices, tf.float32) } (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections) = self._second_stage_nms_fn( refined_decoded_boxes_batch, class_predictions_batch, clip_window=clip_window, change_coordinate_frame=True, num_valid_boxes=num_proposals, additional_fields=additional_fields, masks=mask_predictions_batch) if refined_decoded_boxes_batch.shape[2] > 1: class_ids = tf.expand_dims( tf.argmax(class_predictions_with_background_batch[:, :, 1:], axis=2, output_type=tf.int32), axis=-1) raw_detection_boxes = tf.squeeze( tf.batch_gather(refined_decoded_boxes_batch, class_ids), axis=2) else: raw_detection_boxes = tf.squeeze(refined_decoded_boxes_batch, axis=2) raw_normalized_detection_boxes = shape_utils.static_or_dynamic_map_fn( self._normalize_and_clip_boxes, elems=[raw_detection_boxes, image_shapes], dtype=tf.float32) detections = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.detection_multiclass_scores: nmsed_additional_fields['multiclass_scores'], fields.DetectionResultFields.detection_anchor_indices: tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32), fields.DetectionResultFields.num_detections: tf.cast(num_detections, dtype=tf.float32), fields.DetectionResultFields.raw_detection_boxes: raw_normalized_detection_boxes, fields.DetectionResultFields.raw_detection_scores: class_predictions_with_background_batch_normalized } if nmsed_masks is not None: detections[fields.DetectionResultFields.detection_masks] = nmsed_masks return detections def _batch_decode_boxes(self, box_encodings, anchor_boxes): """Decodes box encodings with respect to the anchor boxes. 
Args:
      box_encodings: a 4-D tensor with shape
        [batch_size, num_anchors, num_classes, self._box_coder.code_size]
        representing box encodings.
      anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size]
        representing decoded bounding boxes. If using a shared box across
        classes the shape will instead be
        [total_num_proposals, 1, self._box_coder.code_size].

    Returns:
      decoded_boxes: a
        [batch_size, num_anchors, num_classes, self._box_coder.code_size]
        float tensor representing bounding box predictions (for each image in
        batch, proposal and class). If using a shared box across classes the
        shape will instead be
        [batch_size, num_anchors, 1, self._box_coder.code_size].
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(
        box_encodings)
    num_classes = combined_shape[2]
    tiled_anchor_boxes = tf.tile(
        tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])
    tiled_anchors_boxlist = box_list.BoxList(
        tf.reshape(tiled_anchor_boxes, [-1, 4]))
    decoded_boxes = self._box_coder.decode(
        tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
        tiled_anchors_boxlist)
    return tf.reshape(decoded_boxes.get(),
                      tf.stack([combined_shape[0], combined_shape[1],
                                num_classes, 4]))

  def _normalize_and_clip_boxes(self, boxes_and_image_shape):
    """Normalize and clip boxes."""
    boxes_per_image = boxes_and_image_shape[0]
    image_shape = boxes_and_image_shape[1]

    boxes_contains_classes_dim = boxes_per_image.shape.ndims == 3
    if boxes_contains_classes_dim:
      boxes_per_image = shape_utils.flatten_first_n_dimensions(
          boxes_per_image, 2)
    normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
        box_list.BoxList(boxes_per_image),
        image_shape[0],
        image_shape[1],
        check_range=False).get()

    normalized_boxes_per_image = box_list_ops.clip_to_window(
        box_list.BoxList(normalized_boxes_per_image),
        tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32),
        filter_nonoverlapping=False).get()

    if boxes_contains_classes_dim:
      max_num_proposals, num_classes, _ = (
          shape_utils.combined_static_and_dynamic_shape(
              boxes_and_image_shape[0]))
      normalized_boxes_per_image = shape_utils.expand_first_dimension(
          normalized_boxes_per_image, [max_num_proposals, num_classes])

    return normalized_boxes_per_image

  def loss(self, prediction_dict, true_image_shapes, scope=None):
    """Compute scalar loss tensors given prediction tensors.

    If number_of_stages=1, only RPN related losses are computed (i.e.,
    `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all
    losses are computed.

    Args:
      prediction_dict: a dictionary holding prediction tensors (see the
        documentation for the predict method). If number_of_stages=1, we
        expect prediction_dict to contain `rpn_box_encodings`,
        `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,
        `image_shape`, and `anchors` fields. Otherwise we expect
        prediction_dict to additionally contain `refined_box_encodings`,
        `class_predictions_with_background`, `num_proposals`, and
        `proposal_boxes` fields.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
      scope: Optional scope name.

    Returns:
      a dictionary mapping loss keys (`first_stage_localization_loss`,
        `first_stage_objectness_loss`, `second_stage_localization_loss`,
        `second_stage_classification_loss`) to scalar tensors representing
        corresponding loss values.
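
    A minimal usage sketch (illustrative only; it assumes `model` is an
    already-built FasterRCNNMetaArch instance that has been given groundtruth
    via `provide_groundtruth`, and `images` is a float32 image batch):

      preprocessed_inputs, true_image_shapes = model.preprocess(images)
      prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
      losses_dict = model.loss(prediction_dict, true_image_shapes)
      total_loss = tf.add_n(list(losses_dict.values()))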
""" with tf.name_scope(scope, 'Loss', prediction_dict.values()): (groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_masks_list, groundtruth_weights_list ) = self._format_groundtruth_data( self._image_batch_shape_2d(prediction_dict['image_shape'])) loss_dict = self._loss_rpn( prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors'], groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list) if self._number_of_stages > 1: loss_dict.update( self._loss_box_classifier( prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['proposal_boxes'], prediction_dict['num_proposals'], groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list, prediction_dict['image_shape'], prediction_dict.get('mask_predictions'), groundtruth_masks_list, prediction_dict.get( fields.DetectionResultFields.detection_boxes), prediction_dict.get( fields.DetectionResultFields.num_detections))) return loss_dict def _loss_rpn(self, rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list): """Computes scalar RPN loss tensors. Uses self._proposal_target_assigner to obtain regression and classification targets for the first stage RPN, samples a "minibatch" of anchors to participate in the loss computation, and returns the RPN losses. Args: rpn_box_encodings: A 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted proposal box encodings. rpn_objectness_predictions_with_background: A 2-D float tensor of shape [batch_size, num_anchors, 2] containing objectness predictions (logits) for each of the anchors with 0 corresponding to background and 1 corresponding to object. anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN. Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. groundtruth_boxlists: A list of BoxLists containing coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: A list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the class targets with the 0th index assumed to map to the background class. groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. Returns: a dictionary mapping loss keys (`first_stage_localization_loss`, `first_stage_objectness_loss`) to scalar tensors representing corresponding loss values. 
""" with tf.name_scope('RPNLoss'): (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, _) = target_assigner.batch_assign_targets( target_assigner=self._proposal_target_assigner, anchors_batch=box_list.BoxList(anchors), gt_box_batch=groundtruth_boxlists, gt_class_targets_batch=(len(groundtruth_boxlists) * [None]), gt_weights_batch=groundtruth_weights_list) batch_cls_weights = tf.reduce_mean(batch_cls_weights, axis=2) batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2) def _minibatch_subsample_fn(inputs): cls_targets, cls_weights = inputs return self._first_stage_sampler.subsample( tf.cast(cls_weights, tf.bool), self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool)) batch_sampled_indices = tf.cast(shape_utils.static_or_dynamic_map_fn( _minibatch_subsample_fn, [batch_cls_targets, batch_cls_weights], dtype=tf.bool, parallel_iterations=self._parallel_iterations, back_prop=True), dtype=tf.float32) # Normalize by number of examples in sampled minibatch normalizer = tf.maximum( tf.reduce_sum(batch_sampled_indices, axis=1), 1.0) batch_one_hot_targets = tf.one_hot( tf.cast(batch_cls_targets, dtype=tf.int32), depth=2) sampled_reg_indices = tf.multiply(batch_sampled_indices, batch_reg_weights) losses_mask = None if self.groundtruth_has_field(fields.InputDataFields.is_annotated): losses_mask = tf.stack(self.groundtruth_lists( fields.InputDataFields.is_annotated)) localization_losses = self._first_stage_localization_loss( rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices, losses_mask=losses_mask) objectness_losses = self._first_stage_objectness_loss( rpn_objectness_predictions_with_background, batch_one_hot_targets, weights=tf.expand_dims(batch_sampled_indices, axis=-1), losses_mask=losses_mask) localization_loss = tf.reduce_mean( tf.reduce_sum(localization_losses, axis=1) / normalizer) objectness_loss = tf.reduce_mean( tf.reduce_sum(objectness_losses, axis=1) / normalizer) localization_loss = tf.multiply(self._first_stage_loc_loss_weight, localization_loss, name='localization_loss') objectness_loss = tf.multiply(self._first_stage_obj_loss_weight, objectness_loss, name='objectness_loss') loss_dict = {'Loss/RPNLoss/localization_loss': localization_loss, 'Loss/RPNLoss/objectness_loss': objectness_loss} return loss_dict def _loss_box_classifier(self, refined_box_encodings, class_predictions_with_background, proposal_boxes, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list, image_shape, prediction_masks=None, groundtruth_masks_list=None, detection_boxes=None, num_detections=None): """Computes scalar box classifier loss tensors. Uses self._detector_target_assigner to obtain regression and classification targets for the second stage box classifier, optionally performs hard mining, and returns losses. All losses are computed independently for each image and then averaged across the batch. Please note that for boxes and masks with multiple labels, the box regression and mask prediction losses are only computed for one label. This function assumes that the proposal boxes in the "padded" regions are actually zero (and thus should not be matched to). Args: refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, box_coder.code_size] representing predicted (final) refined box encodings. If using a shared box across classes this will instead have shape [total_num_proposals, 1, box_coder.code_size]. 
class_predictions_with_background: a 2-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). proposal_boxes: [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. groundtruth_boxlists: a list of BoxLists containing coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: a list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the class targets with the 0th index assumed to map to the background class. groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. image_shape: a 1-D tensor of shape [4] representing the image shape. prediction_masks: an optional 4-D tensor with shape [total_num_proposals, num_classes, mask_height, mask_width] containing the instance masks for each box. groundtruth_masks_list: an optional list of 3-D tensors of shape [num_boxes, image_height, image_width] containing the instance masks for each of the boxes. detection_boxes: 3-D float tensor of shape [batch, max_total_detections, 4] containing post-processed detection boxes in normalized co-ordinates. num_detections: 1-D int32 tensor of shape [batch] containing number of valid detections in `detection_boxes`. Returns: a dictionary mapping loss keys ('second_stage_localization_loss', 'second_stage_classification_loss') to scalar tensors representing corresponding loss values. Raises: ValueError: if `predict_instance_masks` in second_stage_mask_rcnn_box_predictor is True and `groundtruth_masks_list` is not provided. 
""" with tf.name_scope('BoxClassifierLoss'): paddings_indicator = self._padded_batched_proposals_indicator( num_proposals, proposal_boxes.shape[1]) proposal_boxlists = [ box_list.BoxList(proposal_boxes_single_image) for proposal_boxes_single_image in tf.unstack(proposal_boxes)] batch_size = len(proposal_boxlists) num_proposals_or_one = tf.cast(tf.expand_dims( tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1), dtype=tf.float32) normalizer = tf.tile(num_proposals_or_one, [1, self.max_num_proposals]) * batch_size (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets, batch_reg_weights, _) = target_assigner.batch_assign_targets( target_assigner=self._detector_target_assigner, anchors_batch=proposal_boxlists, gt_box_batch=groundtruth_boxlists, gt_class_targets_batch=groundtruth_classes_with_background_list, unmatched_class_label=tf.constant( [1] + self._num_classes * [0], dtype=tf.float32), gt_weights_batch=groundtruth_weights_list) if self.groundtruth_has_field( fields.InputDataFields.groundtruth_labeled_classes): gt_labeled_classes = self.groundtruth_lists( fields.InputDataFields.groundtruth_labeled_classes) gt_labeled_classes = tf.pad( gt_labeled_classes, [[0, 0], [1, 0]], mode='CONSTANT', constant_values=1) batch_cls_weights *= tf.expand_dims(gt_labeled_classes, 1) class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, self.max_num_proposals, -1]) flat_cls_targets_with_background = tf.reshape( batch_cls_targets_with_background, [batch_size * self.max_num_proposals, -1]) one_hot_flat_cls_targets_with_background = tf.argmax( flat_cls_targets_with_background, axis=1) one_hot_flat_cls_targets_with_background = tf.one_hot( one_hot_flat_cls_targets_with_background, flat_cls_targets_with_background.get_shape()[1]) # If using a shared box across classes use directly if refined_box_encodings.shape[1] == 1: reshaped_refined_box_encodings = tf.reshape( refined_box_encodings, [batch_size, self.max_num_proposals, self._box_coder.code_size]) # For anchors with multiple labels, picks refined_location_encodings # for just one class to avoid over-counting for regression loss and # (optionally) mask loss. 
else: reshaped_refined_box_encodings = ( self._get_refined_encodings_for_postitive_class( refined_box_encodings, one_hot_flat_cls_targets_with_background, batch_size)) losses_mask = None if self.groundtruth_has_field(fields.InputDataFields.is_annotated): losses_mask = tf.stack(self.groundtruth_lists( fields.InputDataFields.is_annotated)) second_stage_loc_losses = self._second_stage_localization_loss( reshaped_refined_box_encodings, batch_reg_targets, weights=batch_reg_weights, losses_mask=losses_mask) / normalizer second_stage_cls_losses = ops.reduce_sum_trailing_dimensions( self._second_stage_classification_loss( class_predictions_with_background, batch_cls_targets_with_background, weights=batch_cls_weights, losses_mask=losses_mask), ndims=2) / normalizer second_stage_loc_loss = tf.reduce_sum( second_stage_loc_losses * tf.cast(paddings_indicator, dtype=tf.float32)) second_stage_cls_loss = tf.reduce_sum( second_stage_cls_losses * tf.cast(paddings_indicator, dtype=tf.float32)) if self._hard_example_miner: (second_stage_loc_loss, second_stage_cls_loss ) = self._unpad_proposals_and_apply_hard_mining( proposal_boxlists, second_stage_loc_losses, second_stage_cls_losses, num_proposals) localization_loss = tf.multiply(self._second_stage_loc_loss_weight, second_stage_loc_loss, name='localization_loss') classification_loss = tf.multiply(self._second_stage_cls_loss_weight, second_stage_cls_loss, name='classification_loss') loss_dict = {'Loss/BoxClassifierLoss/localization_loss': localization_loss, 'Loss/BoxClassifierLoss/classification_loss': classification_loss} second_stage_mask_loss = None if prediction_masks is not None: if groundtruth_masks_list is None: raise ValueError('Groundtruth instance masks not provided. ' 'Please configure input reader.') if not self._is_training: (proposal_boxes, proposal_boxlists, paddings_indicator, one_hot_flat_cls_targets_with_background ) = self._get_mask_proposal_boxes_and_classes( detection_boxes, num_detections, image_shape, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list) unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32) (batch_mask_targets, _, _, batch_mask_target_weights, _) = target_assigner.batch_assign_targets( target_assigner=self._detector_target_assigner, anchors_batch=proposal_boxlists, gt_box_batch=groundtruth_boxlists, gt_class_targets_batch=groundtruth_masks_list, unmatched_class_label=unmatched_mask_label, gt_weights_batch=groundtruth_weights_list) # Pad the prediction_masks with to add zeros for background class to be # consistent with class predictions. if prediction_masks.get_shape().as_list()[1] == 1: # Class agnostic masks or masks for one-class prediction. Logic for # both cases is the same since background predictions are ignored # through the batch_mask_target_weights. 
prediction_masks_masked_by_class_targets = prediction_masks else: prediction_masks_with_background = tf.pad( prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]]) prediction_masks_masked_by_class_targets = tf.boolean_mask( prediction_masks_with_background, tf.greater(one_hot_flat_cls_targets_with_background, 0)) mask_height = shape_utils.get_dim_as_int(prediction_masks.shape[2]) mask_width = shape_utils.get_dim_as_int(prediction_masks.shape[3]) reshaped_prediction_masks = tf.reshape( prediction_masks_masked_by_class_targets, [batch_size, -1, mask_height * mask_width]) batch_mask_targets_shape = tf.shape(batch_mask_targets) flat_gt_masks = tf.reshape(batch_mask_targets, [-1, batch_mask_targets_shape[2], batch_mask_targets_shape[3]]) # Use normalized proposals to crop mask targets from image masks. flat_normalized_proposals = box_list_ops.to_normalized_coordinates( box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])), image_shape[1], image_shape[2], check_range=False).get() flat_cropped_gt_mask = self._crop_and_resize_fn( [tf.expand_dims(flat_gt_masks, -1)], tf.expand_dims(flat_normalized_proposals, axis=1), None, [mask_height, mask_width]) # Without stopping gradients into cropped groundtruth masks the # performance with 100-padded groundtruth masks when batch size > 1 is # about 4% worse. # TODO(rathodv): Investigate this since we don't expect any variables # upstream of flat_cropped_gt_mask. flat_cropped_gt_mask = tf.stop_gradient(flat_cropped_gt_mask) batch_cropped_gt_mask = tf.reshape( flat_cropped_gt_mask, [batch_size, -1, mask_height * mask_width]) mask_losses_weights = ( batch_mask_target_weights * tf.cast(paddings_indicator, dtype=tf.float32)) mask_losses = self._second_stage_mask_loss( reshaped_prediction_masks, batch_cropped_gt_mask, weights=tf.expand_dims(mask_losses_weights, axis=-1), losses_mask=losses_mask) total_mask_loss = tf.reduce_sum(mask_losses) normalizer = tf.maximum( tf.reduce_sum(mask_losses_weights * mask_height * mask_width), 1.0) second_stage_mask_loss = total_mask_loss / normalizer if second_stage_mask_loss is not None: mask_loss = tf.multiply(self._second_stage_mask_loss_weight, second_stage_mask_loss, name='mask_loss') loss_dict['Loss/BoxClassifierLoss/mask_loss'] = mask_loss return loss_dict def _get_mask_proposal_boxes_and_classes( self, detection_boxes, num_detections, image_shape, groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list): """Returns proposal boxes and class targets to compute evaluation mask loss. During evaluation, detection boxes are used to extract features for mask prediction. Therefore, to compute mask loss during evaluation detection boxes must be used to compute correct class and mask targets. This function returns boxes and classes in the correct format for computing mask targets during evaluation. Args: detection_boxes: A 3-D float tensor of shape [batch, max_detection_boxes, 4] containing detection boxes in normalized co-ordinates. num_detections: A 1-D float tensor of shape [batch] containing number of valid boxes in `detection_boxes`. image_shape: A 1-D tensor of shape [4] containing image tensor shape. groundtruth_boxlists: A list of groundtruth boxlists. groundtruth_classes_with_background_list: A list of groundtruth classes. groundtruth_weights_list: A list of groundtruth weights. Return: mask_proposal_boxes: detection boxes to use for mask proposals in absolute co-ordinates. mask_proposal_boxlists: `mask_proposal_boxes` in a list of BoxLists in absolute co-ordinates. 
mask_proposal_paddings_indicator: a tensor indicating valid boxes. mask_proposal_one_hot_flat_cls_targets_with_background: Class targets computed using detection boxes. """ batch, max_num_detections, _ = detection_boxes.shape.as_list() proposal_boxes = tf.reshape(box_list_ops.to_absolute_coordinates( box_list.BoxList(tf.reshape(detection_boxes, [-1, 4])), image_shape[1], image_shape[2]).get(), [batch, max_num_detections, 4]) proposal_boxlists = [ box_list.BoxList(detection_boxes_single_image) for detection_boxes_single_image in tf.unstack(proposal_boxes) ] paddings_indicator = self._padded_batched_proposals_indicator( tf.cast(num_detections, dtype=tf.int32), detection_boxes.shape[1]) (batch_cls_targets_with_background, _, _, _, _) = target_assigner.batch_assign_targets( target_assigner=self._detector_target_assigner, anchors_batch=proposal_boxlists, gt_box_batch=groundtruth_boxlists, gt_class_targets_batch=groundtruth_classes_with_background_list, unmatched_class_label=tf.constant( [1] + self._num_classes * [0], dtype=tf.float32), gt_weights_batch=groundtruth_weights_list) flat_cls_targets_with_background = tf.reshape( batch_cls_targets_with_background, [-1, self._num_classes + 1]) one_hot_flat_cls_targets_with_background = tf.argmax( flat_cls_targets_with_background, axis=1) one_hot_flat_cls_targets_with_background = tf.one_hot( one_hot_flat_cls_targets_with_background, flat_cls_targets_with_background.get_shape()[1]) return (proposal_boxes, proposal_boxlists, paddings_indicator, one_hot_flat_cls_targets_with_background) def _get_refined_encodings_for_postitive_class( self, refined_box_encodings, flat_cls_targets_with_background, batch_size): # We only predict refined location encodings for the non background # classes, but we now pad it to make it compatible with the class # predictions refined_box_encodings_with_background = tf.pad(refined_box_encodings, [[0, 0], [1, 0], [0, 0]]) refined_box_encodings_masked_by_class_targets = ( box_list_ops.boolean_mask( box_list.BoxList( tf.reshape(refined_box_encodings_with_background, [-1, self._box_coder.code_size])), tf.reshape(tf.greater(flat_cls_targets_with_background, 0), [-1]), use_static_shapes=self._use_static_shapes, indicator_sum=batch_size * self.max_num_proposals if self._use_static_shapes else None).get()) return tf.reshape( refined_box_encodings_masked_by_class_targets, [ batch_size, self.max_num_proposals, self._box_coder.code_size ]) def _padded_batched_proposals_indicator(self, num_proposals, max_num_proposals): """Creates indicator matrix of non-pad elements of padded batch proposals. Args: num_proposals: Tensor of type tf.int32 with shape [batch_size]. max_num_proposals: Maximum number of proposals per image (integer). Returns: A Tensor of type tf.bool with shape [batch_size, max_num_proposals]. """ batch_size = tf.size(num_proposals) tiled_num_proposals = tf.tile( tf.expand_dims(num_proposals, 1), [1, max_num_proposals]) tiled_proposal_index = tf.tile( tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1]) return tf.greater(tiled_num_proposals, tiled_proposal_index) def _unpad_proposals_and_apply_hard_mining(self, proposal_boxlists, second_stage_loc_losses, second_stage_cls_losses, num_proposals): """Unpads proposals and applies hard mining. Args: proposal_boxlists: A list of `batch_size` BoxLists each representing `self.max_num_proposals` representing decoded proposal bounding boxes for each image. second_stage_loc_losses: A Tensor of type `float32`. 
A tensor of shape `[batch_size, self.max_num_proposals]` representing per-anchor second stage localization loss values. second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape `[batch_size, self.max_num_proposals]` representing per-anchor second stage classification loss values. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. Returns: second_stage_loc_loss: A scalar float32 tensor representing the second stage localization loss. second_stage_cls_loss: A scalar float32 tensor representing the second stage classification loss. """ for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss, single_image_num_proposals) in zip( proposal_boxlists, tf.unstack(second_stage_loc_losses), tf.unstack(second_stage_cls_losses), tf.unstack(num_proposals)): proposal_boxlist = box_list.BoxList( tf.slice(proposal_boxlist.get(), [0, 0], [single_image_num_proposals, -1])) single_image_loc_loss = tf.slice(single_image_loc_loss, [0], [single_image_num_proposals]) single_image_cls_loss = tf.slice(single_image_cls_loss, [0], [single_image_num_proposals]) return self._hard_example_miner( location_losses=tf.expand_dims(single_image_loc_loss, 0), cls_losses=tf.expand_dims(single_image_cls_loss, 0), decoded_boxlist_list=[proposal_boxlist]) def regularization_losses(self): """Returns a list of regularization losses for this model. Returns a list of regularization losses for this model that the estimator needs to use during training/optimization. Returns: A list of regularization loss tensors. """ all_losses = [] slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) # Copy the slim losses to avoid modifying the collection if slim_losses: all_losses.extend(slim_losses) # TODO(kaftan): Possibly raise an error if the feature extractors are # uninitialized in Keras. if self._feature_extractor_for_proposal_features: if (self._feature_extractor_for_proposal_features != _UNINITIALIZED_FEATURE_EXTRACTOR): all_losses.extend(self._feature_extractor_for_proposal_features.losses) if isinstance(self._first_stage_box_predictor_first_conv, tf.keras.Model): all_losses.extend( self._first_stage_box_predictor_first_conv.losses) if self._first_stage_box_predictor.is_keras_model: all_losses.extend(self._first_stage_box_predictor.losses) if self._feature_extractor_for_box_classifier_features: if (self._feature_extractor_for_box_classifier_features != _UNINITIALIZED_FEATURE_EXTRACTOR): all_losses.extend( self._feature_extractor_for_box_classifier_features.losses) if self._mask_rcnn_box_predictor: if self._mask_rcnn_box_predictor.is_keras_model: all_losses.extend(self._mask_rcnn_box_predictor.losses) return all_losses def restore_map(self, fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=False): """Returns a map of variables to load from a foreign checkpoint. See parent class for details. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. load_all_detection_checkpoint_vars: whether to load all variables (when `fine_tune_checkpoint_type` is `detection`). If False, only variables within the feature extractor scopes are included. Default False. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. 
Raises: ValueError: if fine_tune_checkpoint_type is neither `classification` nor `detection`. """ if fine_tune_checkpoint_type not in ['detection', 'classification']: raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( fine_tune_checkpoint_type)) if fine_tune_checkpoint_type == 'classification': return self._feature_extractor.restore_from_classification_checkpoint_fn( self.first_stage_feature_extractor_scope, self.second_stage_feature_extractor_scope) variables_to_restore = variables_helper.get_global_variables_safely() variables_to_restore.append(tf.train.get_or_create_global_step()) # Only load feature extractor variables to be consistent with loading from # a classification checkpoint. include_patterns = None if not load_all_detection_checkpoint_vars: include_patterns = [ self.first_stage_feature_extractor_scope, self.second_stage_feature_extractor_scope ] feature_extractor_variables = slim.filter_variables( variables_to_restore, include_patterns=include_patterns) return {var.op.name: var for var in feature_extractor_variables} def restore_from_objects(self, fine_tune_checkpoint_type='detection'): """Returns a map of Trackable objects to load from a foreign checkpoint. Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module or Checkpoint). This enables the model to initialize based on weights from another task. For example, the feature extractor variables from a classification model can be used to bootstrap training of an object detector. When loading from an object detection model, the checkpoint model should have the same parameters as this detection model with exception of the num_classes parameter. Note that this function is intended to be used to restore Keras-based models when running Tensorflow 2, whereas restore_map (above) is intended to be used to restore Slim-based models when running Tensorflow 1.x. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. Returns: A dict mapping keys to Trackable objects (tf.Module or Checkpoint). """ if fine_tune_checkpoint_type == 'classification': return { 'feature_extractor': self._feature_extractor.classification_backbone } elif fine_tune_checkpoint_type == 'detection': fake_model = tf.train.Checkpoint( _feature_extractor_for_box_classifier_features= self._feature_extractor_for_box_classifier_features, _feature_extractor_for_proposal_features= self._feature_extractor_for_proposal_features) return {'model': fake_model} elif fine_tune_checkpoint_type == 'full': return {'model': self} else: raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( fine_tune_checkpoint_type)) def updates(self): """Returns a list of update operators for this model. Returns a list of update operators for this model that must be executed at each training step. The estimator's train op needs to have a control dependency on these updates. Returns: A list of update operators. """ update_ops = [] slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Copy the slim ops to avoid modifying the collection if slim_update_ops: update_ops.extend(slim_update_ops) # Passing None to get_updates_for grabs updates that should always be # executed and don't depend on any model inputs in the graph. # (E.g. if there was some count that should be incremented every time a # model is run). 
# # Passing inputs grabs updates that are transitively computed from the # model inputs being passed in. # (E.g. a batchnorm update depends on the observed inputs) if self._feature_extractor_for_proposal_features: if (self._feature_extractor_for_proposal_features != _UNINITIALIZED_FEATURE_EXTRACTOR): update_ops.extend( self._feature_extractor_for_proposal_features.get_updates_for(None)) update_ops.extend( self._feature_extractor_for_proposal_features.get_updates_for( self._feature_extractor_for_proposal_features.inputs)) if isinstance(self._first_stage_box_predictor_first_conv, tf.keras.Model): update_ops.extend( self._first_stage_box_predictor_first_conv.get_updates_for( None)) update_ops.extend( self._first_stage_box_predictor_first_conv.get_updates_for( self._first_stage_box_predictor_first_conv.inputs)) if self._first_stage_box_predictor.is_keras_model: update_ops.extend( self._first_stage_box_predictor.get_updates_for(None)) update_ops.extend( self._first_stage_box_predictor.get_updates_for( self._first_stage_box_predictor.inputs)) if self._feature_extractor_for_box_classifier_features: if (self._feature_extractor_for_box_classifier_features != _UNINITIALIZED_FEATURE_EXTRACTOR): update_ops.extend( self._feature_extractor_for_box_classifier_features.get_updates_for( None)) update_ops.extend( self._feature_extractor_for_box_classifier_features.get_updates_for( self._feature_extractor_for_box_classifier_features.inputs)) if self._mask_rcnn_box_predictor: if self._mask_rcnn_box_predictor.is_keras_model: update_ops.extend( self._mask_rcnn_box_predictor.get_updates_for(None)) update_ops.extend( self._mask_rcnn_box_predictor.get_updates_for( self._mask_rcnn_box_predictor.inputs)) return update_ops
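

# Illustrative sketch of how the loss/regularization/update methods above are
# typically wired into a TF1-style train op. `model`, `optimizer` and `images`
# are assumed to exist (e.g. a built FasterRCNNMetaArch, a tf.compat.v1
# optimizer and an input batch); none of them are defined in this module.
#
#   preprocessed_inputs, true_image_shapes = model.preprocess(images)
#   prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
#   losses_dict = model.loss(prediction_dict, true_image_shapes)
#   total_loss = tf.add_n(
#       list(losses_dict.values()) + model.regularization_losses())
#   with tf.control_dependencies(model.updates()):
#     train_op = optimizer.minimize(total_loss)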
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/faster_rcnn_meta_arch.py
faster_rcnn_meta_arch.py
import collections import numpy as np import tensorflow as tf from object_detection.builders import losses_builder from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import losses from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.meta_architectures import center_net_meta_arch from object_detection.models.keras_models import hourglass_network from object_detection.models.keras_models import resnet_v1 from object_detection.protos import losses_pb2 from object_detection.protos import preprocessor_pb2 from object_detection.utils import shape_utils from object_detection.utils import spatial_transform_ops INSTANCE_EMBEDDING = 'INSTANCE_EMBEDDING' PIXEL_EMBEDDING = 'PIXEL_EMBEDDING' DEEP_MASK_ESTIMATION = 'deep_mask_estimation' LOSS_KEY_PREFIX = center_net_meta_arch.LOSS_KEY_PREFIX class DeepMACParams( collections.namedtuple('DeepMACParams', [ 'classification_loss', 'dim', 'task_loss_weight', 'pixel_embedding_dim', 'allowed_masked_classes_ids', 'mask_size', 'mask_num_subsamples', 'use_xy', 'network_type', 'use_instance_embedding', 'num_init_channels', 'predict_full_resolution_masks', 'postprocess_crop_size', 'max_roi_jitter_ratio', 'roi_jitter_mode' ])): """Class holding the DeepMAC network configutration.""" __slots__ = () def __new__(cls, classification_loss, dim, task_loss_weight, pixel_embedding_dim, allowed_masked_classes_ids, mask_size, mask_num_subsamples, use_xy, network_type, use_instance_embedding, num_init_channels, predict_full_resolution_masks, postprocess_crop_size, max_roi_jitter_ratio, roi_jitter_mode): return super(DeepMACParams, cls).__new__(cls, classification_loss, dim, task_loss_weight, pixel_embedding_dim, allowed_masked_classes_ids, mask_size, mask_num_subsamples, use_xy, network_type, use_instance_embedding, num_init_channels, predict_full_resolution_masks, postprocess_crop_size, max_roi_jitter_ratio, roi_jitter_mode) def subsample_instances(classes, weights, boxes, masks, num_subsamples): """Randomly subsamples instances to the desired number. Args: classes: [num_instances, num_classes] float tensor of one-hot encoded classes. weights: [num_instances] float tensor of weights of each instance. boxes: [num_instances, 4] tensor of box coordinates. masks: [num_instances, height, width] tensor of per-instance masks. num_subsamples: int, the desired number of samples. Returns: classes: [num_subsamples, num_classes] float tensor of classes. weights: [num_subsamples] float tensor of weights. boxes: [num_subsamples, 4] float tensor of box coordinates. masks: [num_subsamples, height, width] float tensor of per-instance masks. 
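
  Example (an illustrative sketch with small assumed shapes):

    classes = tf.one_hot([0, 1, 2], depth=3)   # [3, 3]
    weights = tf.ones([3])                     # [3]
    boxes = tf.random.uniform([3, 4])          # [3, 4]
    masks = tf.random.uniform([3, 32, 32])     # [3, 32, 32]
    classes, weights, boxes, masks = subsample_instances(
        classes, weights, boxes, masks, num_subsamples=2)
    # Each returned tensor now has leading dimension 2.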
""" if num_subsamples <= -1: return classes, weights, boxes, masks num_instances = tf.reduce_sum(tf.cast(weights > 0.5, tf.int32)) if num_instances <= num_subsamples: return (classes[:num_subsamples], weights[:num_subsamples], boxes[:num_subsamples], masks[:num_subsamples]) else: random_index = tf.random.uniform([num_subsamples], 0, num_instances, dtype=tf.int32) return (tf.gather(classes, random_index), tf.gather(weights, random_index), tf.gather(boxes, random_index), tf.gather(masks, random_index)) def _get_deepmac_network_by_type(name, num_init_channels, mask_size=None): """Get DeepMAC network model given a string type.""" if name.startswith('hourglass'): if name == 'hourglass10': return hourglass_network.hourglass_10(num_init_channels, initial_downsample=False) elif name == 'hourglass20': return hourglass_network.hourglass_20(num_init_channels, initial_downsample=False) elif name == 'hourglass32': return hourglass_network.hourglass_32(num_init_channels, initial_downsample=False) elif name == 'hourglass52': return hourglass_network.hourglass_52(num_init_channels, initial_downsample=False) elif name == 'hourglass100': return hourglass_network.hourglass_100(num_init_channels, initial_downsample=False) elif name == 'hourglass20_uniform_size': return hourglass_network.hourglass_20_uniform_size(num_init_channels) elif name == 'hourglass20_no_shortcut': return hourglass_network.hourglass_20_no_shortcut(num_init_channels) elif name == 'fully_connected': if not mask_size: raise ValueError('Mask size must be set.') return FullyConnectedMaskHead(num_init_channels, mask_size) elif name.startswith('resnet'): return ResNetMaskNetwork(name, num_init_channels) raise ValueError('Unknown network type {}'.format(name)) def crop_masks_within_boxes(masks, boxes, output_size): """Crops masks to lie tightly within the boxes. Args: masks: A [num_instances, height, width] float tensor of masks. boxes: A [num_instances, 4] sized tensor of normalized bounding boxes. output_size: The height and width of the output masks. Returns: masks: A [num_instances, output_size, output_size] tensor of masks which are cropped to be tightly within the gives boxes and resized. """ masks = spatial_transform_ops.matmul_crop_and_resize( masks[:, :, :, tf.newaxis], boxes[:, tf.newaxis, :], [output_size, output_size]) return masks[:, 0, :, :, 0] def resize_instance_masks(masks, shape): height, width = shape masks_ex = masks[:, :, :, tf.newaxis] masks_ex = tf.image.resize(masks_ex, (height, width), method=tf.image.ResizeMethod.BILINEAR) masks = masks_ex[:, :, :, 0] return masks def filter_masked_classes(masked_class_ids, classes, weights, masks): """Filter out masks whose class IDs are not present in masked_class_ids. Args: masked_class_ids: A list of class IDs allowed to have masks. These class IDs are 1-indexed. classes: A [num_instances, num_classes] float tensor containing the one-hot encoded classes. weights: A [num_instances] float tensor containing the weights of each sample. masks: A [num_instances, height, width] tensor containing the mask per instance. Returns: classes_filtered: A [num_instances, num_classes] float tensor containing the one-hot encoded classes with classes not in masked_class_ids zeroed out. weights_filtered: A [num_instances] float tensor containing the weights of each sample with instances whose classes aren't in masked_class_ids zeroed out. masks_filtered: A [num_instances, height, width] tensor containing the mask per instance with masks not belonging to masked_class_ids zeroed out. 
""" if len(masked_class_ids) == 0: # pylint:disable=g-explicit-length-test return classes, weights, masks if tf.shape(classes)[0] == 0: return classes, weights, masks masked_class_ids = tf.constant(np.array(masked_class_ids, dtype=np.int32)) label_id_offset = 1 masked_class_ids -= label_id_offset class_ids = tf.argmax(classes, axis=1, output_type=tf.int32) matched_classes = tf.equal( class_ids[:, tf.newaxis], masked_class_ids[tf.newaxis, :] ) matched_classes = tf.reduce_any(matched_classes, axis=1) matched_classes = tf.cast(matched_classes, tf.float32) return ( classes * matched_classes[:, tf.newaxis], weights * matched_classes, masks * matched_classes[:, tf.newaxis, tf.newaxis] ) class ResNetMaskNetwork(tf.keras.layers.Layer): """A small wrapper around ResNet blocks to predict masks.""" def __init__(self, resnet_type, num_init_channels): """Creates the ResNet mask network. Args: resnet_type: A string of the for resnetN where N where N is in [4, 8, 12, 16, 20] num_init_channels: Number of filters in the ResNet block. """ super(ResNetMaskNetwork, self).__init__() nc = num_init_channels if resnet_type == 'resnet4': channel_dims = [nc * 2] blocks = [2] elif resnet_type == 'resnet8': channel_dims = [nc * 2] blocks = [4] elif resnet_type == 'resnet12': channel_dims = [nc * 2] blocks = [6] elif resnet_type == 'resnet16': channel_dims = [nc * 2] blocks = [8] # Defined such that the channels are roughly similar to the hourglass20. elif resnet_type == 'resnet20': channel_dims = [nc * 2, nc * 3] blocks = [8, 2] else: raise ValueError('Unknown resnet type "{}"'.format(resnet_type)) self.input_layer = tf.keras.layers.Conv2D(nc, 1, 1) # Last channel has to be defined so that batch norm can initialize properly. model_input = tf.keras.layers.Input([None, None, nc]) output = model_input for i, (num_blocks, channels) in enumerate(zip(blocks, channel_dims)): output = resnet_v1.stack_basic(output, filters=channels, blocks=num_blocks, stride1=1, name='resnet_mask_block_%d' % i) self.model = tf.keras.Model(inputs=model_input, outputs=output) def __call__(self, inputs): return self.model(self.input_layer(inputs)) class FullyConnectedMaskHead(tf.keras.layers.Layer): """A 2 layer fully connected mask head.""" def __init__(self, num_init_channels, mask_size): super(FullyConnectedMaskHead, self).__init__() self.fc1 = tf.keras.layers.Dense(units=1024, activation='relu') self.fc2 = tf.keras.layers.Dense(units=mask_size*mask_size) self.mask_size = mask_size self.num_input_channels = num_init_channels self.input_layer = tf.keras.layers.Conv2D(num_init_channels, 1, 1) model_input = tf.keras.layers.Input( [mask_size * mask_size * num_init_channels,]) output = self.fc2(self.fc1(model_input)) self.model = tf.keras.Model(inputs=model_input, outputs=output) def __call__(self, inputs): inputs = self.input_layer(inputs) inputs_shape = tf.shape(inputs) num_instances = inputs_shape[0] height = inputs_shape[1] width = inputs_shape[2] dims = inputs_shape[3] flattened_inputs = tf.reshape(inputs, [num_instances, height * width * dims]) flattened_masks = self.model(flattened_inputs) return tf.reshape(flattened_masks, [num_instances, self.mask_size, self.mask_size, 1]) class MaskHeadNetwork(tf.keras.layers.Layer): """Mask head class for DeepMAC.""" def __init__(self, network_type, num_init_channels=64, use_instance_embedding=True, mask_size=None): """Initializes the network. Args: network_type: A string denoting the kind of network we want to use internally. num_init_channels: int, the number of channels in the first block. 
The number of channels in the following blocks depends on the network type used. use_instance_embedding: bool, if set, we concatenate the instance embedding to the input while predicting the mask. mask_size: int, size of the output mask. Required only with `fully_connected` mask type. """ super(MaskHeadNetwork, self).__init__() self._net = _get_deepmac_network_by_type( network_type, num_init_channels, mask_size) self._use_instance_embedding = use_instance_embedding self.project_out = tf.keras.layers.Conv2D( filters=1, kernel_size=1, activation=None) def __call__(self, instance_embedding, pixel_embedding, training): """Returns mask logits given object center and spatial embeddings. Args: instance_embedding: A [num_instances, embedding_size] float tensor representing the center embedding vector of each instance. pixel_embedding: A [num_instances, height, width, pixel_embedding_size] float tensor representing the per-pixel spatial embedding for each instance. training: boolean flag indicating training or testing mode. Returns: mask: A [num_instances, height, width] float tensor containing the mask logits for each instance. """ height = tf.shape(pixel_embedding)[1] width = tf.shape(pixel_embedding)[2] instance_embedding = instance_embedding[:, tf.newaxis, tf.newaxis, :] instance_embedding = tf.tile(instance_embedding, [1, height, width, 1]) if self._use_instance_embedding: inputs = tf.concat([pixel_embedding, instance_embedding], axis=3) else: inputs = pixel_embedding out = self._net(inputs) if isinstance(out, list): out = out[-1] if out.shape[-1] > 1: out = self.project_out(out) return tf.squeeze(out, axis=-1) def deepmac_proto_to_params(deepmac_config): """Convert proto to named tuple.""" loss = losses_pb2.Loss() # Add dummy localization loss to avoid the loss_builder throwing error. loss.localization_loss.weighted_l2.CopyFrom( losses_pb2.WeightedL2LocalizationLoss()) loss.classification_loss.CopyFrom(deepmac_config.classification_loss) classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) jitter_mode = preprocessor_pb2.RandomJitterBoxes.JitterMode.Name( deepmac_config.jitter_mode).lower() return DeepMACParams( dim=deepmac_config.dim, classification_loss=classification_loss, task_loss_weight=deepmac_config.task_loss_weight, pixel_embedding_dim=deepmac_config.pixel_embedding_dim, allowed_masked_classes_ids=deepmac_config.allowed_masked_classes_ids, mask_size=deepmac_config.mask_size, mask_num_subsamples=deepmac_config.mask_num_subsamples, use_xy=deepmac_config.use_xy, network_type=deepmac_config.network_type, use_instance_embedding=deepmac_config.use_instance_embedding, num_init_channels=deepmac_config.num_init_channels, predict_full_resolution_masks= deepmac_config.predict_full_resolution_masks, postprocess_crop_size=deepmac_config.postprocess_crop_size, max_roi_jitter_ratio=deepmac_config.max_roi_jitter_ratio, roi_jitter_mode=jitter_mode ) class DeepMACMetaArch(center_net_meta_arch.CenterNetMetaArch): """The experimental CenterNet DeepMAC[1] model.
[1]: https://arxiv.org/abs/2104.00613 """ def __init__(self, is_training, add_summaries, num_classes, feature_extractor, image_resizer_fn, object_center_params, object_detection_params, deepmac_params, compute_heatmap_sparse=False): """Constructs the super class with object center & detection params only.""" self._deepmac_params = deepmac_params super(DeepMACMetaArch, self).__init__( is_training=is_training, add_summaries=add_summaries, num_classes=num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=object_center_params, object_detection_params=object_detection_params, compute_heatmap_sparse=compute_heatmap_sparse) def _construct_prediction_heads(self, num_classes, num_feature_outputs, class_prediction_bias_init): super_instance = super(DeepMACMetaArch, self) prediction_heads = super_instance._construct_prediction_heads( # pylint:disable=protected-access num_classes, num_feature_outputs, class_prediction_bias_init) if self._deepmac_params is not None: prediction_heads[INSTANCE_EMBEDDING] = [ center_net_meta_arch.make_prediction_net(self._deepmac_params.dim) for _ in range(num_feature_outputs) ] prediction_heads[PIXEL_EMBEDDING] = [ center_net_meta_arch.make_prediction_net( self._deepmac_params.pixel_embedding_dim) for _ in range(num_feature_outputs) ] self._mask_net = MaskHeadNetwork( network_type=self._deepmac_params.network_type, use_instance_embedding=self._deepmac_params.use_instance_embedding, num_init_channels=self._deepmac_params.num_init_channels) return prediction_heads def _get_mask_head_input(self, boxes, pixel_embedding): """Get the input to the mask network, given bounding boxes. Args: boxes: A [num_instances, 4] float tensor containing bounding boxes in normalized coordinates. pixel_embedding: A [height, width, embedding_size] float tensor containing spatial pixel embeddings. Returns: embedding: A [num_instances, mask_height, mask_width, embedding_size + 2] float tensor containing the inputs to the mask network. For each bounding box, we concatenate the normalized box coordinates to the cropped pixel embeddings. If predict_full_resolution_masks is set, mask_height and mask_width are the same as height and width of pixel_embedding. If not, mask_height and mask_width are the same as mask_size. """ num_instances = tf.shape(boxes)[0] mask_size = self._deepmac_params.mask_size if self._deepmac_params.predict_full_resolution_masks: num_instances = tf.shape(boxes)[0] pixel_embedding = pixel_embedding[tf.newaxis, :, :, :] pixel_embeddings_processed = tf.tile(pixel_embedding, [num_instances, 1, 1, 1]) else: # TODO(vighneshb) Explore multilevel_roi_align and align_corners=False. pixel_embeddings_cropped = spatial_transform_ops.matmul_crop_and_resize( pixel_embedding[tf.newaxis], boxes[tf.newaxis], [mask_size, mask_size]) pixel_embeddings_processed = pixel_embeddings_cropped[0] mask_shape = tf.shape(pixel_embeddings_processed) mask_height, mask_width = mask_shape[1], mask_shape[2] y_grid, x_grid = tf.meshgrid(tf.linspace(-1.0, 1.0, mask_height), tf.linspace(-1.0, 1.0, mask_width), indexing='ij') coords = tf.stack([y_grid, x_grid], axis=2) coords = coords[tf.newaxis, :, :, :] coords = tf.tile(coords, [num_instances, 1, 1, 1]) if self._deepmac_params.use_xy: return tf.concat([coords, pixel_embeddings_processed], axis=3) else: return pixel_embeddings_processed def _get_instance_embeddings(self, boxes, instance_embedding): """Return the instance embeddings from bounding box centers. 
Args: boxes: A [num_instances, 4] float tensor holding bounding boxes. The coordinates are in normalized input space. instance_embedding: A [height, width, embedding_size] float tensor containing the instance embeddings. Returns: instance_embeddings: A [num_instances, embedding_size] shaped float tensor containing the center embedding for each instance. """ blist = box_list.BoxList(boxes) output_height = tf.shape(instance_embedding)[0] output_width = tf.shape(instance_embedding)[1] blist_output = box_list_ops.to_absolute_coordinates( blist, output_height, output_width, check_range=False) (y_center_output, x_center_output, _, _) = blist_output.get_center_coordinates_and_sizes() center_coords_output = tf.stack([y_center_output, x_center_output], axis=1) center_coords_output_int = tf.cast(center_coords_output, tf.int32) center_latents = tf.gather_nd(instance_embedding, center_coords_output_int) return center_latents def _get_groundtruth_mask_output(self, boxes, masks): """Get the expected mask output for each box. Args: boxes: A [num_instances, 4] float tensor containing bounding boxes in normalized coordinates. masks: A [num_instances, height, width] float tensor containing binary ground truth masks. Returns: masks: If predict_full_resolution_masks is set, masks are not resized and the size of this tensor is [num_instances, input_height, input_width]. Otherwise, returns a tensor of size [num_instances, mask_size, mask_size]. """ mask_size = self._deepmac_params.mask_size if self._deepmac_params.predict_full_resolution_masks: return masks else: cropped_masks = spatial_transform_ops.matmul_crop_and_resize( masks[:, :, :, tf.newaxis], boxes[:, tf.newaxis, :], [mask_size, mask_size]) cropped_masks = tf.stop_gradient(cropped_masks) cropped_masks = tf.squeeze(cropped_masks, axis=[1, 4]) # TODO(vighneshb) should we discretize masks? return cropped_masks def _resize_logits_like_gt(self, logits, gt): height, width = tf.shape(gt)[1], tf.shape(gt)[2] return resize_instance_masks(logits, (height, width)) def _compute_per_instance_mask_loss( self, boxes, masks, instance_embedding, pixel_embedding): """Returns the mask loss per instance. Args: boxes: A [num_instances, 4] float tensor holding bounding boxes. The coordinates are in normalized input space. masks: A [num_instances, input_height, input_width] float tensor containing the instance masks. instance_embedding: A [output_height, output_width, embedding_size] float tensor containing the instance embeddings. pixel_embedding: optional [output_height, output_width, pixel_embedding_size] float tensor containing the per-pixel embeddings. Returns: mask_loss: A [num_instances] shaped float tensor containing the mask loss for each instance. 
""" num_instances = tf.shape(boxes)[0] if tf.keras.backend.learning_phase(): boxes = preprocessor.random_jitter_boxes( boxes, self._deepmac_params.max_roi_jitter_ratio, jitter_mode=self._deepmac_params.roi_jitter_mode) mask_input = self._get_mask_head_input( boxes, pixel_embedding) instance_embeddings = self._get_instance_embeddings( boxes, instance_embedding) mask_logits = self._mask_net( instance_embeddings, mask_input, training=tf.keras.backend.learning_phase()) mask_gt = self._get_groundtruth_mask_output(boxes, masks) mask_logits = self._resize_logits_like_gt(mask_logits, mask_gt) mask_logits = tf.reshape(mask_logits, [num_instances, -1, 1]) mask_gt = tf.reshape(mask_gt, [num_instances, -1, 1]) loss = self._deepmac_params.classification_loss( prediction_tensor=mask_logits, target_tensor=mask_gt, weights=tf.ones_like(mask_logits)) # TODO(vighneshb) Make this configurable via config. if isinstance(self._deepmac_params.classification_loss, losses.WeightedDiceClassificationLoss): return tf.reduce_sum(loss, axis=1) else: return tf.reduce_mean(loss, axis=[1, 2]) def _compute_instance_masks_loss(self, prediction_dict): """Computes the mask loss. Args: prediction_dict: dict from predict() method containing INSTANCE_EMBEDDING and PIXEL_EMBEDDING prediction. Both of these are lists of tensors, each of size [batch_size, height, width, embedding_size]. Returns: loss: float, the mask loss as a scalar. """ gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks) gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) allowed_masked_classes_ids = ( self._deepmac_params.allowed_masked_classes_ids) total_loss = 0.0 # Iterate over multiple preidctions by backbone (for hourglass length=2) for instance_pred, pixel_pred in zip( prediction_dict[INSTANCE_EMBEDDING], prediction_dict[PIXEL_EMBEDDING]): # Iterate over samples in batch # TODO(vighneshb) find out how autograph is handling this. Converting # to a single op may give speed improvements for i, (boxes, weights, classes, masks) in enumerate( zip(gt_boxes_list, gt_weights_list, gt_classes_list, gt_masks_list)): _, weights, masks = filter_masked_classes(allowed_masked_classes_ids, classes, weights, masks) num_subsample = self._deepmac_params.mask_num_subsamples _, weights, boxes, masks = subsample_instances( classes, weights, boxes, masks, num_subsample) per_instance_loss = self._compute_per_instance_mask_loss( boxes, masks, instance_pred[i], pixel_pred[i]) per_instance_loss *= weights num_instances = tf.maximum(tf.reduce_sum(weights), 1.0) total_loss += tf.reduce_sum(per_instance_loss) / num_instances batch_size = len(gt_boxes_list) num_predictions = len(prediction_dict[INSTANCE_EMBEDDING]) return total_loss / float(batch_size * num_predictions) def loss(self, prediction_dict, true_image_shapes, scope=None): losses_dict = super(DeepMACMetaArch, self).loss( prediction_dict, true_image_shapes, scope) if self._deepmac_params is not None: mask_loss = self._compute_instance_masks_loss( prediction_dict=prediction_dict) key = LOSS_KEY_PREFIX + '/' + DEEP_MASK_ESTIMATION losses_dict[key] = ( self._deepmac_params.task_loss_weight * mask_loss ) return losses_dict def postprocess(self, prediction_dict, true_image_shapes, **params): """Produces boxes given a prediction dict returned by predict(). Args: prediction_dict: a dictionary holding predicted tensors from "predict" function. 
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **params: Currently ignored. Returns: detections: a dictionary containing the following fields detection_masks: (Optional) A uint8 tensor of shape [batch, max_detections, mask_height, mask_width] with masks for each detection. Background is specified with 0, and foreground is specified with positive integers (1 for standard instance segmentation mask, and 1-indexed parts for DensePose task). And all other fields returned by the super class method. """ postprocess_dict = super(DeepMACMetaArch, self).postprocess( prediction_dict, true_image_shapes, **params) boxes_strided = postprocess_dict['detection_boxes_strided'] if self._deepmac_params is not None: masks = self._postprocess_masks( boxes_strided, prediction_dict[INSTANCE_EMBEDDING][-1], prediction_dict[PIXEL_EMBEDDING][-1]) postprocess_dict[fields.DetectionResultFields.detection_masks] = masks return postprocess_dict def _postprocess_masks(self, boxes_output_stride, instance_embedding, pixel_embedding): """Postprocess masks with the deep mask network. Args: boxes_output_stride: A [batch_size, num_instances, 4] float tensor containing the batch of boxes in the absolute output space of the feature extractor. instance_embedding: A [batch_size, output_height, output_width, embedding_size] float tensor containing instance embeddings. pixel_embedding: A [batch_size, output_height, output_width, pixel_embedding_size] float tensor containing the per-pixel embedding. Returns: masks: A float tensor of size [batch_size, num_instances, mask_size, mask_size] containing binary per-box instance masks. """ def process(elems): boxes, instance_embedding, pixel_embedding = elems return self._postprocess_sample(boxes, instance_embedding, pixel_embedding) max_instances = self._center_params.max_box_predictions return tf.map_fn(process, [boxes_output_stride, instance_embedding, pixel_embedding], dtype=tf.float32, parallel_iterations=max_instances) def _postprocess_sample(self, boxes_output_stride, instance_embedding, pixel_embedding): """Post process masks for a single sample. Args: boxes_output_stride: A [num_instances, 4] float tensor containing bounding boxes in the absolute output space. instance_embedding: A [output_height, output_width, embedding_size] float tensor containing instance embeddings. pixel_embedding: A [batch_size, output_height, output_width, pixel_embedding_size] float tensor containing the per-pixel embedding. Returns: masks: A float tensor of size [num_instances, mask_height, mask_width] containing binary per-box instance masks. If predict_full_resolution_masks is set, the masks will be resized to postprocess_crop_size. Otherwise, mask_height=mask_width=mask_size """ height, width = (tf.shape(instance_embedding)[0], tf.shape(instance_embedding)[1]) height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32) blist = box_list.BoxList(boxes_output_stride) blist = box_list_ops.to_normalized_coordinates( blist, height, width, check_range=False) boxes = blist.get() mask_input = self._get_mask_head_input(boxes, pixel_embedding) instance_embeddings = self._get_instance_embeddings( boxes, instance_embedding) mask_logits = self._mask_net( instance_embeddings, mask_input, training=tf.keras.backend.learning_phase()) # TODO(vighneshb) Explore sweeping mask thresholds. 
if self._deepmac_params.predict_full_resolution_masks: height, width = tf.shape(mask_logits)[1], tf.shape(mask_logits)[2] height *= self._stride width *= self._stride mask_logits = resize_instance_masks(mask_logits, (height, width)) mask_logits = crop_masks_within_boxes( mask_logits, boxes, self._deepmac_params.postprocess_crop_size) masks_prob = tf.nn.sigmoid(mask_logits) return masks_prob def _transform_boxes_to_feature_coordinates(self, provided_boxes, true_image_shapes, resized_image_shape, instance_embedding): """Transforms normalized boxes to feature map coordinates. Args: provided_boxes: A [batch, num_instances, 4] float tensor containing normalized bounding boxes. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. resized_image_shape: A 4D int32 tensor containing shapes of the preprocessed inputs (N, H, W, C). instance_embedding: A [batch, output_height, output_width, embedding_size] float tensor containing instance embeddings. Returns: A float tensor of size [batch, num_instances, 4] containing boxes whose coordinates have been transformed to the absolute output space of the feature extractor. """ # Input boxes must be normalized. shape_utils.assert_box_normalized(provided_boxes) # Transform the provided boxes to the absolute output space of the feature # extractor. height, width = (tf.shape(instance_embedding)[1], tf.shape(instance_embedding)[2]) resized_image_height = resized_image_shape[1] resized_image_width = resized_image_shape[2] def transform_boxes(elems): boxes_per_image, true_image_shape = elems blist = box_list.BoxList(boxes_per_image) # First transform boxes from image space to resized image space since # there may be padding in the resized images. blist = box_list_ops.scale(blist, true_image_shape[0] / resized_image_height, true_image_shape[1] / resized_image_width) # Then transform boxes from resized image space (normalized) to the # feature map space (absolute). blist = box_list_ops.to_absolute_coordinates( blist, height, width, check_range=False) return blist.get() return tf.map_fn( transform_boxes, [provided_boxes, true_image_shapes], dtype=tf.float32) def predict_masks_from_boxes(self, prediction_dict, true_image_shapes, provided_boxes, **params): """Produces masks for the provided boxes. Args: prediction_dict: a dictionary holding predicted tensors from "predict" function. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. provided_boxes: float tensor of shape [batch, num_boxes, 4] containing box coordinates (normalized) from which we will produce masks. **params: Currently ignored. Returns: detections: a dictionary containing the following fields detection_masks: (Optional) A uint8 tensor of shape [batch, max_detections, mask_height, mask_width] with masks for each detection. Background is specified with 0, and foreground is specified with positive integers (1 for standard instance segmentation mask, and 1-indexed parts for DensePose task). And all other fields returned by the super class method.
""" postprocess_dict = super(DeepMACMetaArch, self).postprocess(prediction_dict, true_image_shapes, **params) instance_embedding = prediction_dict[INSTANCE_EMBEDDING][-1] resized_image_shapes = shape_utils.combined_static_and_dynamic_shape( prediction_dict['preprocessed_inputs']) boxes_strided = self._transform_boxes_to_feature_coordinates( provided_boxes, true_image_shapes, resized_image_shapes, instance_embedding) if self._deepmac_params is not None: masks = self._postprocess_masks( boxes_strided, instance_embedding, prediction_dict[PIXEL_EMBEDDING][-1]) postprocess_dict[fields.DetectionResultFields.detection_masks] = masks return postprocess_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/deepmac_meta_arch.py
deepmac_meta_arch.py
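A minimal, standalone sketch (hypothetical shapes, plain TF2 ops) of how the DeepMAC mask-head input above is assembled: a normalized (y, x) coordinate grid is built with `tf.meshgrid`, the per-instance center embedding is tiled over the spatial dimensions, and everything is concatenated channel-wise. In the class above these two concatenations happen in `_get_mask_head_input` and `MaskHeadNetwork.__call__` respectively; here they are folded into one snippet for illustration.

import tensorflow as tf

num_instances, height, width = 3, 16, 16
pixel_embedding = tf.random.normal([num_instances, height, width, 8])
instance_embedding = tf.random.normal([num_instances, 4])

# Normalized coordinate grid in [-1, 1], shared by all instances.
y_grid, x_grid = tf.meshgrid(tf.linspace(-1.0, 1.0, height),
                             tf.linspace(-1.0, 1.0, width), indexing='ij')
coords = tf.tile(tf.stack([y_grid, x_grid], axis=2)[tf.newaxis],
                 [num_instances, 1, 1, 1])

# Broadcast the instance embedding to every pixel and concatenate everything.
tiled_instance = tf.tile(instance_embedding[:, tf.newaxis, tf.newaxis, :],
                         [1, height, width, 1])
mask_head_input = tf.concat([coords, pixel_embedding, tiled_instance], axis=3)
print(mask_head_input.shape)  # (3, 16, 16, 2 + 8 + 4)

The coordinate channels give the otherwise translation-invariant mask network a notion of position within the crop, which is what the `use_xy` option above toggles.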
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import tensorflow.compat.v1 as tf from object_detection.core import box_predictor from object_detection.core import standard_fields as fields from object_detection.meta_architectures import context_rcnn_lib from object_detection.meta_architectures import context_rcnn_lib_tf2 from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.protos import faster_rcnn_pb2 from object_detection.utils import ops from object_detection.utils import tf_version _UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__' class ContextRCNNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch): """Context R-CNN Meta-architecture definition.""" def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, initial_crop_size, maxpool_kernel_size, maxpool_stride, second_stage_target_assigner, second_stage_mask_rcnn_box_predictor, second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, second_stage_mask_prediction_loss_weight=1.0, hard_example_miner=None, parallel_iterations=16, add_summaries=True, clip_anchors_to_image=False, use_static_shapes=False, resize_masks=True, freeze_batchnorm=False, return_raw_detections_during_predict=False, output_final_box_features=False, output_final_box_rpn_features=False, attention_bottleneck_dimension=None, attention_temperature=None, use_self_attention=False, use_long_term_attention=True, self_attention_in_sequence=False, num_attention_heads=1, num_attention_layers=1, attention_position=( faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER) ): """ContextRCNNMetaArch Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. num_classes: Number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). image_resizer_fn: A callable for image resizing. This callable takes a rank-3 image tensor of shape [height, width, channels] (corresponding to a single image), an optional rank-3 instance mask tensor of shape [num_masks, height, width] and returns a resized rank-3 image tensor, a resized mask tensor if one was provided in the input. In addition this callable must also return a 1-D tensor of the form [height, width, channels] containing the size of the true image, as the image resizer can perform zero padding. See protos/image_resizer.proto. feature_extractor: A FasterRCNNFeatureExtractor object. number_of_stages: An integer taking values in {1, 2, 3}. If 1, the function will construct only the Region Proposal Network (RPN) part of the model. If 2, the function will perform box refinement and other auxiliary predictions all in the second stage.
If 3, it will extract features from refined boxes and perform the auxiliary predictions on the non-maximum suppressed refined boxes. If is_training is true and the value of number_of_stages is 3, it is reduced to 2 since all the model heads are trained in parallel in second stage during training. first_stage_anchor_generator: An anchor_generator.AnchorGenerator object (note that currently we only support grid_anchor_generator.GridAnchorGenerator objects) first_stage_target_assigner: Target assigner to use for first stage of Faster R-CNN (RPN). first_stage_atrous_rate: A single integer indicating the atrous rate for the single convolution op which is applied to the `rpn_features_to_crop` tensor to obtain a tensor to be used for box prediction. Some feature extractors optionally allow for producing feature maps computed at denser resolutions. The atrous rate is used to compensate for the denser feature maps by using an effectively larger receptive field. (This should typically be set to 1). first_stage_box_predictor_arg_scope_fn: Either a Keras layer hyperparams object or a function to construct tf-slim arg_scope for conv2d, separable_conv2d and fully_connected ops. Used for the RPN box predictor. If it is a keras hyperparams object the RPN box predictor will be a Keras model. If it is a function to construct an arg scope it will be a tf-slim box predictor. first_stage_box_predictor_kernel_size: Kernel size to use for the convolution op just prior to RPN box predictions. first_stage_box_predictor_depth: Output depth for the convolution op just prior to RPN box predictions. first_stage_minibatch_size: The "batch size" to use for computing the objectness and location loss of the region proposal network. This "batch size" refers to the number of anchors selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. first_stage_sampler: Sampler to use for first stage loss (RPN loss). first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window`(with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`. This is used to perform non max suppression on the boxes predicted by the Region Proposal Network (RPN). See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. first_stage_max_proposals: Maximum number of boxes to retain after performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN). first_stage_localization_loss_weight: A float first_stage_objectness_loss_weight: A float crop_and_resize_fn: A differentiable resampler to use for cropping RPN proposal features. initial_crop_size: A single integer indicating the output size (width and height are set to be the same) of the initial bilinear interpolation based cropping during ROI pooling. maxpool_kernel_size: A single integer indicating the kernel size of the max pool op on the cropped feature map during ROI pooling. maxpool_stride: A single integer indicating the stride of the max pool op on the cropped feature map during ROI pooling. second_stage_target_assigner: Target assigner to use for second stage of Faster R-CNN. 
If the model is configured with multiple prediction heads, this target assigner is used to generate targets for all heads (with the correct `unmatched_class_label`). second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for the second stage. second_stage_batch_size: The batch size used for computing the classification and refined location loss of the box classifier. This "batch size" refers to the number of proposals selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. second_stage_sampler: Sampler to use for second stage loss (box classifier loss). second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores`, optional `clip_window` and optional (kwarg) `mask` inputs (with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`, and (optionally) `detection_masks`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. second_stage_score_conversion_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. second_stage_localization_loss_weight: A float indicating the scale factor for second stage localization loss. second_stage_classification_loss_weight: A float indicating the scale factor for second stage classification loss. second_stage_classification_loss: Classification loss used by the second stage classifier. Either losses.WeightedSigmoidClassificationLoss or losses.WeightedSoftmaxClassificationLoss. second_stage_mask_prediction_loss_weight: A float indicating the scale factor for second stage mask prediction loss. This is applicable only if second stage box predictor is configured to predict masks. hard_example_miner: A losses.HardExampleMiner object (can be None). parallel_iterations: (Optional) The number of iterations allowed to run in parallel for calls to tf.map_fn. add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph. clip_anchors_to_image: Normally, anchors generated for a given image size are pruned during training if they lie outside the image window. This option clips the anchors to be within the image instead of pruning. use_static_shapes: If True, uses implementation of ops with static shape guarantees. resize_masks: Indicates whether the masks present in the groundtruth should be resized in the model with `image_resizer_fn` freeze_batchnorm: Whether to freeze batch norm parameters in the first stage box predictor during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. return_raw_detections_during_predict: Whether to return raw detection boxes in the predict() method. These are decoded boxes that have not been through postprocessing (i.e. NMS). Default False. output_final_box_features: Whether to output final box features. If true, it crops the feature map based on the final box prediction and returns it in the output dict as detection_features. output_final_box_rpn_features: Whether to output rpn box features. If true, it crops the rpn feature map based on the final box prediction and returns it in the output dict as detection_features. attention_bottleneck_dimension: A single integer.
The bottleneck feature dimension of the attention block. attention_temperature: A single float. The attention temperature. use_self_attention: Whether to use self-attention within the box features in the current frame. use_long_term_attention: Whether to use attention into the context features. self_attention_in_sequence: Whether self attention and long term attention are in sequence or parallel. num_attention_heads: The number of attention heads to use. num_attention_layers: The number of attention layers to use. attention_position: Whether attention should occur post rpn or post box classifier. Options are specified in the faster rcnn proto, default is post box classifier. Raises: ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at training time. ValueError: If first_stage_anchor_generator is not of type grid_anchor_generator.GridAnchorGenerator. """ super(ContextRCNNMetaArch, self).__init__( is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, initial_crop_size, maxpool_kernel_size, maxpool_stride, second_stage_target_assigner, second_stage_mask_rcnn_box_predictor, second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, second_stage_mask_prediction_loss_weight=( second_stage_mask_prediction_loss_weight), hard_example_miner=hard_example_miner, parallel_iterations=parallel_iterations, add_summaries=add_summaries, clip_anchors_to_image=clip_anchors_to_image, use_static_shapes=use_static_shapes, resize_masks=resize_masks, freeze_batchnorm=freeze_batchnorm, return_raw_detections_during_predict=( return_raw_detections_during_predict), output_final_box_features=output_final_box_features, output_final_box_rpn_features=output_final_box_rpn_features) self._attention_position = attention_position if tf_version.is_tf1(): self._context_feature_extract_fn = functools.partial( context_rcnn_lib._compute_box_context_attention, bottleneck_dimension=attention_bottleneck_dimension, attention_temperature=attention_temperature, is_training=is_training, max_num_proposals=self.max_num_proposals, use_self_attention=use_self_attention, use_long_term_attention=use_long_term_attention, self_attention_in_sequence=self_attention_in_sequence, num_attention_heads=num_attention_heads, num_attention_layers=num_attention_layers) else: if use_self_attention: raise NotImplementedError if self_attention_in_sequence: raise NotImplementedError if not use_long_term_attention: raise NotImplementedError if num_attention_heads > 1: raise NotImplementedError if num_attention_layers > 1: raise NotImplementedError self._context_feature_extract_fn = context_rcnn_lib_tf2.AttentionBlock( bottleneck_dimension=attention_bottleneck_dimension, attention_temperature=attention_temperature, is_training=is_training, max_num_proposals=self.max_num_proposals) @staticmethod def get_side_inputs(features): """Overrides the get_side_inputs function in the base class. 
This function returns context_features and valid_context_size, which will be used in the _compute_second_stage_input_feature_maps function. Args: features: A dictionary of tensors. Returns: A dictionary of tensors contains context_features and valid_context_size. Raises: ValueError: If context_features or valid_context_size is not in the features. """ if (fields.InputDataFields.context_features not in features or fields.InputDataFields.valid_context_size not in features): raise ValueError( 'Please make sure context_features and valid_context_size are in the ' 'features') return { fields.InputDataFields.context_features: features[fields.InputDataFields.context_features], fields.InputDataFields.valid_context_size: features[fields.InputDataFields.valid_context_size] } def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors, image_shape, true_image_shapes, **side_inputs): """Predicts the output tensors from second stage of Faster R-CNN. Args: rpn_box_encodings: 3-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes. rpn_objectness_predictions_with_background: 2-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensor with shape [batch_size, height_i, width_i, depth] representing image features to crop using the proposal boxes predicted by the RPN. anchors: 2-D float tensor of shape [num_anchors, self._box_coder.code_size]. image_shape: A 1D int32 tensors of size [4] containing the image shape. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D float32 tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 2) class_predictions_with_background: a 3-D float32 tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 5) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in normalized coordinates. 
Can be used to override the boxes proposed by the RPN, thus enabling one to extract features and get box classification and prediction for externally selected areas of the image. 6) box_classifier_features: a 4-D float32/bfloat16 tensor representing the features for each proposal. If self._return_raw_detections_during_predict is True, the dictionary will also contain: 7) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, num_classes, 4] in normalized coordinates. 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals, num_classes]. """ proposal_boxes_normalized, num_proposals = self._proposal_postprocess( rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape, true_image_shapes) prediction_dict = self._box_prediction(rpn_features_to_crop, proposal_boxes_normalized, image_shape, true_image_shapes, num_proposals, **side_inputs) prediction_dict['num_proposals'] = num_proposals return prediction_dict def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized, image_shape, true_image_shapes, num_proposals, **side_inputs): """Predicts the output tensors from second stage of Faster R-CNN. Args: rpn_features_to_crop: A list 4-D float32 or bfloat16 tensor with shape [batch_size, height_i, width_i, depth] representing image features to crop using the proposal boxes predicted by the RPN. proposal_boxes_normalized: A float tensor with shape [batch_size, max_num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented as normalized coordinates. image_shape: A 1D int32 tensors of size [4] containing the image shape. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. num_proposals: The number of valid box proposals. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D float32 tensor with shape [total_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals. If using a shared box across classes the shape will instead be [total_num_proposals, 1, self._box_coder.code_size]. 2) class_predictions_with_background: a 3-D float32 tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in absolute coordinates. 4) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes in normalized coordinates. Can be used to override the boxes proposed by the RPN, thus enabling one to extract features and get box classification and prediction for externally selected areas of the image. 5) box_classifier_features: a 4-D float32/bfloat16 tensor representing the features for each proposal. 
If self._return_raw_detections_during_predict is True, the dictionary will also contain: 6) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, num_classes, 4] in normalized coordinates. 7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals, num_classes]. 8) final_anchors: a 3-D float tensor of shape [batch_size, self.max_num_proposals, 4] containing the reference anchors for raw detection boxes in normalized coordinates. """ flattened_proposal_feature_maps = ( self._compute_second_stage_input_feature_maps( rpn_features_to_crop, proposal_boxes_normalized, image_shape, num_proposals, **side_inputs)) box_classifier_features = self._extract_box_classifier_features( flattened_proposal_feature_maps, num_proposals, **side_inputs) if self._mask_rcnn_box_predictor.is_keras_model: box_predictions = self._mask_rcnn_box_predictor( [box_classifier_features], prediction_stage=2) else: box_predictions = self._mask_rcnn_box_predictor.predict( [box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, prediction_stage=2) refined_box_encodings = tf.squeeze( box_predictions[box_predictor.BOX_ENCODINGS], axis=1, name='all_refined_box_encodings') class_predictions_with_background = tf.squeeze( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1, name='all_class_predictions_with_background') absolute_proposal_boxes = ops.normalized_to_image_coordinates( proposal_boxes_normalized, image_shape, self._parallel_iterations) prediction_dict = { 'refined_box_encodings': tf.cast(refined_box_encodings, dtype=tf.float32), 'class_predictions_with_background': tf.cast(class_predictions_with_background, dtype=tf.float32), 'proposal_boxes': absolute_proposal_boxes, 'box_classifier_features': box_classifier_features, 'proposal_boxes_normalized': proposal_boxes_normalized, 'final_anchors': proposal_boxes_normalized } if self._return_raw_detections_during_predict: prediction_dict.update(self._raw_detections_and_feature_map_inds( refined_box_encodings, absolute_proposal_boxes, true_image_shapes)) return prediction_dict def _compute_second_stage_input_feature_maps(self, features_to_crop, proposal_boxes_normalized, image_shape, num_proposals, context_features, valid_context_size): """Crops to a set of proposals from the feature map for a batch of images. This function overrides the one in the FasterRCNNMetaArch. Aside from cropping and resizing the feature maps, which is done in the parent class, it adds context attention features to the box features. Args: features_to_crop: A float32 Tensor with shape [batch_size, height, width, depth] proposal_boxes_normalized: A float32 Tensor with shape [batch_size, num_proposals, box_code_size] containing proposal boxes in normalized coordinates. image_shape: A 1D int32 tensors of size [4] containing the image shape. num_proposals: The number of valid box proposals. context_features: A float Tensor of shape [batch_size, context_size, num_context_features]. valid_context_size: A int32 Tensor of shape [batch_size]. Returns: A float32 Tensor with shape [K, new_height, new_width, depth]. 
""" del image_shape box_features = self._crop_and_resize_fn( features_to_crop, proposal_boxes_normalized, None, [self._initial_crop_size, self._initial_crop_size]) flattened_box_features = self._flatten_first_two_dimensions(box_features) flattened_box_features = self._maxpool_layer(flattened_box_features) if self._attention_position == ( faster_rcnn_pb2.AttentionPosition.POST_RPN): attention_features = self._context_feature_extract_fn( box_features=flattened_box_features, num_proposals=num_proposals, context_features=context_features, valid_context_size=valid_context_size) # Adds box features with attention features. flattened_box_features += self._flatten_first_two_dimensions( attention_features) return flattened_box_features def _extract_box_classifier_features( self, flattened_box_features, num_proposals, context_features, valid_context_size, attention_position=( faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER)): if self._feature_extractor_for_box_classifier_features == ( _UNINITIALIZED_FEATURE_EXTRACTOR): self._feature_extractor_for_box_classifier_features = ( self._feature_extractor.get_box_classifier_feature_extractor_model( name=self.second_stage_feature_extractor_scope)) if self._feature_extractor_for_box_classifier_features: box_classifier_features = ( self._feature_extractor_for_box_classifier_features( flattened_box_features)) else: box_classifier_features = ( self._feature_extractor.extract_box_classifier_features( flattened_box_features, scope=self.second_stage_feature_extractor_scope)) if self._attention_position == ( faster_rcnn_pb2.AttentionPosition.POST_BOX_CLASSIFIER): attention_features = self._context_feature_extract_fn( box_features=box_classifier_features, num_proposals=num_proposals, context_features=context_features, valid_context_size=valid_context_size) # Adds box features with attention features. box_classifier_features += self._flatten_first_two_dimensions( attention_features) return box_classifier_features
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_meta_arch.py
context_rcnn_meta_arch.py
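A minimal sketch (standalone, hypothetical shapes, plain TF ops rather than the package helpers) of the residual step used in `_compute_second_stage_input_feature_maps` and `_extract_box_classifier_features` above: the attention block yields one feature vector per proposal, which is flattened to line up with the flattened box features and then added onto them, broadcasting over the spatial crop.

import tensorflow as tf

batch, num_proposals, crop, depth = 2, 5, 7, 16
# Cropped ROI features, already flattened to [batch * num_proposals, ...].
box_features = tf.random.normal([batch * num_proposals, crop, crop, depth])
# Attention output, one feature vector per proposal.
attention_features = tf.random.normal([batch, num_proposals, 1, 1, depth])

# Flatten the first two dimensions so the attention output lines up with the
# flattened box features, then add it residually; the singleton spatial
# dimensions broadcast over the crop.
attention_flat = tf.reshape(attention_features,
                            [batch * num_proposals, 1, 1, depth])
box_features += attention_flat
print(box_features.shape)  # (10, 7, 7, 16)

Whether this addition is applied to the cropped RPN features or to the box-classifier features is what the `attention_position` option (POST_RPN vs. POST_BOX_CLASSIFIER) selects in the class above.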
import tensorflow.compat.v1 as tf from object_detection.core import box_predictor from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.utils import ops class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch): """R-FCN Meta-architecture definition.""" def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, second_stage_target_assigner, second_stage_rfcn_box_predictor, second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, hard_example_miner, parallel_iterations=16, add_summaries=True, clip_anchors_to_image=False, use_static_shapes=False, resize_masks=False, freeze_batchnorm=False, return_raw_detections_during_predict=False, output_final_box_features=False, output_final_box_rpn_features=False): """RFCNMetaArch Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. num_classes: Number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). image_resizer_fn: A callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. See builders/image_resizer_builder.py. feature_extractor: A FasterRCNNFeatureExtractor object. number_of_stages: Valid values are {1, 2}. If 1 will only construct the Region Proposal Network (RPN) part of the model. first_stage_anchor_generator: An anchor_generator.AnchorGenerator object (note that currently we only support grid_anchor_generator.GridAnchorGenerator objects) first_stage_target_assigner: Target assigner to use for first stage of R-FCN (RPN). first_stage_atrous_rate: A single integer indicating the atrous rate for the single convolution op which is applied to the `rpn_features_to_crop` tensor to obtain a tensor to be used for box prediction. Some feature extractors optionally allow for producing feature maps computed at denser resolutions. The atrous rate is used to compensate for the denser feature maps by using an effectively larger receptive field. (This should typically be set to 1). first_stage_box_predictor_arg_scope_fn: Either a Keras layer hyperparams object or a function to construct tf-slim arg_scope for conv2d, separable_conv2d and fully_connected ops. Used for the RPN box predictor. If it is a keras hyperparams object the RPN box predictor will be a Keras model. If it is a function to construct an arg scope it will be a tf-slim box predictor. first_stage_box_predictor_kernel_size: Kernel size to use for the convolution op just prior to RPN box predictions. first_stage_box_predictor_depth: Output depth for the convolution op just prior to RPN box predictions. 
first_stage_minibatch_size: The "batch size" to use for computing the objectness and location loss of the region proposal network. This "batch size" refers to the number of anchors selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. first_stage_sampler: The sampler for the boxes used to calculate the RPN loss after the first stage. first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window`(with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`. This is used to perform non max suppression on the boxes predicted by the Region Proposal Network (RPN). See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. first_stage_max_proposals: Maximum number of boxes to retain after performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN). first_stage_localization_loss_weight: A float first_stage_objectness_loss_weight: A float crop_and_resize_fn: A differentiable resampler to use for cropping RPN proposal features. second_stage_target_assigner: Target assigner to use for second stage of R-FCN. If the model is configured with multiple prediction heads, this target assigner is used to generate targets for all heads (with the correct `unmatched_class_label`). second_stage_rfcn_box_predictor: RFCN box predictor to use for second stage. second_stage_batch_size: The batch size used for computing the classification and refined location loss of the box classifier. This "batch size" refers to the number of proposals selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. second_stage_sampler: The sampler for the boxes used for second stage box classifier. second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores`, optional `clip_window` and optional (kwarg) `mask` inputs (with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`, and (optionally) `detection_masks`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. second_stage_score_conversion_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. second_stage_localization_loss_weight: A float second_stage_classification_loss_weight: A float second_stage_classification_loss: A string indicating which loss function to use, supports 'softmax' and 'sigmoid'. hard_example_miner: A losses.HardExampleMiner object (can be None). parallel_iterations: (Optional) The number of iterations allowed to run in parallel for calls to tf.map_fn. add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph. clip_anchors_to_image: The anchors generated are clip to the window size without filtering the nonoverlapping anchors. This generates a static number of anchors. This argument is unused. use_static_shapes: If True, uses implementation of ops with static shape guarantees. 
resize_masks: Indicates whether the masks presend in the groundtruth should be resized in the model with `image_resizer_fn` freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. return_raw_detections_during_predict: Whether to return raw detection boxes in the predict() method. These are decoded boxes that have not been through postprocessing (i.e. NMS). Default False. output_final_box_features: Whether to output final box features. If true, it crops the feature map based on the final box prediction and returns it in the dict as detection_features. output_final_box_rpn_features: Whether to output rpn box features. If true, it crops the rpn feature map based on the final box prediction and returns it in the dict as detection_features. Raises: ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` ValueError: If first_stage_anchor_generator is not of type grid_anchor_generator.GridAnchorGenerator. """ # TODO(rathodv): add_summaries and crop_and_resize_fn is currently # unused. Respect that directive in the future. super(RFCNMetaArch, self).__init__( is_training, num_classes, image_resizer_fn, feature_extractor, number_of_stages, first_stage_anchor_generator, first_stage_target_assigner, first_stage_atrous_rate, first_stage_box_predictor_arg_scope_fn, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_sampler, first_stage_non_max_suppression_fn, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, crop_and_resize_fn, None, # initial_crop_size is not used in R-FCN None, # maxpool_kernel_size is not use in R-FCN None, # maxpool_stride is not use in R-FCN second_stage_target_assigner, None, # fully_connected_box_predictor is not used in R-FCN. second_stage_batch_size, second_stage_sampler, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, second_stage_classification_loss, 1.0, # second stage mask prediction loss weight isn't used in R-FCN. hard_example_miner, parallel_iterations, add_summaries, clip_anchors_to_image, use_static_shapes, resize_masks, freeze_batchnorm=freeze_batchnorm, return_raw_detections_during_predict=( return_raw_detections_during_predict), output_final_box_features=output_final_box_features, output_final_box_rpn_features=output_final_box_rpn_features) self._rfcn_box_predictor = second_stage_rfcn_box_predictor def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features, anchors, image_shape, true_image_shapes): """Predicts the output tensors from 2nd stage of R-FCN. Args: rpn_box_encodings: 3-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes. rpn_objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). rpn_features: A list of single 4-D float32 tensor with shape [batch_size, height, width, depth] representing image features from the RPN. anchors: 2-D float tensor of shape [num_anchors, self._box_coder.code_size]. image_shape: A 1D int32 tensors of size [4] containing the image shape. 
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, 4] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals 2) class_predictions_with_background: a 2-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes (in absolute coordinates). 5) proposal_boxes_normalized: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes (in normalized coordinates). Can be used to override the boxes proposed by the RPN, thus enabling one to extract box classification and prediction for externally selected areas of the image. 6) box_classifier_features: a 4-D float32 tensor, of shape [batch_size, feature_map_height, feature_map_width, depth], representing the box classifier features. """ image_shape_2d = tf.tile(tf.expand_dims(image_shape[1:], 0), [image_shape[0], 1]) (proposal_boxes_normalized, _, _, num_proposals, _, _) = self._postprocess_rpn(rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape_2d, true_image_shapes) rpn_features = rpn_features[0] box_classifier_features = ( self._extract_box_classifier_features(rpn_features)) if self._rfcn_box_predictor.is_keras_model: box_predictions = self._rfcn_box_predictor( [box_classifier_features], proposal_boxes=proposal_boxes_normalized) else: box_predictions = self._rfcn_box_predictor.predict( [box_classifier_features], num_predictions_per_location=[1], scope=self.second_stage_box_predictor_scope, proposal_boxes=proposal_boxes_normalized) refined_box_encodings = tf.squeeze( tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], axis=1), axis=1) class_predictions_with_background = tf.squeeze( tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1), axis=1) absolute_proposal_boxes = ops.normalized_to_image_coordinates( proposal_boxes_normalized, image_shape, parallel_iterations=self._parallel_iterations) prediction_dict = { 'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': absolute_proposal_boxes, 'box_classifier_features': box_classifier_features, 'proposal_boxes_normalized': proposal_boxes_normalized, 'final_anchors': absolute_proposal_boxes } if self._return_raw_detections_during_predict: prediction_dict.update(self._raw_detections_and_feature_map_inds( refined_box_encodings, absolute_proposal_boxes)) return prediction_dict def regularization_losses(self): """Returns a list of regularization losses for this model. 
    Returns a list of regularization losses for this model that the estimator
    needs to use during training/optimization.

    Returns:
      A list of regularization loss tensors.
    """
    reg_losses = super(RFCNMetaArch, self).regularization_losses()
    if self._rfcn_box_predictor.is_keras_model:
      reg_losses.extend(self._rfcn_box_predictor.losses)
    return reg_losses

  def updates(self):
    """Returns a list of update operators for this model.

    Returns a list of update operators for this model that must be executed at
    each training step. The estimator's train op needs to have a control
    dependency on these updates.

    Returns:
      A list of update operators.
    """
    update_ops = super(RFCNMetaArch, self).updates()
    if self._rfcn_box_predictor.is_keras_model:
      update_ops.extend(
          self._rfcn_box_predictor.get_updates_for(None))
      update_ops.extend(
          self._rfcn_box_predictor.get_updates_for(
              self._rfcn_box_predictor.inputs))
    return update_ops
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/rfcn_meta_arch.py
rfcn_meta_arch.py
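In `_predict_second_stage` above, RPN proposals in normalized coordinates are converted to absolute image coordinates via `ops.normalized_to_image_coordinates` before being returned as `proposal_boxes`. The sketch below is a hypothetical standalone helper that only illustrates the per-image scaling that conversion performs; it is not the library implementation, and the names are invented for the example.

    import tensorflow as tf

    def to_absolute_coordinates(normalized_boxes, image_height, image_width):
      # normalized_boxes: [num_boxes, 4] as [ymin, xmin, ymax, xmax] in [0, 1].
      scale = tf.cast(
          tf.stack([image_height, image_width, image_height, image_width]),
          normalized_boxes.dtype)
      return normalized_boxes * scale

    boxes = tf.constant([[0.1, 0.2, 0.5, 0.8]])
    print(to_absolute_coordinates(boxes, image_height=480, image_width=640))
    # -> [[ 48. 128. 240. 512.]]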
"""Library functions for Context R-CNN.""" import tensorflow as tf from object_detection.core import freezable_batch_norm # The negative value used in padding the invalid weights. _NEGATIVE_PADDING_VALUE = -100000 class ContextProjection(tf.keras.layers.Layer): """Custom layer to do batch normalization and projection.""" def __init__(self, projection_dimension, **kwargs): self.batch_norm = freezable_batch_norm.FreezableBatchNorm( epsilon=0.001, center=True, scale=True, momentum=0.97, trainable=True) self.projection = tf.keras.layers.Dense(units=projection_dimension, use_bias=True) self.projection_dimension = projection_dimension super(ContextProjection, self).__init__(**kwargs) def build(self, input_shape): self.projection.build(input_shape) self.batch_norm.build(input_shape[:1] + [self.projection_dimension]) def call(self, input_features, is_training=False): return tf.nn.relu6(self.batch_norm(self.projection(input_features), is_training)) class AttentionBlock(tf.keras.layers.Layer): """Custom layer to perform all attention.""" def __init__(self, bottleneck_dimension, attention_temperature, output_dimension=None, is_training=False, name='AttentionBlock', max_num_proposals=100, **kwargs): """Constructs an attention block. Args: bottleneck_dimension: A int32 Tensor representing the bottleneck dimension for intermediate projections. attention_temperature: A float Tensor. It controls the temperature of the softmax for weights calculation. The formula for calculation as follows: weights = exp(weights / temperature) / sum(exp(weights / temperature)) output_dimension: A int32 Tensor representing the last dimension of the output feature. is_training: A boolean Tensor (affecting batch normalization). name: A string describing what to name the variables in this block. max_num_proposals: The number of box proposals for each image **kwargs: Additional keyword arguments. """ self._key_proj = ContextProjection(bottleneck_dimension) self._val_proj = ContextProjection(bottleneck_dimension) self._query_proj = ContextProjection(bottleneck_dimension) self._feature_proj = None self._attention_temperature = attention_temperature self._bottleneck_dimension = bottleneck_dimension self._is_training = is_training self._output_dimension = output_dimension self._max_num_proposals = max_num_proposals if self._output_dimension: self._feature_proj = ContextProjection(self._output_dimension) super(AttentionBlock, self).__init__(name=name, **kwargs) def build(self, input_shapes): """Finishes building the attention block. Args: input_shapes: the shape of the primary input box features. """ if not self._feature_proj: self._output_dimension = input_shapes[-1] self._feature_proj = ContextProjection(self._output_dimension) def call(self, box_features, context_features, valid_context_size, num_proposals): """Handles a call by performing attention. Args: box_features: A float Tensor of shape [batch_size * input_size, height, width, num_input_features]. context_features: A float Tensor of shape [batch_size, context_size, num_context_features]. valid_context_size: A int32 Tensor of shape [batch_size]. num_proposals: A [batch_size] int32 Tensor specifying the number of valid proposals per image in the batch. Returns: A float Tensor with shape [batch_size, input_size, num_input_features] containing output features after attention with context features. 
""" _, context_size, _ = context_features.shape keys_values_valid_mask = compute_valid_mask( valid_context_size, context_size) total_proposals, height, width, channels = box_features.shape batch_size = total_proposals // self._max_num_proposals box_features = tf.reshape( box_features, [batch_size, self._max_num_proposals, height, width, channels]) # Average pools over height and width dimension so that the shape of # box_features becomes [batch_size, max_num_proposals, channels]. box_features = tf.reduce_mean(box_features, [2, 3]) queries_valid_mask = compute_valid_mask(num_proposals, box_features.shape[1]) queries = project_features( box_features, self._bottleneck_dimension, self._is_training, self._query_proj, normalize=True) keys = project_features( context_features, self._bottleneck_dimension, self._is_training, self._key_proj, normalize=True) values = project_features( context_features, self._bottleneck_dimension, self._is_training, self._val_proj, normalize=True) # masking out any keys which are padding keys *= tf.cast(keys_values_valid_mask[..., tf.newaxis], keys.dtype) queries *= tf.cast(queries_valid_mask[..., tf.newaxis], queries.dtype) weights = tf.matmul(queries, keys, transpose_b=True) weights, values = filter_weight_value(weights, values, keys_values_valid_mask) weights = tf.nn.softmax(weights / self._attention_temperature) features = tf.matmul(weights, values) output_features = project_features( features, self._output_dimension, self._is_training, self._feature_proj, normalize=False) output_features = output_features[:, :, tf.newaxis, tf.newaxis, :] return output_features def filter_weight_value(weights, values, valid_mask): """Filters weights and values based on valid_mask. _NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to avoid their contribution in softmax. 0 will be set for the invalid elements in the values. Args: weights: A float Tensor of shape [batch_size, input_size, context_size]. values: A float Tensor of shape [batch_size, context_size, projected_dimension]. valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means valid and False means invalid. Returns: weights: A float Tensor of shape [batch_size, input_size, context_size]. values: A float Tensor of shape [batch_size, context_size, projected_dimension]. Raises: ValueError: If shape of doesn't match. """ w_batch_size, _, w_context_size = weights.shape v_batch_size, v_context_size, _ = values.shape m_batch_size, m_context_size = valid_mask.shape if w_batch_size != v_batch_size or v_batch_size != m_batch_size: raise ValueError('Please make sure the first dimension of the input' ' tensors are the same.') if w_context_size != v_context_size: raise ValueError('Please make sure the third dimension of weights matches' ' the second dimension of values.') if w_context_size != m_context_size: raise ValueError('Please make sure the third dimension of the weights' ' matches the second dimension of the valid_mask.') valid_mask = valid_mask[..., tf.newaxis] # Force the invalid weights to be very negative so it won't contribute to # the softmax. weights += tf.transpose( tf.cast(tf.math.logical_not(valid_mask), weights.dtype) * _NEGATIVE_PADDING_VALUE, perm=[0, 2, 1]) # Force the invalid values to be 0. values *= tf.cast(valid_mask, values.dtype) return weights, values def project_features(features, bottleneck_dimension, is_training, layer, normalize=True): """Projects features to another feature space. 
  Args:
    features: A float Tensor of shape [batch_size, features_size,
      num_features].
    bottleneck_dimension: An int32 Tensor.
    is_training: A boolean Tensor (affecting batch normalization).
    layer: Contains a custom layer specific to the particular operation
      being performed (key, value, query, features).
    normalize: A boolean Tensor. If true, the output features will be
      l2 normalized on the last dimension.

  Returns:
    A float Tensor of shape [batch, features_size, projection_dimension].
  """
  shape_arr = features.shape
  batch_size, _, num_features = shape_arr
  features = tf.reshape(features, [-1, num_features])

  projected_features = layer(features, is_training)
  projected_features = tf.reshape(projected_features,
                                  [batch_size, -1, bottleneck_dimension])
  if normalize:
    projected_features = tf.keras.backend.l2_normalize(projected_features,
                                                       axis=-1)
  return projected_features


def compute_valid_mask(num_valid_elements, num_elements):
  """Computes mask of valid entries within padded context feature.

  Args:
    num_valid_elements: An int32 Tensor of shape [batch_size].
    num_elements: An int32 Tensor.

  Returns:
    A boolean Tensor of the shape [batch_size, num_elements]. True means
    valid and False means invalid.
  """
  batch_size = num_valid_elements.shape[0]
  element_idxs = tf.range(num_elements, dtype=tf.int32)
  batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1])
  num_valid_elements = num_valid_elements[..., tf.newaxis]
  valid_mask = tf.less(batch_element_idxs, num_valid_elements)
  return valid_mask
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/context_rcnn_lib_tf2.py
context_rcnn_lib_tf2.py
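The attention utilities in `context_rcnn_lib_tf2.py` above combine `compute_valid_mask`, `filter_weight_value`, and a temperature-scaled softmax to attend over padded context features. The following is a self-contained toy restatement of that masking pattern; the function and variable names are illustrative only and it is not a drop-in replacement for `AttentionBlock`.

    import tensorflow as tf

    _NEG_PAD = -100000.0  # plays the role of _NEGATIVE_PADDING_VALUE

    def masked_context_attention(queries, keys, values, valid_context_size,
                                 temperature=0.2):
      # queries: [batch, num_queries, d]; keys/values: [batch, context_size, d].
      context_size = tf.shape(keys)[1]
      # True for real context entries, False for zero padding.
      valid_mask = tf.sequence_mask(valid_context_size, maxlen=context_size)
      logits = tf.matmul(queries, keys, transpose_b=True)
      # Push padded keys to a large negative value so softmax ignores them.
      invalid = tf.cast(tf.math.logical_not(valid_mask), logits.dtype)
      logits += invalid[:, tf.newaxis, :] * _NEG_PAD
      weights = tf.nn.softmax(logits / temperature)
      return tf.matmul(weights, values)

    q = tf.random.normal([2, 3, 8])
    k = v = tf.random.normal([2, 5, 8])
    out = masked_context_attention(q, k, v,
                                   valid_context_size=tf.constant([5, 2]))
    print(out.shape)  # (2, 3, 8)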
import abc import tensorflow.compat.v1 as tf from tensorflow.python.util.deprecation import deprecated_args from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import matcher from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.core import target_assigner from object_detection.utils import ops from object_detection.utils import shape_utils from object_detection.utils import variables_helper from object_detection.utils import visualization_utils # pylint: disable=g-import-not-at-top try: import tf_slim as slim except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top class SSDFeatureExtractor(object): """SSD Slim Feature Extractor definition.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False): """Constructor. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ self._is_training = is_training self._depth_multiplier = depth_multiplier self._min_depth = min_depth self._pad_to_multiple = pad_to_multiple self._conv_hyperparams_fn = conv_hyperparams_fn self._reuse_weights = reuse_weights self._use_explicit_padding = use_explicit_padding self._use_depthwise = use_depthwise self._num_layers = num_layers self._override_base_feature_extractor_hyperparams = ( override_base_feature_extractor_hyperparams) @property def is_keras_model(self): return False @abc.abstractmethod def preprocess(self, resized_inputs): """Preprocesses images for feature extraction (minus image resizing). Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. """ pass @abc.abstractmethod def extract_features(self, preprocessed_inputs): """Extracts features from preprocessed inputs. This function is responsible for extracting feature maps from preprocessed images. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ raise NotImplementedError def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Args: feature_extractor_scope: A scope name for the feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): var_name = variable.op.name if var_name.startswith(feature_extractor_scope + '/'): var_name = var_name.replace(feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable return variables_to_restore class SSDKerasFeatureExtractor(tf.keras.Model): """SSD Feature Extractor definition.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, use_explicit_padding=False, use_depthwise=False, num_layers=6, override_base_feature_extractor_hyperparams=False, name=None): """Constructor. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. num_layers: Number of SSD layers. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_config`. name: A string name scope to assign to the model. If 'None', Keras will auto-generate one from the class name. """ super(SSDKerasFeatureExtractor, self).__init__(name=name) self._is_training = is_training self._depth_multiplier = depth_multiplier self._min_depth = min_depth self._pad_to_multiple = pad_to_multiple self._conv_hyperparams = conv_hyperparams self._freeze_batchnorm = freeze_batchnorm self._inplace_batchnorm_update = inplace_batchnorm_update self._use_explicit_padding = use_explicit_padding self._use_depthwise = use_depthwise self._num_layers = num_layers self._override_base_feature_extractor_hyperparams = ( override_base_feature_extractor_hyperparams) @property def is_keras_model(self): return True @abc.abstractmethod def preprocess(self, resized_inputs): """Preprocesses images for feature extraction (minus image resizing). Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. """ raise NotImplementedError @abc.abstractmethod def _extract_features(self, preprocessed_inputs): """Extracts features from preprocessed inputs. This function is responsible for extracting feature maps from preprocessed images. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ raise NotImplementedError # This overrides the keras.Model `call` method with the _extract_features # method. def call(self, inputs, **kwargs): return self._extract_features(inputs) class SSDMetaArch(model.DetectionModel): """SSD Meta-architecture definition.""" @deprecated_args(None, 'NMS is always placed on TPU; do not use nms_on_host ' 'as it has no effect.', 'nms_on_host') def __init__(self, is_training, anchor_generator, box_predictor, box_coder, feature_extractor, encode_background_as_zeros, image_resizer_fn, non_max_suppression_fn, score_conversion_fn, classification_loss, localization_loss, classification_loss_weight, localization_loss_weight, normalize_loss_by_num_matches, hard_example_miner, target_assigner_instance, add_summaries=True, normalize_loc_loss_by_codesize=False, freeze_batchnorm=False, inplace_batchnorm_update=False, add_background_class=True, explicit_background_class=False, random_example_sampler=None, expected_loss_weights_fn=None, use_confidences_as_targets=False, implicit_example_weight=0.5, equalization_loss_config=None, return_raw_detections_during_predict=False, nms_on_host=True): """SSDMetaArch Constructor. TODO(rathodv,jonathanhuang): group NMS parameters + score converter into a class and loss parameters into a class and write config protos for postprocessing and losses. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. anchor_generator: an anchor_generator.AnchorGenerator object. box_predictor: a box_predictor.BoxPredictor object. box_coder: a box_coder.BoxCoder object. feature_extractor: a SSDFeatureExtractor object. encode_background_as_zeros: boolean determining whether background targets are to be encoded as an all zeros vector or a one-hot vector (where background is the 0th class). image_resizer_fn: a callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions and a 1-D tensor of shape [3] indicating shape of true image within the resized image tensor as the resized image tensor could be padded. See builders/image_resizer_builder.py. non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window` inputs (with all other inputs already set) and returns a dictionary hold tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes` and `num_detections`. See `post_processing. batch_multiclass_non_max_suppression` for the type and shape of these tensors. score_conversion_fn: callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. classification_loss: an object_detection.core.losses.Loss object. localization_loss: a object_detection.core.losses.Loss object. 
classification_loss_weight: float localization_loss_weight: float normalize_loss_by_num_matches: boolean hard_example_miner: a losses.HardExampleMiner object (can be None) target_assigner_instance: target_assigner.TargetAssigner instance to use. add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph. normalize_loc_loss_by_codesize: whether to normalize localization loss by code size of the box encoder. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. add_background_class: Whether to add an implicit background class to one-hot encodings of groundtruth labels. Set to false if training a single class model or using groundtruth labels with an explicit background class. explicit_background_class: Set to true if using groundtruth labels with an explicit background class, as in multiclass scores. random_example_sampler: a BalancedPositiveNegativeSampler object that can perform random example sampling when computing loss. If None, random sampling process is skipped. Note that random example sampler and hard example miner can both be applied to the model. In that case, random sampler will take effect first and hard example miner can only process the random sampled examples. expected_loss_weights_fn: If not None, use to calculate loss by background/foreground weighting. Should take batch_cls_targets as inputs and return foreground_weights, background_weights. See expected_classification_loss_by_expected_sampling and expected_classification_loss_by_reweighting_unmatched_anchors in third_party/tensorflow_models/object_detection/utils/ops.py as examples. use_confidences_as_targets: Whether to use groundtruth_condifences field to assign the targets. implicit_example_weight: a float number that specifies the weight used for the implicit negative examples. equalization_loss_config: a namedtuple that specifies configs for computing equalization loss. return_raw_detections_during_predict: Whether to return raw detection boxes in the predict() method. These are decoded boxes that have not been through postprocessing (i.e. NMS). Default False. nms_on_host: boolean (default: True) controlling whether NMS should be carried out on the host (outside of TPU). """ super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes) self._is_training = is_training self._freeze_batchnorm = freeze_batchnorm self._inplace_batchnorm_update = inplace_batchnorm_update self._anchor_generator = anchor_generator self._box_predictor = box_predictor self._box_coder = box_coder self._feature_extractor = feature_extractor self._add_background_class = add_background_class self._explicit_background_class = explicit_background_class if add_background_class and explicit_background_class: raise ValueError("Cannot have both 'add_background_class' and" " 'explicit_background_class' true.") # Needed for fine-tuning from classification checkpoints whose # variables do not have the feature extractor scope. if self._feature_extractor.is_keras_model: # Keras feature extractors will have a name they implicitly use to scope. # So, all contained variables are prefixed by this name. 
# To load from classification checkpoints, need to filter out this name. self._extract_features_scope = feature_extractor.name else: # Slim feature extractors get an explicit naming scope self._extract_features_scope = 'FeatureExtractor' if encode_background_as_zeros: background_class = [0] else: background_class = [1] if self._add_background_class: num_foreground_classes = self.num_classes else: num_foreground_classes = self.num_classes - 1 self._unmatched_class_label = tf.constant( background_class + num_foreground_classes * [0], tf.float32) self._target_assigner = target_assigner_instance self._classification_loss = classification_loss self._localization_loss = localization_loss self._classification_loss_weight = classification_loss_weight self._localization_loss_weight = localization_loss_weight self._normalize_loss_by_num_matches = normalize_loss_by_num_matches self._normalize_loc_loss_by_codesize = normalize_loc_loss_by_codesize self._hard_example_miner = hard_example_miner self._random_example_sampler = random_example_sampler self._parallel_iterations = 16 self._image_resizer_fn = image_resizer_fn self._non_max_suppression_fn = non_max_suppression_fn self._score_conversion_fn = score_conversion_fn self._anchors = None self._add_summaries = add_summaries self._batched_prediction_tensor_names = [] self._expected_loss_weights_fn = expected_loss_weights_fn self._use_confidences_as_targets = use_confidences_as_targets self._implicit_example_weight = implicit_example_weight self._equalization_loss_config = equalization_loss_config self._return_raw_detections_during_predict = ( return_raw_detections_during_predict) @property def feature_extractor(self): return self._feature_extractor @property def anchors(self): if not self._anchors: raise RuntimeError('anchors have not been constructed yet!') if not isinstance(self._anchors, box_list.BoxList): raise RuntimeError('anchors should be a BoxList object, but is not.') return self._anchors @property def batched_prediction_tensor_names(self): if not self._batched_prediction_tensor_names: raise RuntimeError('Must call predict() method to get batched prediction ' 'tensor names.') return self._batched_prediction_tensor_names def preprocess(self, inputs): """Feature-extractor specific preprocessing. SSD meta architecture uses a default clip_window of [0, 0, 1, 1] during post-processing. On calling `preprocess` method, clip_window gets updated based on `true_image_shapes` returned by `image_resizer_fn`. Args: inputs: a [batch, height_in, width_in, channels] float tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, height_out, width_out, channels] float tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Raises: ValueError: if inputs tensor does not have type tf.float32 """ with tf.name_scope('Preprocessor'): normalized_inputs = self._feature_extractor.preprocess(inputs) return shape_utils.resize_images_and_return_shapes( normalized_inputs, self._image_resizer_fn) def _compute_clip_window(self, preprocessed_images, true_image_shapes): """Computes clip window to use during post_processing. Computes a new clip window to use during post-processing based on `resized_image_shapes` and `true_image_shapes` only if `preprocess` method has been called. Otherwise returns a default clip window of [0, 0, 1, 1]. 
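# --- Editor's illustration (not part of ssd_meta_arch.py) ---
# The clip window computed by _compute_clip_window reduces, per image, to
# [0, 0, true_height / padded_height, true_width / padded_width]. A
# standalone sketch of that arithmetic with made-up shapes:
import tensorflow as tf

true_image_shapes = tf.constant([[200, 150, 3]], dtype=tf.float32)
padded_height, padded_width = 300.0, 300.0
true_heights, true_widths = true_image_shapes[:, 0], true_image_shapes[:, 1]
clip_window = tf.stack([
    tf.zeros_like(true_heights),
    tf.zeros_like(true_widths),
    true_heights / padded_height,
    true_widths / padded_width,
], axis=1)
# clip_window -> [[0., 0., 0.6667, 0.5]]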
Args: preprocessed_images: the [batch, height, width, channels] image tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Or None if the clip window should cover the full image. Returns: a 2-D float32 tensor of the form [batch_size, 4] containing the clip window for each image in the batch in normalized coordinates (relative to the resized dimensions) where each clip window is of the form [ymin, xmin, ymax, xmax] or a default clip window of [0, 0, 1, 1]. """ if true_image_shapes is None: return tf.constant([0, 0, 1, 1], dtype=tf.float32) resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ = tf.unstack( tf.cast(true_image_shapes, dtype=tf.float32), axis=1) padded_height = tf.cast(resized_inputs_shape[1], dtype=tf.float32) padded_width = tf.cast(resized_inputs_shape[2], dtype=tf.float32) return tf.stack( [ tf.zeros_like(true_heights), tf.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width ], axis=1) def predict(self, preprocessed_inputs, true_image_shapes): """Predicts unpostprocessed tensors from input tensor. This function takes an input batch of images and runs it through the forward pass of the network to yield unpostprocessesed predictions. A side effect of calling the predict method is that self._anchors is populated with a box_list.BoxList of anchors. These anchors must be constructed before the postprocess or loss functions can be called. Args: preprocessed_inputs: a [batch, height, width, channels] image tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) preprocessed_inputs: the [batch, height, width, channels] image tensor. 2) box_encodings: 4-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 3) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 4) feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]. 5) anchors: 2-D float tensor of shape [num_anchors, 4] containing the generated anchors in normalized coordinates. 6) final_anchors: 3-D float tensor of shape [batch_size, num_anchors, 4] containing the generated anchors in normalized coordinates. If self._return_raw_detections_during_predict is True, the dictionary will also contain: 7) raw_detection_boxes: a 4-D float32 tensor with shape [batch_size, self.max_num_proposals, 4] in normalized coordinates. 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape [batch_size, self.max_num_proposals]. 
""" if self._inplace_batchnorm_update: batchnorm_updates_collections = None else: batchnorm_updates_collections = tf.GraphKeys.UPDATE_OPS if self._feature_extractor.is_keras_model: feature_maps = self._feature_extractor(preprocessed_inputs) else: with slim.arg_scope([slim.batch_norm], is_training=(self._is_training and not self._freeze_batchnorm), updates_collections=batchnorm_updates_collections): with tf.variable_scope(None, self._extract_features_scope, [preprocessed_inputs]): feature_maps = self._feature_extractor.extract_features( preprocessed_inputs) feature_map_spatial_dims = self._get_feature_map_spatial_dims( feature_maps) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) boxlist_list = self._anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) self._anchors = box_list_ops.concatenate(boxlist_list) if self._box_predictor.is_keras_model: predictor_results_dict = self._box_predictor(feature_maps) else: with slim.arg_scope([slim.batch_norm], is_training=(self._is_training and not self._freeze_batchnorm), updates_collections=batchnorm_updates_collections): predictor_results_dict = self._box_predictor.predict( feature_maps, self._anchor_generator.num_anchors_per_location()) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'feature_maps': feature_maps, 'anchors': self._anchors.get(), 'final_anchors': tf.tile( tf.expand_dims(self._anchors.get(), 0), [image_shape[0], 1, 1]) } for prediction_key, prediction_list in iter(predictor_results_dict.items()): prediction = tf.concat(prediction_list, axis=1) if (prediction_key == 'box_encodings' and prediction.shape.ndims == 4 and prediction.shape[2] == 1): prediction = tf.squeeze(prediction, axis=2) predictions_dict[prediction_key] = prediction if self._return_raw_detections_during_predict: predictions_dict.update(self._raw_detections_and_feature_map_inds( predictions_dict['box_encodings'], boxlist_list)) self._batched_prediction_tensor_names = [x for x in predictions_dict if x != 'anchors'] return predictions_dict def _raw_detections_and_feature_map_inds(self, box_encodings, boxlist_list): anchors = self._anchors.get() raw_detection_boxes, _ = self._batch_decode(box_encodings, anchors) batch_size, _, _ = shape_utils.combined_static_and_dynamic_shape( raw_detection_boxes) feature_map_indices = ( self._anchor_generator.anchor_index_to_feature_map_index(boxlist_list)) feature_map_indices_batched = tf.tile( tf.expand_dims(feature_map_indices, 0), multiples=[batch_size, 1]) return { fields.PredictionFields.raw_detection_boxes: raw_detection_boxes, fields.PredictionFields.raw_detection_feature_map_indices: feature_map_indices_batched } def _get_feature_map_spatial_dims(self, feature_maps): """Return list of spatial dimensions for each feature map in a list. Args: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]. Returns: a list of pairs (height, width) for each feature map in feature_maps """ feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ] return [(shape[1], shape[2]) for shape in feature_map_shapes] def postprocess(self, prediction_dict, true_image_shapes): """Converts prediction tensors to final detections. This function converts raw predictions tensors to final detection results by slicing off the background class, decoding box predictions and applying non max suppression and clipping to the image window. 
See base class for output format conventions. Note also that by default, scores are to be interpreted as logits, but if a score_conversion_fn is used, then scores are remapped (and may thus have a different interpretation). Args: prediction_dict: a dictionary holding prediction tensors with 1) preprocessed_inputs: a [batch, height, width, channels] image tensor. 2) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 3) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions. 4) mask_predictions: (optional) a 5-D float tensor of shape [batch_size, num_anchors, q, mask_height, mask_width]. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Or None, if the clip window should cover the full image. Returns: detections: a dictionary containing the following fields detection_boxes: [batch, max_detections, 4] tensor with post-processed detection boxes. detection_scores: [batch, max_detections] tensor with scalar scores for post-processed detection boxes. detection_multiclass_scores: [batch, max_detections, num_classes_with_background] tensor with class score distribution for post-processed detection boxes including background class if any. detection_classes: [batch, max_detections] tensor with classes for post-processed detection classes. detection_keypoints: [batch, max_detections, num_keypoints, 2] (if encoded in the prediction_dict 'box_encodings') detection_masks: [batch_size, max_detections, mask_height, mask_width] (optional) num_detections: [batch] raw_detection_boxes: [batch, total_detections, 4] tensor with decoded detection boxes before Non-Max Suppression. raw_detection_score: [batch, total_detections, num_classes_with_background] tensor of multi-class scores for raw detection boxes. Raises: ValueError: if prediction_dict does not contain `box_encodings` or `class_predictions_with_background` fields. 
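# --- Editor's illustration (not part of ssd_meta_arch.py) ---
# postprocess() converts logits with score_conversion_fn and then slices off
# the background column (class index 0) before NMS. A toy version of that
# step with invented numbers:
import tensorflow as tf

class_logits = tf.constant([[[2.0, 0.5, 0.1],    # [batch=1, anchors=2,
                             [0.0, 1.0, 3.0]]])  #  num_classes + background=3]
scores_with_background = tf.nn.softmax(class_logits)
scores = tf.slice(scores_with_background, [0, 0, 1], [-1, -1, -1])
# scores.shape -> (1, 2, 2); background scores are dropped and the remaining
# per-class scores are what non-max suppression consumes.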
""" if ('box_encodings' not in prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does not contain expected entries.') if 'anchors' not in prediction_dict: prediction_dict['anchors'] = self.anchors.get() with tf.name_scope('Postprocessor'): preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = tf.identity(box_encodings, 'raw_box_encodings') class_predictions_with_background = ( prediction_dict['class_predictions_with_background']) detection_boxes, detection_keypoints = self._batch_decode( box_encodings, prediction_dict['anchors']) detection_boxes = tf.identity(detection_boxes, 'raw_box_locations') detection_boxes = tf.expand_dims(detection_boxes, axis=2) detection_scores_with_background = self._score_conversion_fn( class_predictions_with_background) detection_scores = tf.identity(detection_scores_with_background, 'raw_box_scores') if self._add_background_class or self._explicit_background_class: detection_scores = tf.slice(detection_scores, [0, 0, 1], [-1, -1, -1]) additional_fields = None batch_size = ( shape_utils.combined_static_and_dynamic_shape(preprocessed_images)[0]) if 'feature_maps' in prediction_dict: feature_map_list = [] for feature_map in prediction_dict['feature_maps']: feature_map_list.append(tf.reshape(feature_map, [batch_size, -1])) box_features = tf.concat(feature_map_list, 1) box_features = tf.identity(box_features, 'raw_box_features') additional_fields = { 'multiclass_scores': detection_scores_with_background } if self._anchors is not None: num_boxes = (self._anchors.num_boxes_static() or self._anchors.num_boxes()) anchor_indices = tf.range(num_boxes) batch_anchor_indices = tf.tile( tf.expand_dims(anchor_indices, 0), [batch_size, 1]) # All additional fields need to be float. 
additional_fields.update({ 'anchor_indices': tf.cast(batch_anchor_indices, tf.float32), }) if detection_keypoints is not None: detection_keypoints = tf.identity( detection_keypoints, 'raw_keypoint_locations') additional_fields[fields.BoxListFields.keypoints] = detection_keypoints (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections) = self._non_max_suppression_fn( detection_boxes, detection_scores, clip_window=self._compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields, masks=prediction_dict.get('mask_predictions')) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: tf.cast(num_detections, dtype=tf.float32), fields.DetectionResultFields.raw_detection_boxes: tf.squeeze(detection_boxes, axis=2), fields.DetectionResultFields.raw_detection_scores: detection_scores_with_background } if (nmsed_additional_fields is not None and fields.InputDataFields.multiclass_scores in nmsed_additional_fields): detection_dict[ fields.DetectionResultFields.detection_multiclass_scores] = ( nmsed_additional_fields[ fields.InputDataFields.multiclass_scores]) if (nmsed_additional_fields is not None and 'anchor_indices' in nmsed_additional_fields): detection_dict.update({ fields.DetectionResultFields.detection_anchor_indices: tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32), }) if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) if nmsed_masks is not None: detection_dict[ fields.DetectionResultFields.detection_masks] = nmsed_masks return detection_dict def loss(self, prediction_dict, true_image_shapes, scope=None): """Compute scalar loss tensors with respect to provided groundtruth. Calling this function requires that groundtruth tensors have been provided via the provide_groundtruth function. Args: prediction_dict: a dictionary holding prediction tensors with 1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. scope: Optional scope name. Returns: a dictionary mapping loss keys (`localization_loss` and `classification_loss`) to scalar tensors representing corresponding loss values. 
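# --- Editor's illustration (not part of ssd_meta_arch.py) ---
# With normalize_loss_by_num_matches, the summed losses are divided by the
# (clamped) number of matched anchors; normalize_loc_loss_by_codesize further
# scales the localization normalizer by the box coder's code size. A compact
# numeric sketch of that weighting:
import tensorflow as tf

localization_loss_sum = tf.constant(12.0)
classification_loss_sum = tf.constant(30.0)
batch_reg_weights = tf.constant([[1.0, 1.0, 0.0, 1.0]])  # 3 matched anchors
code_size = 4

normalizer = tf.maximum(tf.reduce_sum(batch_reg_weights), 1.0)  # 3.0
localization_loss = (1.0 / (normalizer * code_size)) * localization_loss_sum
classification_loss = (1.0 / normalizer) * classification_loss_sum
# localization_loss -> 1.0, classification_loss -> 10.0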
""" with tf.name_scope(scope, 'Loss', prediction_dict.values()): keypoints = None if self.groundtruth_has_field(fields.BoxListFields.keypoints): keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints) weights = None if self.groundtruth_has_field(fields.BoxListFields.weights): weights = self.groundtruth_lists(fields.BoxListFields.weights) confidences = None if self.groundtruth_has_field(fields.BoxListFields.confidences): confidences = self.groundtruth_lists(fields.BoxListFields.confidences) (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, batch_match) = self._assign_targets( self.groundtruth_lists(fields.BoxListFields.boxes), self.groundtruth_lists(fields.BoxListFields.classes), keypoints, weights, confidences) match_list = [matcher.Match(match) for match in tf.unstack(batch_match)] if self._add_summaries: self._summarize_target_assignment( self.groundtruth_lists(fields.BoxListFields.boxes), match_list) if self._random_example_sampler: batch_cls_per_anchor_weights = tf.reduce_mean( batch_cls_weights, axis=-1) batch_sampled_indicator = tf.cast( shape_utils.static_or_dynamic_map_fn( self._minibatch_subsample_fn, [batch_cls_targets, batch_cls_per_anchor_weights], dtype=tf.bool, parallel_iterations=self._parallel_iterations, back_prop=True), dtype=tf.float32) batch_reg_weights = tf.multiply(batch_sampled_indicator, batch_reg_weights) batch_cls_weights = tf.multiply( tf.expand_dims(batch_sampled_indicator, -1), batch_cls_weights) losses_mask = None if self.groundtruth_has_field(fields.InputDataFields.is_annotated): losses_mask = tf.stack(self.groundtruth_lists( fields.InputDataFields.is_annotated)) location_losses = self._localization_loss( prediction_dict['box_encodings'], batch_reg_targets, ignore_nan_targets=True, weights=batch_reg_weights, losses_mask=losses_mask) cls_losses = self._classification_loss( prediction_dict['class_predictions_with_background'], batch_cls_targets, weights=batch_cls_weights, losses_mask=losses_mask) if self._expected_loss_weights_fn: # Need to compute losses for assigned targets against the # unmatched_class_label as well as their assigned targets. 
# simplest thing (but wasteful) is just to calculate all losses # twice batch_size, num_anchors, num_classes = batch_cls_targets.get_shape() unmatched_targets = tf.ones([batch_size, num_anchors, 1 ]) * self._unmatched_class_label unmatched_cls_losses = self._classification_loss( prediction_dict['class_predictions_with_background'], unmatched_targets, weights=batch_cls_weights, losses_mask=losses_mask) if cls_losses.get_shape().ndims == 3: batch_size, num_anchors, num_classes = cls_losses.get_shape() cls_losses = tf.reshape(cls_losses, [batch_size, -1]) unmatched_cls_losses = tf.reshape(unmatched_cls_losses, [batch_size, -1]) batch_cls_targets = tf.reshape( batch_cls_targets, [batch_size, num_anchors * num_classes, -1]) batch_cls_targets = tf.concat( [1 - batch_cls_targets, batch_cls_targets], axis=-1) location_losses = tf.tile(location_losses, [1, num_classes]) foreground_weights, background_weights = ( self._expected_loss_weights_fn(batch_cls_targets)) cls_losses = ( foreground_weights * cls_losses + background_weights * unmatched_cls_losses) location_losses *= foreground_weights classification_loss = tf.reduce_sum(cls_losses) localization_loss = tf.reduce_sum(location_losses) elif self._hard_example_miner: cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2) (localization_loss, classification_loss) = self._apply_hard_mining( location_losses, cls_losses, prediction_dict, match_list) if self._add_summaries: self._hard_example_miner.summarize() else: cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2) localization_loss = tf.reduce_sum(location_losses) classification_loss = tf.reduce_sum(cls_losses) # Optionally normalize by number of positive matches normalizer = tf.constant(1.0, dtype=tf.float32) if self._normalize_loss_by_num_matches: normalizer = tf.maximum(tf.cast(tf.reduce_sum(batch_reg_weights), dtype=tf.float32), 1.0) localization_loss_normalizer = normalizer if self._normalize_loc_loss_by_codesize: localization_loss_normalizer *= self._box_coder.code_size localization_loss = tf.multiply((self._localization_loss_weight / localization_loss_normalizer), localization_loss, name='localization_loss') classification_loss = tf.multiply((self._classification_loss_weight / normalizer), classification_loss, name='classification_loss') loss_dict = { 'Loss/localization_loss': localization_loss, 'Loss/classification_loss': classification_loss } return loss_dict def _minibatch_subsample_fn(self, inputs): """Randomly samples anchors for one image. Args: inputs: a list of 2 inputs. First one is a tensor of shape [num_anchors, num_classes] indicating targets assigned to each anchor. Second one is a tensor of shape [num_anchors] indicating the class weight of each anchor. Returns: batch_sampled_indicator: bool tensor of shape [num_anchors] indicating whether the anchor should be selected for loss computation. """ cls_targets, cls_weights = inputs if self._add_background_class: # Set background_class bits to 0 so that the positives_indicator # computation would not consider background class. 
background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1])) regular_class = tf.slice(cls_targets, [0, 1], [-1, -1]) cls_targets = tf.concat([background_class, regular_class], 1) positives_indicator = tf.reduce_sum(cls_targets, axis=1) return self._random_example_sampler.subsample( tf.cast(cls_weights, tf.bool), batch_size=None, labels=tf.cast(positives_indicator, tf.bool)) def _summarize_anchor_classification_loss(self, class_ids, cls_losses): positive_indices = tf.where(tf.greater(class_ids, 0)) positive_anchor_cls_loss = tf.squeeze( tf.gather(cls_losses, positive_indices), axis=1) visualization_utils.add_cdf_image_summary(positive_anchor_cls_loss, 'PositiveAnchorLossCDF') negative_indices = tf.where(tf.equal(class_ids, 0)) negative_anchor_cls_loss = tf.squeeze( tf.gather(cls_losses, negative_indices), axis=1) visualization_utils.add_cdf_image_summary(negative_anchor_cls_loss, 'NegativeAnchorLossCDF') def _assign_targets(self, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_keypoints_list=None, groundtruth_weights_list=None, groundtruth_confidences_list=None): """Assign groundtruth targets. Adds a background class to each one-hot encoding of groundtruth classes and uses target assigner to obtain regression and classification targets. Args: groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] containing coordinates of the groundtruth boxes. Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and assumed to be normalized and clipped relative to the image window with y_min <= y_max and x_min <= x_max. groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes] containing the class targets with the 0th index assumed to map to the first non-background class. groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape [num_boxes, num_keypoints, 2] groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape [num_boxes, num_classes] containing class confidences for groundtruth boxes. Returns: batch_cls_targets: a tensor with shape [batch_size, num_anchors, num_classes], batch_cls_weights: a tensor with shape [batch_size, num_anchors], batch_reg_targets: a tensor with shape [batch_size, num_anchors, box_code_dimension] batch_reg_weights: a tensor with shape [batch_size, num_anchors], match: an int32 tensor of shape [batch_size, num_anchors], containing result of anchor groundtruth matching. Each position in the tensor indicates an anchor and holds the following meaning: (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. (2) if match[x, i]=-1, anchor i is marked to be background . (3) if match[x, i]=-2, anchor i is ignored since it is not background and does not have sufficient overlap to call it a foreground. 
""" groundtruth_boxlists = [ box_list.BoxList(boxes) for boxes in groundtruth_boxes_list ] train_using_confidences = (self._is_training and self._use_confidences_as_targets) if self._add_background_class: groundtruth_classes_with_background_list = [ tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT') for one_hot_encoding in groundtruth_classes_list ] if train_using_confidences: groundtruth_confidences_with_background_list = [ tf.pad(groundtruth_confidences, [[0, 0], [1, 0]], mode='CONSTANT') for groundtruth_confidences in groundtruth_confidences_list ] else: groundtruth_classes_with_background_list = groundtruth_classes_list if groundtruth_keypoints_list is not None: for boxlist, keypoints in zip( groundtruth_boxlists, groundtruth_keypoints_list): boxlist.add_field(fields.BoxListFields.keypoints, keypoints) if train_using_confidences: return target_assigner.batch_assign_confidences( self._target_assigner, self.anchors, groundtruth_boxlists, groundtruth_confidences_with_background_list, groundtruth_weights_list, self._unmatched_class_label, self._add_background_class, self._implicit_example_weight) else: return target_assigner.batch_assign_targets( self._target_assigner, self.anchors, groundtruth_boxlists, groundtruth_classes_with_background_list, self._unmatched_class_label, groundtruth_weights_list) def _summarize_target_assignment(self, groundtruth_boxes_list, match_list): """Creates tensorflow summaries for the input boxes and anchors. This function creates four summaries corresponding to the average number (over images in a batch) of (1) groundtruth boxes, (2) anchors marked as positive, (3) anchors marked as negative, and (4) anchors marked as ignored. Args: groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] containing corners of the groundtruth boxes. match_list: a list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. """ # TODO(rathodv): Add a test for these summaries. try: # TODO(kaftan): Integrate these summaries into the v2 style loops with tf.compat.v2.init_scope(): if tf.compat.v2.executing_eagerly(): return except AttributeError: pass avg_num_gt_boxes = tf.reduce_mean( tf.cast( tf.stack([tf.shape(x)[0] for x in groundtruth_boxes_list]), dtype=tf.float32)) avg_num_matched_gt_boxes = tf.reduce_mean( tf.cast( tf.stack([match.num_matched_rows() for match in match_list]), dtype=tf.float32)) avg_pos_anchors = tf.reduce_mean( tf.cast( tf.stack([match.num_matched_columns() for match in match_list]), dtype=tf.float32)) avg_neg_anchors = tf.reduce_mean( tf.cast( tf.stack([match.num_unmatched_columns() for match in match_list]), dtype=tf.float32)) avg_ignored_anchors = tf.reduce_mean( tf.cast( tf.stack([match.num_ignored_columns() for match in match_list]), dtype=tf.float32)) tf.summary.scalar('AvgNumGroundtruthBoxesPerImage', avg_num_gt_boxes, family='TargetAssignment') tf.summary.scalar('AvgNumGroundtruthBoxesMatchedPerImage', avg_num_matched_gt_boxes, family='TargetAssignment') tf.summary.scalar('AvgNumPositiveAnchorsPerImage', avg_pos_anchors, family='TargetAssignment') tf.summary.scalar('AvgNumNegativeAnchorsPerImage', avg_neg_anchors, family='TargetAssignment') tf.summary.scalar('AvgNumIgnoredAnchorsPerImage', avg_ignored_anchors, family='TargetAssignment') def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict, match_list): """Applies hard mining to anchorwise losses. 
    Args:
      location_losses: Float tensor of shape [batch_size, num_anchors]
        representing anchorwise location losses.
      cls_losses: Float tensor of shape [batch_size, num_anchors]
        representing anchorwise classification losses.
      prediction_dict: a dictionary holding prediction tensors with
        1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
          box_code_dimension] containing predicted boxes.
        2) class_predictions_with_background: 3-D float tensor of shape
          [batch_size, num_anchors, num_classes+1] containing class predictions
          (logits) for each of the anchors. Note that this tensor *includes*
          background class predictions.
        3) anchors: (optional) 2-D float tensor of shape [num_anchors, 4].
      match_list: a list of matcher.Match objects encoding the match between
        anchors and groundtruth boxes for each image of the batch,
        with rows of the Match objects corresponding to groundtruth boxes
        and columns corresponding to anchors.

    Returns:
      mined_location_loss: a float scalar with sum of localization losses from
        selected hard examples.
      mined_cls_loss: a float scalar with sum of classification losses from
        selected hard examples.
    """
    class_predictions = prediction_dict['class_predictions_with_background']
    if self._add_background_class:
      class_predictions = tf.slice(class_predictions, [0, 0, 1], [-1, -1, -1])

    if 'anchors' not in prediction_dict:
      prediction_dict['anchors'] = self.anchors.get()
    decoded_boxes, _ = self._batch_decode(prediction_dict['box_encodings'],
                                          prediction_dict['anchors'])
    decoded_box_tensors_list = tf.unstack(decoded_boxes)
    class_prediction_list = tf.unstack(class_predictions)
    decoded_boxlist_list = []
    for box_location, box_score in zip(decoded_box_tensors_list,
                                       class_prediction_list):
      decoded_boxlist = box_list.BoxList(box_location)
      decoded_boxlist.add_field('scores', box_score)
      decoded_boxlist_list.append(decoded_boxlist)
    return self._hard_example_miner(
        location_losses=location_losses,
        cls_losses=cls_losses,
        decoded_boxlist_list=decoded_boxlist_list,
        match_list=match_list)

  def _batch_decode(self, box_encodings, anchors):
    """Decodes a batch of box encodings with respect to the anchors.

    Args:
      box_encodings: A float32 tensor of shape
        [batch_size, num_anchors, box_code_size] containing box encodings.
      anchors: A tensor of shape [num_anchors, 4].

    Returns:
      decoded_boxes: A float32 tensor of shape
        [batch_size, num_anchors, 4] containing the decoded boxes.
      decoded_keypoints: A float32 tensor of shape
        [batch_size, num_anchors, num_keypoints, 2] containing the decoded
        keypoints if present in the input `box_encodings`, None otherwise.
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(
        box_encodings)
    batch_size = combined_shape[0]
    tiled_anchor_boxes = tf.tile(tf.expand_dims(anchors, 0),
                                 [batch_size, 1, 1])
    tiled_anchors_boxlist = box_list.BoxList(
        tf.reshape(tiled_anchor_boxes, [-1, 4]))
    decoded_boxes = self._box_coder.decode(
        tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
        tiled_anchors_boxlist)
    decoded_keypoints = None
    if decoded_boxes.has_field(fields.BoxListFields.keypoints):
      decoded_keypoints = decoded_boxes.get_field(
          fields.BoxListFields.keypoints)
      num_keypoints = decoded_keypoints.get_shape()[1]
      decoded_keypoints = tf.reshape(
          decoded_keypoints,
          tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
    decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack(
        [combined_shape[0], combined_shape[1], 4]))
    return decoded_boxes, decoded_keypoints

  def regularization_losses(self):
    """Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator needs to use during training/optimization. Returns: A list of regularization loss tensors. """ losses = [] slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) # Copy the slim losses to avoid modifying the collection if slim_losses: losses.extend(slim_losses) if self._box_predictor.is_keras_model: losses.extend(self._box_predictor.losses) if self._feature_extractor.is_keras_model: losses.extend(self._feature_extractor.losses) return losses def restore_map(self, fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=False): """Returns a map of variables to load from a foreign checkpoint. See parent class for details. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. load_all_detection_checkpoint_vars: whether to load all variables (when `fine_tune_checkpoint_type` is `detection`). If False, only variables within the feature extractor scope are included. Default False. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. Raises: ValueError: if fine_tune_checkpoint_type is neither `classification` nor `detection`. """ if fine_tune_checkpoint_type == 'classification': return self._feature_extractor.restore_from_classification_checkpoint_fn( self._extract_features_scope) elif fine_tune_checkpoint_type == 'detection': variables_to_restore = {} for variable in variables_helper.get_global_variables_safely(): var_name = variable.op.name if load_all_detection_checkpoint_vars: variables_to_restore[var_name] = variable else: if var_name.startswith(self._extract_features_scope): variables_to_restore[var_name] = variable return variables_to_restore else: raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( fine_tune_checkpoint_type)) def restore_from_objects(self, fine_tune_checkpoint_type='detection'): """Returns a map of Trackable objects to load from a foreign checkpoint. Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module or Checkpoint). This enables the model to initialize based on weights from another task. For example, the feature extractor variables from a classification model can be used to bootstrap training of an object detector. When loading from an object detection model, the checkpoint model should have the same parameters as this detection model with exception of the num_classes parameter. Note that this function is intended to be used to restore Keras-based models when running Tensorflow 2, whereas restore_map (above) is intended to be used to restore Slim-based models when running Tensorflow 1.x. Args: fine_tune_checkpoint_type: A string inidicating the subset of variables to load. Valid values: `detection`, `classification`, `full`. Default `detection`. An SSD checkpoint has three parts: 1) Classification Network (like ResNet) 2) DeConv layers (for FPN) 3) Box/Class prediction parameters The parameters will be loaded using the following strategy: `classification` - will load #1 `detection` - will load #1, #2 `full` - will load #1, #2, #3 Returns: A dict mapping keys to Trackable objects (tf.Module or Checkpoint). 
""" if fine_tune_checkpoint_type == 'classification': return { 'feature_extractor': self._feature_extractor.classification_backbone } elif fine_tune_checkpoint_type == 'detection': fake_model = tf.train.Checkpoint( _feature_extractor=self._feature_extractor) return {'model': fake_model} elif fine_tune_checkpoint_type == 'full': return {'model': self} else: raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( fine_tune_checkpoint_type)) def updates(self): """Returns a list of update operators for this model. Returns a list of update operators for this model that must be executed at each training step. The estimator's train op needs to have a control dependency on these updates. Returns: A list of update operators. """ update_ops = [] slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Copy the slim ops to avoid modifying the collection if slim_update_ops: update_ops.extend(slim_update_ops) if self._box_predictor.is_keras_model: update_ops.extend(self._box_predictor.get_updates_for(None)) update_ops.extend(self._box_predictor.get_updates_for( self._box_predictor.inputs)) if self._feature_extractor.is_keras_model: update_ops.extend(self._feature_extractor.get_updates_for(None)) update_ops.extend(self._feature_extractor.get_updates_for( self._feature_extractor.inputs)) return update_ops
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/meta_architectures/ssd_meta_arch.py
ssd_meta_arch.py
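The loss normalization at the end of the loss computation in the ssd_meta_arch.py listing above divides both summed losses by the number of matched anchors (floored at one), and optionally divides the localization loss by the box-coder code size as well. Below is a standalone sketch of that arithmetic only; every value is invented purely for illustration and none of it comes from a real training run.

import numpy as np

# Made-up batch of regression weights: 1.0 marks an anchor matched to a
# groundtruth box, 0.0 marks background/ignored anchors (3 matches in total).
batch_reg_weights = np.array([[1., 1., 0., 0.],
                              [1., 0., 0., 0.]])
raw_localization_loss = 6.0      # assumed sum of anchorwise location losses
raw_classification_loss = 9.0    # assumed sum of anchorwise class losses
localization_loss_weight = 1.0
classification_loss_weight = 1.0
code_size = 4                    # assumed box-coder code size

# normalize_loss_by_num_matches: divide by the positive-anchor count, never 0.
normalizer = max(batch_reg_weights.sum(), 1.0)
# normalize_loc_loss_by_codesize: additionally scale the location normalizer.
localization_loss_normalizer = normalizer * code_size

localization_loss = (localization_loss_weight /
                     localization_loss_normalizer) * raw_localization_loss
classification_loss = (classification_loss_weight /
                       normalizer) * raw_classification_loss
print(localization_loss, classification_loss)  # 0.5 3.0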
"""RFCN Box Predictor.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.core import box_predictor from object_detection.utils import ops BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class RfcnBoxPredictor(box_predictor.BoxPredictor): """RFCN Box Predictor. Applies a position sensitive ROI pooling on position sensitive feature maps to predict classes and refined locations. See https://arxiv.org/abs/1605.06409 for details. This is used for the second stage of the RFCN meta architecture. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. """ def __init__(self, is_training, num_classes, conv_hyperparams_fn, num_spatial_bins, depth, crop_size, box_code_size): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to construct tf-slim arg_scope with hyperparameters for convolutional layers. num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`. depth: Target depth to reduce the input feature maps to. crop_size: A list of two integers `[crop_height, crop_width]`. box_code_size: Size of encoding for each box. """ super(RfcnBoxPredictor, self).__init__(is_training, num_classes) self._conv_hyperparams_fn = conv_hyperparams_fn self._num_spatial_bins = num_spatial_bins self._depth = depth self._crop_size = crop_size self._box_code_size = box_code_size @property def num_classes(self): return self._num_classes def _predict(self, image_features, num_predictions_per_location, proposal_boxes): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Currently, this must be set to [1], or an error will be raised. proposal_boxes: A float tensor of shape [batch_size, num_proposals, box_code_size]. Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. Raises: ValueError: if num_predictions_per_location is not 1 or if len(image_features) is not 1. """ if (len(num_predictions_per_location) != 1 or num_predictions_per_location[0] != 1): raise ValueError('Currently RfcnBoxPredictor only supports ' 'predicting a single box per class per location.') if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'. 
format(len(image_features))) image_feature = image_features[0] num_predictions_per_location = num_predictions_per_location[0] batch_size = tf.shape(proposal_boxes)[0] num_boxes = tf.shape(proposal_boxes)[1] net = image_feature with slim.arg_scope(self._conv_hyperparams_fn()): net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth') # Location predictions. location_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * self.num_classes * self._box_code_size) location_feature_map = slim.conv2d(net, location_feature_map_depth, [1, 1], activation_fn=None, scope='refined_locations') box_encodings = ops.batch_position_sensitive_crop_regions( location_feature_map, boxes=proposal_boxes, crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True) box_encodings = tf.squeeze(box_encodings, axis=[2, 3]) box_encodings = tf.reshape(box_encodings, [batch_size * num_boxes, 1, self.num_classes, self._box_code_size]) # Class predictions. total_classes = self.num_classes + 1 # Account for background class. class_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * total_classes) class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1], activation_fn=None, scope='class_predictions') class_predictions_with_background = ( ops.batch_position_sensitive_crop_regions( class_feature_map, boxes=proposal_boxes, crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True)) class_predictions_with_background = tf.squeeze( class_predictions_with_background, axis=[2, 3]) class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size * num_boxes, 1, total_classes]) return {BOX_ENCODINGS: [box_encodings], CLASS_PREDICTIONS_WITH_BACKGROUND: [class_predictions_with_background]}
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/rfcn_box_predictor.py
rfcn_box_predictor.py
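A small sketch of the channel-depth arithmetic RfcnBoxPredictor._predict performs before position-sensitive cropping: the location map needs one channel per (spatial bin, class, box coordinate), while the class map needs one channel per (spatial bin, class including background). The bin counts, class count and code size below are assumed example values, not defaults shipped with this package.

num_spatial_bins = [3, 3]    # [spatial_bins_y, spatial_bins_x], assumed
num_classes = 90             # assumed; excludes the background class
box_code_size = 4

location_feature_map_depth = (num_spatial_bins[0] * num_spatial_bins[1] *
                              num_classes * box_code_size)
total_classes = num_classes + 1  # account for the background class
class_feature_map_depth = (num_spatial_bins[0] * num_spatial_bins[1] *
                           total_classes)
print(location_feature_map_depth, class_feature_map_depth)  # 3240 819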
"""Convolutional Box Predictors with and without weight sharing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.core import box_predictor from object_detection.utils import shape_utils from object_detection.utils import static_shape BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class _NoopVariableScope(object): """A dummy class that does not push any scope.""" def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): return False class ConvolutionalBoxPredictor(box_predictor.BoxPredictor): """Convolutional Box Predictor. Optionally add an intermediate 1x1 convolutional layer after features and predict in parallel branches box_encodings and class_predictions_with_background. Currently this box predictor assumes that predictions are "shared" across classes --- that is each anchor makes box predictions which do not depend on class. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams_fn, num_layers_before_predictor, min_depth, max_depth): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. num_layers_before_predictor: Number of the additional conv layers before the predictor. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._other_heads = other_heads self._conv_hyperparams_fn = conv_hyperparams_fn self._min_depth = min_depth self._max_depth = max_depth self._num_layers_before_predictor = num_layers_before_predictor @property def num_classes(self): return self._num_classes def _predict(self, image_features, num_predictions_per_location_list): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Returns: A dictionary containing: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. 
Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. (optional) Predictions from other heads. """ predictions = { BOX_ENCODINGS: [], CLASS_PREDICTIONS_WITH_BACKGROUND: [], } for head_name in self._other_heads.keys(): predictions[head_name] = [] # TODO(rathodv): Come up with a better way to generate scope names # in box predictor once we have time to retrain all models in the zoo. # The following lines create scope names to be backwards compatible with the # existing checkpoints. box_predictor_scopes = [_NoopVariableScope()] if len(image_features) > 1: box_predictor_scopes = [ tf.variable_scope('BoxPredictor_{}'.format(i)) for i in range(len(image_features)) ] for (image_feature, num_predictions_per_location, box_predictor_scope) in zip( image_features, num_predictions_per_location_list, box_predictor_scopes): net = image_feature with box_predictor_scope: with slim.arg_scope(self._conv_hyperparams_fn()): with slim.arg_scope([slim.dropout], is_training=self._is_training): # Add additional conv layers before the class predictor. features_depth = static_shape.get_depth(image_feature.get_shape()) depth = max(min(features_depth, self._max_depth), self._min_depth) tf.logging.info('depth of additional conv before box predictor: {}'. format(depth)) if depth > 0 and self._num_layers_before_predictor > 0: for i in range(self._num_layers_before_predictor): net = slim.conv2d( net, depth, [1, 1], reuse=tf.AUTO_REUSE, scope='Conv2d_%d_1x1_%d' % (i, depth)) sorted_keys = sorted(self._other_heads.keys()) sorted_keys.append(BOX_ENCODINGS) sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) for head_name in sorted_keys: if head_name == BOX_ENCODINGS: head_obj = self._box_prediction_head elif head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: head_obj = self._class_prediction_head else: head_obj = self._other_heads[head_name] prediction = head_obj.predict( features=net, num_predictions_per_location=num_predictions_per_location) predictions[head_name].append(prediction) return predictions # TODO(rathodv): Replace with slim.arg_scope_func_key once its available # externally. def _arg_scope_func_key(op): """Returns a key that can be used to index arg_scope dictionary.""" return getattr(op, '_key_op', str(op)) # TODO(rathodv): Merge the implementation with ConvolutionalBoxPredictor above # since they are very similar. class WeightSharedConvolutionalBoxPredictor(box_predictor.BoxPredictor): """Convolutional Box Predictor with weight sharing. Defines the box predictor as defined in https://arxiv.org/abs/1708.02002. This class differs from ConvolutionalBoxPredictor in that it shares weights and biases while predicting from different feature maps. However, batch_norm parameters are not shared because the statistics of the activations vary among the different feature maps. Also note that separate multi-layer towers are constructed for the box encoding and class predictors respectively. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams_fn, depth, num_layers_before_predictor, kernel_size=3, apply_batch_norm=False, share_prediction_tower=False, use_depthwise=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. 
num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. kernel_size: Size of final convolution kernel. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. share_prediction_tower: Whether to share the multi-layer tower among box prediction head, class prediction head and other heads. use_depthwise: Whether to use depthwise separable conv2d instead of regular conv2d. """ super(WeightSharedConvolutionalBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._other_heads = other_heads self._conv_hyperparams_fn = conv_hyperparams_fn self._depth = depth self._num_layers_before_predictor = num_layers_before_predictor self._kernel_size = kernel_size self._apply_batch_norm = apply_batch_norm self._share_prediction_tower = share_prediction_tower self._use_depthwise = use_depthwise @property def num_classes(self): return self._num_classes def _insert_additional_projection_layer(self, image_feature, inserted_layer_counter, target_channel): if inserted_layer_counter < 0: return image_feature, inserted_layer_counter image_feature = slim.conv2d( image_feature, target_channel, [1, 1], stride=1, padding='SAME', activation_fn=None, normalizer_fn=(tf.identity if self._apply_batch_norm else None), scope='ProjectionLayer/conv2d_{}'.format( inserted_layer_counter)) if self._apply_batch_norm: image_feature = slim.batch_norm( image_feature, scope='ProjectionLayer/conv2d_{}/BatchNorm'.format( inserted_layer_counter)) inserted_layer_counter += 1 return image_feature, inserted_layer_counter def _compute_base_tower(self, tower_name_scope, image_feature, feature_index): net = image_feature for i in range(self._num_layers_before_predictor): if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d net = conv_op( net, self._depth, [self._kernel_size, self._kernel_size], stride=1, padding='SAME', activation_fn=None, normalizer_fn=(tf.identity if self._apply_batch_norm else None), scope='{}/conv2d_{}'.format(tower_name_scope, i)) if self._apply_batch_norm: net = slim.batch_norm( net, scope='{}/conv2d_{}/BatchNorm/feature_{}'. 
            format(tower_name_scope, i, feature_index))
      net = tf.nn.relu6(net)
    return net

  def _predict_head(self, head_name, head_obj, image_feature, box_tower_feature,
                    feature_index, num_predictions_per_location):
    if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:
      tower_name_scope = 'ClassPredictionTower'
    else:
      tower_name_scope = head_name + 'PredictionTower'
    if self._share_prediction_tower:
      head_tower_feature = box_tower_feature
    else:
      head_tower_feature = self._compute_base_tower(
          tower_name_scope=tower_name_scope,
          image_feature=image_feature,
          feature_index=feature_index)
    return head_obj.predict(
        features=head_tower_feature,
        num_predictions_per_location=num_predictions_per_location)

  def _predict(self, image_features, num_predictions_per_location_list):
    """Computes encoded object locations and corresponding confidences.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels] containing features for a batch of images. Note that
        when not all tensors in the list have the same number of channels, an
        additional projection layer will be added on top of the tensor to
        generate a feature map with a number of channels consistent with the
        majority.
      num_predictions_per_location_list: A list of integers representing the
        number of box predictions to be made per spatial location for each
        feature map. Note that all values must be the same since the weights
        are shared.

    Returns:
      A dictionary containing:
        box_encodings: A list of float tensors of shape
          [batch_size, num_anchors_i, code_size] representing the location of
          the objects. Each entry in the list corresponds to a feature map in
          the input `image_features` list.
        class_predictions_with_background: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes + 1] representing the class
          predictions for the proposals. Each entry in the list corresponds to
          a feature map in the input `image_features` list.
        (optional) Predictions from other heads.
          E.g., mask_predictions: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes, mask_height, mask_width].

    Raises:
      ValueError: If the num predictions per location differs between the
        feature maps.
    """
    if len(set(num_predictions_per_location_list)) > 1:
      raise ValueError('num predictions per location must be same for all '
                       'feature maps, found: {}'.format(
                           num_predictions_per_location_list))
    feature_channels = [
        shape_utils.get_dim_as_int(image_feature.shape[3])
        for image_feature in image_features
    ]
    has_different_feature_channels = len(set(feature_channels)) > 1
    if has_different_feature_channels:
      inserted_layer_counter = 0
      target_channel = max(set(feature_channels), key=feature_channels.count)
      tf.logging.info('Not all feature maps have the same number of '
                      'channels, found: {}, appending additional projection '
                      'layers to bring all feature maps to uniformly have {} '
                      'channels.'.format(feature_channels, target_channel))
    else:
      # Place holder variables if has_different_feature_channels is False.
      target_channel = -1
      inserted_layer_counter = -1
    predictions = {
        BOX_ENCODINGS: [],
        CLASS_PREDICTIONS_WITH_BACKGROUND: [],
    }
    for head_name in self._other_heads.keys():
      predictions[head_name] = []
    for feature_index, (image_feature,
                        num_predictions_per_location) in enumerate(
                            zip(image_features,
                                num_predictions_per_location_list)):
      with tf.variable_scope('WeightSharedConvolutionalBoxPredictor',
                             reuse=tf.AUTO_REUSE):
        with slim.arg_scope(self._conv_hyperparams_fn()):
          # TODO(wangjiang) Pass is_training to the head class directly.
with slim.arg_scope([slim.dropout], is_training=self._is_training): (image_feature, inserted_layer_counter) = self._insert_additional_projection_layer( image_feature, inserted_layer_counter, target_channel) if self._share_prediction_tower: box_tower_scope = 'PredictionTower' else: box_tower_scope = 'BoxPredictionTower' box_tower_feature = self._compute_base_tower( tower_name_scope=box_tower_scope, image_feature=image_feature, feature_index=feature_index) box_encodings = self._box_prediction_head.predict( features=box_tower_feature, num_predictions_per_location=num_predictions_per_location) predictions[BOX_ENCODINGS].append(box_encodings) sorted_keys = sorted(self._other_heads.keys()) sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) for head_name in sorted_keys: if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: head_obj = self._class_prediction_head else: head_obj = self._other_heads[head_name] prediction = self._predict_head( head_name=head_name, head_obj=head_obj, image_feature=image_feature, box_tower_feature=box_tower_feature, feature_index=feature_index, num_predictions_per_location=num_predictions_per_location) predictions[head_name].append(prediction) return predictions
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/convolutional_box_predictor.py
convolutional_box_predictor.py
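A minimal sketch of how the slim ConvolutionalBoxPredictor._predict above picks the depth of the optional extra 1x1 convolutions inserted before the prediction heads. The helper name and the example depths are ours; the clamping expression itself is the one used in the listing.

def additional_conv_depth(features_depth, min_depth, max_depth):
  # Clamp the incoming feature depth into [min_depth, max_depth]; with
  # max_depth == 0 (and min_depth == 0) the result is 0, so no extra
  # 1x1 layer is added before the heads.
  return max(min(features_depth, max_depth), min_depth)

assert additional_conv_depth(features_depth=512, min_depth=16, max_depth=128) == 128
assert additional_conv_depth(features_depth=8, min_depth=16, max_depth=128) == 16
assert additional_conv_depth(features_depth=512, min_depth=0, max_depth=0) == 0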
"""Convolutional Box Predictors with and without weight sharing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from six.moves import range import tensorflow.compat.v1 as tf from object_detection.core import box_predictor from object_detection.utils import shape_utils from object_detection.utils import static_shape keras = tf.keras.layers BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class _NoopVariableScope(object): """A dummy class that does not push any scope.""" def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): return False class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): """Convolutional Keras Box Predictor. Optionally add an intermediate 1x1 convolutional layer after features and predict in parallel branches box_encodings and class_predictions_with_background. Currently this box predictor assumes that predictions are "shared" across classes --- that is each anchor makes box predictions which do not depend on class. """ def __init__(self, is_training, num_classes, box_prediction_heads, class_prediction_heads, other_heads, conv_hyperparams, num_layers_before_predictor, min_depth, max_depth, freeze_batchnorm, inplace_batchnorm_update, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_heads: A list of heads that predict the boxes. class_prediction_heads: A list of heads that predict the classes. other_heads: A dictionary mapping head names to lists of convolutional heads. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. num_layers_before_predictor: Number of the additional conv layers before the predictor. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. 
""" super(ConvolutionalBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, name=name) if min_depth > max_depth: raise ValueError('min_depth should be less than or equal to max_depth') if len(box_prediction_heads) != len(class_prediction_heads): raise ValueError('All lists of heads must be the same length.') for other_head_list in other_heads.values(): if len(box_prediction_heads) != len(other_head_list): raise ValueError('All lists of heads must be the same length.') self._prediction_heads = { BOX_ENCODINGS: box_prediction_heads, CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_heads, } if other_heads: self._prediction_heads.update(other_heads) # We generate a consistent ordering for the prediction head names, # So that all workers build the model in the exact same order self._sorted_head_names = sorted(self._prediction_heads.keys()) self._conv_hyperparams = conv_hyperparams self._min_depth = min_depth self._max_depth = max_depth self._num_layers_before_predictor = num_layers_before_predictor self._shared_nets = [] def build(self, input_shapes): """Creates the variables of the layer.""" if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]): raise ValueError('This box predictor was constructed with %d heads,' 'but there are %d inputs.' % (len(self._prediction_heads[BOX_ENCODINGS]), len(input_shapes))) for stack_index, input_shape in enumerate(input_shapes): net = [] # Add additional conv layers before the class predictor. features_depth = static_shape.get_depth(input_shape) depth = max(min(features_depth, self._max_depth), self._min_depth) tf.logging.info( 'depth of additional conv before box predictor: {}'.format(depth)) if depth > 0 and self._num_layers_before_predictor > 0: for i in range(self._num_layers_before_predictor): net.append(keras.Conv2D(depth, [1, 1], name='SharedConvolutions_%d/Conv2d_%d_1x1_%d' % (stack_index, i, depth), padding='SAME', **self._conv_hyperparams.params())) net.append(self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm' % (stack_index, i, depth))) net.append(self._conv_hyperparams.build_activation_layer( name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation' % (stack_index, i, depth), )) # Until certain bugs are fixed in checkpointable lists, # this net must be appended only once it's been filled with layers self._shared_nets.append(net) self.built = True def _predict(self, image_features, **kwargs): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. **kwargs: Unused Keyword args Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ predictions = collections.defaultdict(list) for (index, net) in enumerate(image_features): # Apply shared conv layers before the head predictors. 
for layer in self._shared_nets[index]: net = layer(net) for head_name in self._sorted_head_names: head_obj = self._prediction_heads[head_name][index] prediction = head_obj(net) predictions[head_name].append(prediction) return predictions class WeightSharedConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): """Convolutional Box Predictor with weight sharing based on Keras. Defines the box predictor as defined in https://arxiv.org/abs/1708.02002. This class differs from ConvolutionalBoxPredictor in that it shares weights and biases while predicting from different feature maps. However, batch_norm parameters are not shared because the statistics of the activations vary among the different feature maps. Also note that separate multi-layer towers are constructed for the box encoding and class predictors respectively. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams, depth, num_layers_before_predictor, freeze_batchnorm, inplace_batchnorm_update, kernel_size=3, apply_batch_norm=False, share_prediction_tower=False, use_depthwise=False, apply_conv_hyperparams_pointwise=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. kernel_size: Size of final convolution kernel. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. share_prediction_tower: Whether to share the multi-layer tower among box prediction head, class prediction head and other heads. use_depthwise: Whether to use depthwise separable conv2d instead of regular conv2d. apply_conv_hyperparams_pointwise: Whether to apply the conv_hyperparams to the pointwise_initializer and pointwise_regularizer when using depthwise separable convolutions. By default, conv_hyperparams are only applied to the depthwise initializer and regularizer when use_depthwise is true. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. 
""" super(WeightSharedConvolutionalBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, name=name) self._box_prediction_head = box_prediction_head self._prediction_heads = { CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_head, } if other_heads: self._prediction_heads.update(other_heads) # We generate a consistent ordering for the prediction head names, # so that all workers build the model in the exact same order. self._sorted_head_names = sorted(self._prediction_heads.keys()) self._conv_hyperparams = conv_hyperparams self._depth = depth self._num_layers_before_predictor = num_layers_before_predictor self._kernel_size = kernel_size self._apply_batch_norm = apply_batch_norm self._share_prediction_tower = share_prediction_tower self._use_depthwise = use_depthwise self._apply_conv_hyperparams_pointwise = apply_conv_hyperparams_pointwise # Additional projection layers to bring all feature maps to uniform # channels. self._additional_projection_layers = [] # The base tower layers for each head. self._base_tower_layers_for_heads = { BOX_ENCODINGS: [], CLASS_PREDICTIONS_WITH_BACKGROUND: [], } for head_name in other_heads.keys(): self._base_tower_layers_for_heads[head_name] = [] # A dict maps the tower_name_scope of each head to the shared conv layers in # the base tower for different feature map levels. self._head_scope_conv_layers = {} def _insert_additional_projection_layer( self, inserted_layer_counter, target_channel): projection_layers = [] if inserted_layer_counter >= 0: use_bias = False if (self._apply_batch_norm and not self._conv_hyperparams.force_use_bias()) else True projection_layers.append(keras.Conv2D( target_channel, [1, 1], strides=1, padding='SAME', name='ProjectionLayer/conv2d_{}'.format(inserted_layer_counter), **self._conv_hyperparams.params(use_bias=use_bias))) if self._apply_batch_norm: projection_layers.append(self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='ProjectionLayer/conv2d_{}/BatchNorm'.format( inserted_layer_counter))) inserted_layer_counter += 1 return inserted_layer_counter, projection_layers def _compute_base_tower(self, tower_name_scope, feature_index): conv_layers = [] batch_norm_layers = [] activation_layers = [] use_bias = False if (self._apply_batch_norm and not self._conv_hyperparams.force_use_bias()) else True for additional_conv_layer_idx in range(self._num_layers_before_predictor): layer_name = '{}/conv2d_{}'.format( tower_name_scope, additional_conv_layer_idx) if tower_name_scope not in self._head_scope_conv_layers: if self._use_depthwise: kwargs = self._conv_hyperparams.params(use_bias=use_bias) # Both the regularizer and initializer apply to the depthwise layer, # so we remap the kernel_* to depthwise_* here. 
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] if self._apply_conv_hyperparams_pointwise: kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['pointwise_initializer'] = kwargs['kernel_initializer'] conv_layers.append( tf.keras.layers.SeparableConv2D( self._depth, [self._kernel_size, self._kernel_size], padding='SAME', name=layer_name, **kwargs)) else: conv_layers.append( tf.keras.layers.Conv2D( self._depth, [self._kernel_size, self._kernel_size], padding='SAME', name=layer_name, **self._conv_hyperparams.params(use_bias=use_bias))) # Each feature gets a separate batchnorm parameter even though they share # the same convolution weights. if self._apply_batch_norm: batch_norm_layers.append(self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='{}/conv2d_{}/BatchNorm/feature_{}'.format( tower_name_scope, additional_conv_layer_idx, feature_index))) activation_layers.append(self._conv_hyperparams.build_activation_layer( name='{}/conv2d_{}/activation_{}'.format( tower_name_scope, additional_conv_layer_idx, feature_index))) # Set conv layers as the shared conv layers for different feature maps with # the same tower_name_scope. if tower_name_scope in self._head_scope_conv_layers: conv_layers = self._head_scope_conv_layers[tower_name_scope] # Stack the base_tower_layers in the order of conv_layer, batch_norm_layer # and activation_layer base_tower_layers = [] for i in range(self._num_layers_before_predictor): base_tower_layers.extend([conv_layers[i]]) if self._apply_batch_norm: base_tower_layers.extend([batch_norm_layers[i]]) base_tower_layers.extend([activation_layers[i]]) return conv_layers, base_tower_layers def build(self, input_shapes): """Creates the variables of the layer.""" feature_channels = [ shape_utils.get_dim_as_int(input_shape[3]) for input_shape in input_shapes ] has_different_feature_channels = len(set(feature_channels)) > 1 if has_different_feature_channels: inserted_layer_counter = 0 target_channel = max(set(feature_channels), key=feature_channels.count) tf.logging.info('Not all feature maps have the same number of ' 'channels, found: {}, appending additional projection ' 'layers to bring all feature maps to uniformly have {} ' 'channels.'.format(feature_channels, target_channel)) else: # Place holder variables if has_different_feature_channels is False. 
target_channel = -1 inserted_layer_counter = -1 def _build_layers(tower_name_scope, feature_index): conv_layers, base_tower_layers = self._compute_base_tower( tower_name_scope=tower_name_scope, feature_index=feature_index) if tower_name_scope not in self._head_scope_conv_layers: self._head_scope_conv_layers[tower_name_scope] = conv_layers return base_tower_layers for feature_index in range(len(input_shapes)): # Additional projection layers should not be shared as input channels # (and thus weight shapes) are different inserted_layer_counter, projection_layers = ( self._insert_additional_projection_layer( inserted_layer_counter, target_channel)) self._additional_projection_layers.append(projection_layers) if self._share_prediction_tower: box_tower_scope = 'PredictionTower' else: box_tower_scope = 'BoxPredictionTower' # For box tower base box_tower_layers = _build_layers(box_tower_scope, feature_index) self._base_tower_layers_for_heads[BOX_ENCODINGS].append(box_tower_layers) for head_name in self._sorted_head_names: if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: tower_name_scope = 'ClassPredictionTower' else: tower_name_scope = '{}PredictionTower'.format(head_name) box_tower_layers = _build_layers(tower_name_scope, feature_index) self._base_tower_layers_for_heads[head_name].append(box_tower_layers) self.built = True def _predict(self, image_features, **kwargs): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. **kwargs: Unused Keyword args Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ predictions = collections.defaultdict(list) def _apply_layers(base_tower_layers, image_feature): for layer in base_tower_layers: image_feature = layer(image_feature) return image_feature for (index, image_feature) in enumerate(image_features): # Apply additional projection layers to image features for layer in self._additional_projection_layers[index]: image_feature = layer(image_feature) # Apply box tower layers. box_tower_feature = _apply_layers( self._base_tower_layers_for_heads[BOX_ENCODINGS][index], image_feature) box_encodings = self._box_prediction_head(box_tower_feature) predictions[BOX_ENCODINGS].append(box_encodings) for head_name in self._sorted_head_names: head_obj = self._prediction_heads[head_name] if self._share_prediction_tower: head_tower_feature = box_tower_feature else: head_tower_feature = _apply_layers( self._base_tower_layers_for_heads[head_name][index], image_feature) prediction = head_obj(head_tower_feature) predictions[head_name].append(prediction) return predictions
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/convolutional_keras_box_predictor.py
convolutional_keras_box_predictor.py
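WeightSharedConvolutionalBoxPredictor.build in the Keras listing above only inserts 1x1 projection layers when the incoming feature maps disagree on channel depth. The sketch below, with assumed per-level channel counts, isolates how the shared target channel is chosen: the most common depth wins, and only the minority feature maps receive a projection layer.

feature_channels = [256, 256, 256, 128, 64]   # assumed per-level channel depths
has_different_feature_channels = len(set(feature_channels)) > 1
if has_different_feature_channels:
  # The most frequent channel count becomes the shared target depth.
  target_channel = max(set(feature_channels), key=feature_channels.count)
  inserted_layer_counter = 0
else:
  target_channel = -1          # placeholder: no projection layers needed
  inserted_layer_counter = -1
print(target_channel)          # 256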
"""Mask R-CNN Box Predictor.""" from object_detection.core import box_predictor BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class MaskRCNNBoxPredictor(box_predictor.BoxPredictor): """Mask R-CNN Box Predictor. See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). Mask R-CNN. arXiv preprint arXiv:1703.06870. This is used for the second stage of the Mask R-CNN detector where proposals cropped from an image are arranged along the batch dimension of the input image_features tensor. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. In addition to predicting boxes and classes, optionally this class allows predicting masks and/or keypoints inside detection boxes. Currently this box predictor makes per-class predictions; that is, each anchor makes a separate box prediction for each class. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, third_stage_heads): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes in second stage. class_prediction_head: The head that predicts the classes in second stage. third_stage_heads: A dictionary mapping head names to mask rcnn head classes. """ super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._third_stage_heads = third_stage_heads @property def num_classes(self): return self._num_classes def get_second_stage_prediction_heads(self): return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND def get_third_stage_prediction_heads(self): return sorted(self._third_stage_heads.keys()) def _predict(self, image_features, num_predictions_per_location, prediction_stage=2): """Optionally computes encoded object locations, confidences, and masks. Predicts the heads belonging to the given prediction stage. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing roi pooled features for each image. The length of the list should be 1 otherwise a ValueError will be raised. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Currently, this must be set to [1], or an error will be raised. prediction_stage: Prediction stage. Acceptable values are 2 and 3. Returns: A dictionary containing the predicted tensors that are listed in self._prediction_heads. A subset of the following keys will exist in the dictionary: BOX_ENCODINGS: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape [batch_size, 1, num_classes + 1] representing the class predictions for the proposals. MASK_PREDICTIONS: A float tensor of shape [batch_size, 1, num_classes, image_height, image_width] Raises: ValueError: If num_predictions_per_location is not 1 or if len(image_features) is not 1. 
ValueError: if prediction_stage is not 2 or 3. """ if (len(num_predictions_per_location) != 1 or num_predictions_per_location[0] != 1): raise ValueError('Currently FullyConnectedBoxPredictor only supports ' 'predicting a single box per class per location.') if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'.format( len(image_features))) image_feature = image_features[0] predictions_dict = {} if prediction_stage == 2: predictions_dict[BOX_ENCODINGS] = self._box_prediction_head.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0]) predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( self._class_prediction_head.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0])) elif prediction_stage == 3: for prediction_head in self.get_third_stage_prediction_heads(): head_object = self._third_stage_heads[prediction_head] predictions_dict[prediction_head] = head_object.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0]) else: raise ValueError('prediction_stage should be either 2 or 3.') return predictions_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/mask_rcnn_box_predictor.py
mask_rcnn_box_predictor.py
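A hedged usage sketch for the two prediction stages exposed by MaskRCNNBoxPredictor above. The names mask_rcnn_predictor and roi_features stand for objects built elsewhere (for example by the box-predictor builder), and the sketch assumes the BoxPredictor base class forwards extra keyword arguments such as prediction_stage through to _predict.

def predict_stage(mask_rcnn_predictor, roi_features, prediction_stage):
  # roi_features: [batch_size, height, width, channels] pooled proposal
  # features; the predictor requires exactly one feature map in the list.
  return mask_rcnn_predictor.predict(
      [roi_features],
      num_predictions_per_location=[1],   # must be [1] for this predictor
      prediction_stage=prediction_stage)

# box_and_class_outputs = predict_stage(mask_rcnn_predictor, roi_features, 2)
# third_stage_outputs = predict_stage(mask_rcnn_predictor, roi_features, 3)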
"""Mask R-CNN Box Predictor.""" from object_detection.core import box_predictor BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class MaskRCNNKerasBoxPredictor(box_predictor.KerasBoxPredictor): """Mask R-CNN Box Predictor. See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). Mask R-CNN. arXiv preprint arXiv:1703.06870. This is used for the second stage of the Mask R-CNN detector where proposals cropped from an image are arranged along the batch dimension of the input image_features tensor. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. In addition to predicting boxes and classes, optionally this class allows predicting masks and/or keypoints inside detection boxes. Currently this box predictor makes per-class predictions; that is, each anchor makes a separate box prediction for each class. """ def __init__(self, is_training, num_classes, freeze_batchnorm, box_prediction_head, class_prediction_head, third_stage_heads, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. box_prediction_head: The head that predicts the boxes in second stage. class_prediction_head: The head that predicts the classes in second stage. third_stage_heads: A dictionary mapping head names to mask rcnn head classes. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. """ super(MaskRCNNKerasBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=False, name=name) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._third_stage_heads = third_stage_heads @property def num_classes(self): return self._num_classes def get_second_stage_prediction_heads(self): return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND def get_third_stage_prediction_heads(self): return sorted(self._third_stage_heads.keys()) def _predict(self, image_features, prediction_stage=2, **kwargs): """Optionally computes encoded object locations, confidences, and masks. Predicts the heads belonging to the given prediction stage. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing roi pooled features for each image. The length of the list should be 1 otherwise a ValueError will be raised. prediction_stage: Prediction stage. Acceptable values are 2 and 3. **kwargs: Unused Keyword args Returns: A dictionary containing the predicted tensors that are listed in self._prediction_heads. A subset of the following keys will exist in the dictionary: BOX_ENCODINGS: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. 
CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape [batch_size, 1, num_classes + 1] representing the class predictions for the proposals. MASK_PREDICTIONS: A float tensor of shape [batch_size, 1, num_classes, image_height, image_width] Raises: ValueError: If num_predictions_per_location is not 1 or if len(image_features) is not 1. ValueError: if prediction_stage is not 2 or 3. """ if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'.format( len(image_features))) image_feature = image_features[0] predictions_dict = {} if prediction_stage == 2: predictions_dict[BOX_ENCODINGS] = self._box_prediction_head(image_feature) predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( self._class_prediction_head(image_feature)) elif prediction_stage == 3: for prediction_head in self.get_third_stage_prediction_heads(): head_object = self._third_stage_heads[prediction_head] predictions_dict[prediction_head] = head_object(image_feature) else: raise ValueError('prediction_stage should be either 2 or 3.') return predictions_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/mask_rcnn_keras_box_predictor.py
mask_rcnn_keras_box_predictor.py
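A minimal usage sketch for the MaskRCNNKerasBoxPredictor defined above. The Lambda layers stand in for real box/class/mask heads purely to show the stage-2/stage-3 routing and the returned dictionary keys; the feature shapes, the class count, and the assumption that the Keras base class forwards `prediction_stage` through to `_predict` are illustrative, not taken from this file.

import tensorflow.compat.v1 as tf
from object_detection.predictors import mask_rcnn_keras_box_predictor as mrcnn

num_classes = 3
# Stand-in heads (hypothetical): a real pipeline passes heads built from
# object_detection.predictors.heads.keras_box_head / keras_class_head / keras_mask_head.
box_head = tf.keras.layers.Lambda(
    lambda x: tf.zeros([tf.shape(x)[0], 1, num_classes, 4]))
class_head = tf.keras.layers.Lambda(
    lambda x: tf.zeros([tf.shape(x)[0], 1, num_classes + 1]))
mask_head = tf.keras.layers.Lambda(
    lambda x: tf.zeros([tf.shape(x)[0], 1, num_classes, 14, 14]))

predictor = mrcnn.MaskRCNNKerasBoxPredictor(
    is_training=False,
    num_classes=num_classes,
    freeze_batchnorm=False,
    box_prediction_head=box_head,
    class_prediction_head=class_head,
    third_stage_heads={mrcnn.MASK_PREDICTIONS: mask_head})

# 16 proposals cropped from one image, stacked along the batch dimension.
roi_features = tf.zeros([16, 7, 7, 1024])
stage2 = predictor([roi_features], prediction_stage=2)
# stage2[mrcnn.BOX_ENCODINGS]                     -> [16, 1, 3, 4]
# stage2[mrcnn.CLASS_PREDICTIONS_WITH_BACKGROUND] -> [16, 1, 4]
stage3 = predictor([roi_features], prediction_stage=3)
# stage3[mrcnn.MASK_PREDICTIONS]                  -> [16, 1, 3, 14, 14]

Note the design choice visible in the sketch: second-stage inputs are per-proposal crops arranged along the batch dimension, so every output tensor is indexed by proposal rather than by image.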
"""RFCN Box Predictor.""" import tensorflow.compat.v1 as tf from object_detection.core import box_predictor from object_detection.utils import ops BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class RfcnKerasBoxPredictor(box_predictor.KerasBoxPredictor): """RFCN Box Predictor. Applies a position sensitive ROI pooling on position sensitive feature maps to predict classes and refined locations. See https://arxiv.org/abs/1605.06409 for details. This is used for the second stage of the RFCN meta architecture. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. """ def __init__(self, is_training, num_classes, conv_hyperparams, freeze_batchnorm, num_spatial_bins, depth, crop_size, box_code_size, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`. depth: Target depth to reduce the input feature maps to. crop_size: A list of two integers `[crop_height, crop_width]`. box_code_size: Size of encoding for each box. name: A string name scope to assign to the box predictor. If `None`, Keras will auto-generate one from the class name. """ super(RfcnKerasBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=False, name=name) self._freeze_batchnorm = freeze_batchnorm self._conv_hyperparams = conv_hyperparams self._num_spatial_bins = num_spatial_bins self._depth = depth self._crop_size = crop_size self._box_code_size = box_code_size # Build the shared layers used for both heads self._shared_conv_layers = [] self._shared_conv_layers.append( tf.keras.layers.Conv2D( self._depth, [1, 1], padding='SAME', name='reduce_depth_conv', **self._conv_hyperparams.params())) self._shared_conv_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='reduce_depth_batchnorm')) self._shared_conv_layers.append( self._conv_hyperparams.build_activation_layer( name='reduce_depth_activation')) self._box_encoder_layers = [] location_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * self.num_classes * self._box_code_size) self._box_encoder_layers.append( tf.keras.layers.Conv2D( location_feature_map_depth, [1, 1], padding='SAME', name='refined_locations_conv', **self._conv_hyperparams.params())) self._box_encoder_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='refined_locations_batchnorm')) self._class_predictor_layers = [] self._total_classes = self.num_classes + 1 # Account for background class. 
class_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] * self._total_classes) self._class_predictor_layers.append( tf.keras.layers.Conv2D( class_feature_map_depth, [1, 1], padding='SAME', name='class_predictions_conv', **self._conv_hyperparams.params())) self._class_predictor_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='class_predictions_batchnorm')) @property def num_classes(self): return self._num_classes def _predict(self, image_features, proposal_boxes, **kwargs): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. proposal_boxes: A float tensor of shape [batch_size, num_proposals, box_code_size]. **kwargs: Unused Keyword args Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. Raises: ValueError: if num_predictions_per_location is not 1 or if len(image_features) is not 1. """ if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'. format(len(image_features))) image_feature = image_features[0] batch_size = tf.shape(proposal_boxes)[0] num_boxes = tf.shape(proposal_boxes)[1] net = image_feature for layer in self._shared_conv_layers: net = layer(net) # Location predictions. box_net = net for layer in self._box_encoder_layers: box_net = layer(box_net) box_encodings = ops.batch_position_sensitive_crop_regions( box_net, boxes=proposal_boxes, crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True) box_encodings = tf.squeeze(box_encodings, axis=[2, 3]) box_encodings = tf.reshape(box_encodings, [batch_size * num_boxes, 1, self.num_classes, self._box_code_size]) # Class predictions. class_net = net for layer in self._class_predictor_layers: class_net = layer(class_net) class_predictions_with_background = ( ops.batch_position_sensitive_crop_regions( class_net, boxes=proposal_boxes, crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True)) class_predictions_with_background = tf.squeeze( class_predictions_with_background, axis=[2, 3]) class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size * num_boxes, 1, self._total_classes]) return {BOX_ENCODINGS: [box_encodings], CLASS_PREDICTIONS_WITH_BACKGROUND: [class_predictions_with_background]}
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/rfcn_keras_box_predictor.py
rfcn_keras_box_predictor.py
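A hypothetical construction sketch for the RfcnKerasBoxPredictor above. The hyperparameter text proto, the feature-map and proposal shapes, and the depth/bin/crop values are placeholder assumptions; the crop size is chosen so each crop dimension divides evenly into the spatial bins, which position-sensitive cropping expects.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.protos import hyperparams_pb2

# Placeholder convolution hyperparameters (regularizer/initializer values are arbitrary).
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
    text_format.Merge(
        """
        regularizer { l2_regularizer { weight: 0.0004 } }
        initializer { truncated_normal_initializer { stddev: 0.03 } }
        activation: RELU_6
        """, hyperparams_pb2.Hyperparams()))

predictor = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
    is_training=False,
    num_classes=2,
    conv_hyperparams=conv_hyperparams,
    freeze_batchnorm=False,
    num_spatial_bins=[3, 3],
    depth=64,
    crop_size=[12, 12],   # 12 divides evenly into 3 spatial bins per dimension
    box_code_size=4)

image_features = tf.zeros([2, 36, 48, 1024])   # single feature map for a batch of 2 images
proposal_boxes = tf.zeros([2, 8, 4])           # normalized [ymin, xmin, ymax, xmax] boxes
predictions = predictor([image_features], proposal_boxes=proposal_boxes)
# predictions[rfcn_keras_box_predictor.BOX_ENCODINGS][0]                     -> [16, 1, 2, 4]
# predictions[rfcn_keras_box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND][0] -> [16, 1, 3]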
import tensorflow.compat.v1 as tf from object_detection.predictors.heads import head from object_detection.utils import shape_utils class ConvolutionalClassHead(head.KerasHead): """Convolutional class prediction head.""" def __init__(self, is_training, num_class_slots, use_dropout, dropout_keep_prob, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, class_prediction_bias_init=0.0, use_depthwise=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(ConvolutionalClassHead, self).__init__(name=name) self._is_training = is_training self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_depthwise = use_depthwise self._num_class_slots = num_class_slots self._class_predictor_layers = [] if self._use_dropout: self._class_predictor_layers.append( # The Dropout layer's `training` parameter for the call method must # be set implicitly by the Keras set_learning_phase. The object # detection training code takes care of this. 
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: self._class_predictor_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='ClassPredictor_depthwise', **conv_hyperparams.params())) self._class_predictor_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='ClassPredictor_depthwise_batchnorm')) self._class_predictor_layers.append( conv_hyperparams.build_activation_layer( name='ClassPredictor_depthwise_activation')) self._class_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._num_class_slots, [1, 1], name='ClassPredictor', **conv_hyperparams.params(use_bias=True))) else: self._class_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], padding='SAME', name='ClassPredictor', bias_initializer=tf.constant_initializer( self._class_prediction_bias_init), **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: class_predictions_with_background: A float tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. """ class_predictions_with_background = features for layer in self._class_predictor_layers: class_predictions_with_background = layer( class_predictions_with_background) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background class MaskRCNNClassHead(head.KerasHead): """Mask RCNN class prediction head. This is a piece of Mask RCNN which is responsible for predicting just the class scores of boxes. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_class_slots, fc_hyperparams, freeze_batchnorm, use_dropout, dropout_keep_prob, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for fully connected dense ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. name: A string name scope to assign to the class head. If `None`, Keras will auto-generate one from the class name. 
""" super(MaskRCNNClassHead, self).__init__(name=name) self._is_training = is_training self._freeze_batchnorm = freeze_batchnorm self._num_class_slots = num_class_slots self._fc_hyperparams = fc_hyperparams self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._class_predictor_layers = [tf.keras.layers.Flatten()] if self._use_dropout: self._class_predictor_layers.append( tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) self._class_predictor_layers.append( tf.keras.layers.Dense(self._num_class_slots, name='ClassPredictor_dense')) self._class_predictor_layers.append( fc_hyperparams.build_batch_norm(training=(is_training and not freeze_batchnorm), name='ClassPredictor_batchnorm')) def _predict(self, features): """Predicts the class scores for boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. Returns: class_predictions_with_background: A float tensor of shape [batch_size, 1, num_class_slots] representing the class predictions for the proposals. """ spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') net = spatial_averaged_roi_pooled_features for layer in self._class_predictor_layers: net = layer(net) class_predictions_with_background = tf.reshape( net, [-1, 1, self._num_class_slots]) return class_predictions_with_background class WeightSharedConvolutionalClassHead(head.KerasHead): """Weight shared convolutional class prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, num_class_slots, num_predictions_per_location, conv_hyperparams, kernel_size=3, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, use_depthwise=False, apply_conv_hyperparams_to_heads=False, score_converter_fn=tf.identity, return_flat_predictions=True, name=None): """Constructor. Args: num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. kernel_size: Size of final convolution kernel. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to depthwise seperable convolution layers in the box and class heads. By default, the conv_hyperparams are only applied to layers in the predictor tower when using depthwise separable convolutions. score_converter_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). return_flat_predictions: If true, returns flattened prediction tensor of shape [batch, height * width * num_predictions_per_location, box_coder]. Otherwise returns the prediction tensor before reshaping, whose shape is [batch, height, width, num_predictions_per_location * num_class_slots]. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. 
Raises: ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(WeightSharedConvolutionalClassHead, self).__init__(name=name) self._num_class_slots = num_class_slots self._num_predictions_per_location = num_predictions_per_location self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._use_depthwise = use_depthwise self._apply_conv_hyperparams_to_heads = apply_conv_hyperparams_to_heads self._score_converter_fn = score_converter_fn self._return_flat_predictions = return_flat_predictions self._class_predictor_layers = [] if self._use_dropout: self._class_predictor_layers.append( tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: kwargs = conv_hyperparams.params(use_bias=True) if self._apply_conv_hyperparams_to_heads: kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['pointwise_initializer'] = kwargs['kernel_initializer'] self._class_predictor_layers.append( tf.keras.layers.SeparableConv2D( num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, name='ClassPredictor', bias_initializer=tf.constant_initializer( self._class_prediction_bias_init), **kwargs)) else: self._class_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], padding='SAME', name='ClassPredictor', bias_initializer=tf.constant_initializer( self._class_prediction_bias_init), **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: class_predictions_with_background: A float tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. """ class_predictions_with_background = features for layer in self._class_predictor_layers: class_predictions_with_background = layer( class_predictions_with_background) batch_size, height, width = shape_utils.combined_static_and_dynamic_shape( features)[0:3] class_predictions_with_background = tf.reshape( class_predictions_with_background, [ batch_size, height, width, self._num_predictions_per_location, self._num_class_slots ]) class_predictions_with_background = self._score_converter_fn( class_predictions_with_background) if self._return_flat_predictions: class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) else: class_predictions_with_background = tf.reshape( class_predictions_with_background, [ batch_size, height, width, self._num_predictions_per_location * self._num_class_slots ]) return class_predictions_with_background
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_class_head.py
keras_class_head.py
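A hypothetical usage sketch for the Keras class heads above, shown with ConvolutionalClassHead. The hyperparameter proto, the class and anchor counts, and the feature-map shape are illustrative assumptions.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_class_head
from object_detection.protos import hyperparams_pb2

conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
    text_format.Merge(
        """
        regularizer { l2_regularizer { weight: 0.0004 } }
        initializer { truncated_normal_initializer { stddev: 0.03 } }
        """, hyperparams_pb2.Hyperparams()))

class_head = keras_class_head.ConvolutionalClassHead(
    is_training=False,
    num_class_slots=21,              # e.g. 20 classes plus an implicit background slot
    use_dropout=True,
    dropout_keep_prob=0.8,
    kernel_size=3,
    num_predictions_per_location=6,  # e.g. 6 anchors per spatial location
    conv_hyperparams=conv_hyperparams,
    freeze_batchnorm=False)

features = tf.zeros([4, 19, 19, 256])       # one SSD-style feature map
class_predictions = class_head(features)    # the Keras head dispatches __call__ to _predict
# class_predictions -> [4, 19 * 19 * 6, 21] == [batch_size, num_anchors, num_class_slots]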
import functools import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.predictors.heads import head from object_detection.utils import shape_utils class MaskRCNNClassHead(head.Head): """Mask RCNN class prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_class_slots, fc_hyperparams_fn, use_dropout, dropout_keep_prob, scope='ClassPredictor'): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. scope: Scope name for the convolution operation. """ super(MaskRCNNClassHead, self).__init__() self._is_training = is_training self._num_class_slots = num_class_slots self._fc_hyperparams_fn = fc_hyperparams_fn self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._scope = scope def predict(self, features, num_predictions_per_location=1): """Predicts boxes and class scores. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: class_predictions_with_background: A float tensor of shape [batch_size, 1, num_class_slots] representing the class predictions for the proposals. Raises: ValueError: If num_predictions_per_location is not 1. """ if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') flattened_roi_pooled_features = slim.flatten( spatial_averaged_roi_pooled_features) if self._use_dropout: flattened_roi_pooled_features = slim.dropout( flattened_roi_pooled_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training) with slim.arg_scope(self._fc_hyperparams_fn()): class_predictions_with_background = slim.fully_connected( flattened_roi_pooled_features, self._num_class_slots, reuse=tf.AUTO_REUSE, activation_fn=None, scope=self._scope) class_predictions_with_background = tf.reshape( class_predictions_with_background, [-1, 1, self._num_class_slots]) return class_predictions_with_background class ConvolutionalClassHead(head.Head): """Convolutional class prediction head.""" def __init__(self, is_training, num_class_slots, use_dropout, dropout_keep_prob, kernel_size, apply_sigmoid_to_scores=False, class_prediction_bias_init=0.0, use_depthwise=False, scope='ClassPredictor'): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. 
If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). apply_sigmoid_to_scores: if True, apply the sigmoid on the output class_predictions. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. scope: Scope name for the convolution operation. Raises: ValueError: if min_depth > max_depth. ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(ConvolutionalClassHead, self).__init__() self._is_training = is_training self._num_class_slots = num_class_slots self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._apply_sigmoid_to_scores = apply_sigmoid_to_scores self._class_prediction_bias_init = class_prediction_bias_init self._use_depthwise = use_depthwise self._scope = scope def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: class_predictions_with_background: A float tensors of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. """ net = features if self._use_dropout: net = slim.dropout(net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: depthwise_scope = self._scope + '_depthwise' class_predictions_with_background = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope=depthwise_scope) class_predictions_with_background = slim.conv2d( class_predictions_with_background, num_predictions_per_location * self._num_class_slots, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope=self._scope) else: class_predictions_with_background = slim.conv2d( net, num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope=self._scope, biases_initializer=tf.constant_initializer( self._class_prediction_bias_init)) if self._apply_sigmoid_to_scores: class_predictions_with_background = tf.sigmoid( class_predictions_with_background) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional class head. class WeightSharedConvolutionalClassHead(head.Head): """Weight shared convolutional class prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, num_class_slots, kernel_size=3, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, use_depthwise=False, score_converter_fn=tf.identity, return_flat_predictions=True, scope='ClassPredictor'): """Constructor. Args: num_class_slots: number of class slots. 
Note that num_class_slots may or may not include an implicit background category. kernel_size: Size of final convolution kernel. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. score_converter_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). return_flat_predictions: If true, returns flattened prediction tensor of shape [batch, height * width * num_predictions_per_location, box_coder]. Otherwise returns the prediction tensor before reshaping, whose shape is [batch, height, width, num_predictions_per_location * num_class_slots]. scope: Scope name for the convolution operation. Raises: ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(WeightSharedConvolutionalClassHead, self).__init__() self._num_class_slots = num_class_slots self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._use_depthwise = use_depthwise self._score_converter_fn = score_converter_fn self._return_flat_predictions = return_flat_predictions self._scope = scope def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: class_predictions_with_background: A tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals, or a tensor of shape [batch, height, width, num_predictions_per_location * num_class_slots] representing class predictions before reshaping if self._return_flat_predictions is False. """ class_predictions_net = features if self._use_dropout: class_predictions_net = slim.dropout( class_predictions_net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d class_predictions_with_background = conv_op( class_predictions_net, num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, biases_initializer=tf.constant_initializer( self._class_prediction_bias_init), scope=self._scope) batch_size, height, width = shape_utils.combined_static_and_dynamic_shape( features)[0:3] class_predictions_with_background = tf.reshape( class_predictions_with_background, [ batch_size, height, width, num_predictions_per_location, self._num_class_slots ]) class_predictions_with_background = self._score_converter_fn( class_predictions_with_background) if self._return_flat_predictions: class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) else: class_predictions_with_background = tf.reshape( class_predictions_with_background, [ batch_size, height, width, num_predictions_per_location * self._num_class_slots ]) return class_predictions_with_background
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/class_head.py
class_head.py
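A hypothetical usage sketch for the tf-slim class heads above, shown with MaskRCNNClassHead. It assumes the TF1/tf-slim code path (graph mode) and that `hyperparams_builder.build` is used to produce `fc_hyperparams_fn`; the proto values and tensor shapes are placeholders.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import class_head
from object_detection.protos import hyperparams_pb2

fc_hyperparams_fn = hyperparams_builder.build(
    text_format.Merge(
        """
        op: FC
        regularizer { l2_regularizer { weight: 0.0005 } }
        initializer { variance_scaling_initializer {} }
        """, hyperparams_pb2.Hyperparams()),
    is_training=False)

with tf.Graph().as_default():
  head = class_head.MaskRCNNClassHead(
      is_training=False,
      num_class_slots=91,      # e.g. 90 classes plus background
      fc_hyperparams_fn=fc_hyperparams_fn,
      use_dropout=False,
      dropout_keep_prob=1.0)
  roi_features = tf.zeros([64, 7, 7, 1024])   # 64 ROI-pooled proposals
  class_predictions = head.predict(roi_features, num_predictions_per_location=1)
  # class_predictions -> [64, 1, 91]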
from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.predictors.heads import head from object_detection.utils import ops class MaskRCNNMaskHead(head.Head): """Mask RCNN mask prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, num_classes, conv_hyperparams_fn=None, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False, convolve_then_upsample=False): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. mask_height: Desired output mask height. The default value is 14. mask_width: Desired output mask width. The default value is 14. mask_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. mask_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. convolve_then_upsample: Whether to apply convolutions on mask features before upsampling using nearest neighbor resizing. Otherwise, mask features are resized to [`mask_height`, `mask_width`] using bilinear resizing before applying convolutions. Raises: ValueError: conv_hyperparams_fn is None. """ super(MaskRCNNMaskHead, self).__init__() self._num_classes = num_classes self._conv_hyperparams_fn = conv_hyperparams_fn self._mask_height = mask_height self._mask_width = mask_width self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers self._mask_prediction_conv_depth = mask_prediction_conv_depth self._masks_are_class_agnostic = masks_are_class_agnostic self._convolve_then_upsample = convolve_then_upsample if conv_hyperparams_fn is None: raise ValueError('conv_hyperparams_fn is None.') def _get_mask_predictor_conv_depth(self, num_feature_channels, num_classes, class_weight=3.0, feature_weight=2.0): """Computes the depth of the mask predictor convolutions. Computes the depth of the mask predictor convolutions given feature channels and number of classes by performing a weighted average of the two in log space to compute the number of convolution channels. The weights that are used for computing the weighted average do not need to sum to 1. Args: num_feature_channels: An integer containing the number of feature channels. num_classes: An integer containing the number of classes. class_weight: Class weight used in computing the weighted average. feature_weight: Feature weight used in computing the weighted average. Returns: An integer containing the number of convolution channels used by mask predictor. 
""" num_feature_channels_log = math.log(float(num_feature_channels), 2.0) num_classes_log = math.log(float(num_classes), 2.0) weighted_num_feature_channels_log = ( num_feature_channels_log * feature_weight) weighted_num_classes_log = num_classes_log * class_weight total_weight = feature_weight + class_weight num_conv_channels_log = round( (weighted_num_feature_channels_log + weighted_num_classes_log) / total_weight) return int(math.pow(2.0, num_conv_channels_log)) def predict(self, features, num_predictions_per_location=1): """Performs mask prediction. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: instance_masks: A float tensor of shape [batch_size, 1, num_classes, mask_height, mask_width]. Raises: ValueError: If num_predictions_per_location is not 1. """ if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') num_conv_channels = self._mask_prediction_conv_depth if num_conv_channels == 0: num_feature_channels = features.get_shape().as_list()[3] num_conv_channels = self._get_mask_predictor_conv_depth( num_feature_channels, self._num_classes) with slim.arg_scope(self._conv_hyperparams_fn()): if not self._convolve_then_upsample: features = tf.image.resize_bilinear( features, [self._mask_height, self._mask_width], align_corners=True) for _ in range(self._mask_prediction_num_conv_layers - 1): features = slim.conv2d( features, num_outputs=num_conv_channels, kernel_size=[3, 3]) if self._convolve_then_upsample: # Replace Transposed Convolution with a Nearest Neighbor upsampling step # followed by 3x3 convolution. height_scale = self._mask_height // features.shape[1].value width_scale = self._mask_width // features.shape[2].value features = ops.nearest_neighbor_upsampling( features, height_scale=height_scale, width_scale=width_scale) features = slim.conv2d( features, num_outputs=num_conv_channels, kernel_size=[3, 3]) num_masks = 1 if self._masks_are_class_agnostic else self._num_classes mask_predictions = slim.conv2d( features, num_outputs=num_masks, activation_fn=None, normalizer_fn=None, kernel_size=[3, 3]) return tf.expand_dims( tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), axis=1, name='MaskPredictor') class ConvolutionalMaskHead(head.Head): """Convolutional class prediction head.""" def __init__(self, is_training, num_classes, use_dropout, dropout_keep_prob, kernel_size, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: Number of classes. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. 
Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalMaskHead, self).__init__() self._is_training = is_training self._num_classes = num_classes self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._use_depthwise = use_depthwise self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: mask_predictions: A float tensors of shape [batch_size, num_anchors, num_masks, mask_height, mask_width] representing the mask predictions for the proposals. """ image_feature = features # Add a slot for the background class. if self._masks_are_class_agnostic: num_masks = 1 else: num_masks = self._num_classes num_mask_channels = num_masks * self._mask_height * self._mask_width net = image_feature if self._use_dropout: net = slim.dropout(net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: mask_predictions = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope='MaskPredictor_depthwise') mask_predictions = slim.conv2d( mask_predictions, num_predictions_per_location * num_mask_channels, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='MaskPredictor') else: mask_predictions = slim.conv2d( net, num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='MaskPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, num_masks, self._mask_height, self._mask_width]) return mask_predictions # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional mask head. class WeightSharedConvolutionalMaskHead(head.Head): """Weight shared convolutional mask prediction head.""" def __init__(self, num_classes, kernel_size=3, use_dropout=False, dropout_keep_prob=0.8, mask_height=7, mask_width=7, masks_are_class_agnostic=False): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). kernel_size: Size of final convolution kernel. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. """ super(WeightSharedConvolutionalMaskHead, self).__init__() self._num_classes = num_classes self._kernel_size = kernel_size self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic def predict(self, features, num_predictions_per_location): """Predicts boxes. 
Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: mask_predictions: A tensor of shape [batch_size, num_anchors, num_classes, mask_height, mask_width] representing the mask predictions for the proposals. """ mask_predictions_net = features if self._masks_are_class_agnostic: num_masks = 1 else: num_masks = self._num_classes num_mask_channels = num_masks * self._mask_height * self._mask_width if self._use_dropout: mask_predictions_net = slim.dropout( mask_predictions_net, keep_prob=self._dropout_keep_prob) mask_predictions = slim.conv2d( mask_predictions_net, num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, scope='MaskPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, num_masks, self._mask_height, self._mask_width]) return mask_predictions
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/mask_head.py
mask_head.py
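A hypothetical usage sketch for the tf-slim mask heads above, shown with MaskRCNNMaskHead. As with the slim class head, this assumes TF1 graph mode and a `conv_hyperparams_fn` produced by `hyperparams_builder.build`; all numeric values are placeholders.

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import mask_head
from object_detection.protos import hyperparams_pb2

conv_hyperparams_fn = hyperparams_builder.build(
    text_format.Merge(
        """
        op: CONV
        regularizer { l2_regularizer { weight: 0.0004 } }
        initializer { truncated_normal_initializer { stddev: 0.03 } }
        activation: RELU_6
        """, hyperparams_pb2.Hyperparams()),
    is_training=False)

with tf.Graph().as_default():
  head = mask_head.MaskRCNNMaskHead(
      num_classes=90,
      conv_hyperparams_fn=conv_hyperparams_fn,
      mask_height=14,
      mask_width=14,
      mask_prediction_num_conv_layers=2,
      mask_prediction_conv_depth=256,
      masks_are_class_agnostic=False)
  roi_features = tf.zeros([64, 7, 7, 1024])   # 64 ROI-pooled proposals
  instance_masks = head.predict(roi_features, num_predictions_per_location=1)
  # instance_masks -> [64, 1, 90, 14, 14]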
from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from six.moves import range import tensorflow.compat.v1 as tf from object_detection.predictors.heads import head from object_detection.utils import ops from object_detection.utils import shape_utils class ConvolutionalMaskHead(head.KerasHead): """Convolutional class prediction head.""" def __init__(self, is_training, num_classes, use_dropout, dropout_keep_prob, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: Number of classes. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalMaskHead, self).__init__(name=name) self._is_training = is_training self._num_classes = num_classes self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic self._mask_predictor_layers = [] # Add a slot for the background class. if self._masks_are_class_agnostic: self._num_masks = 1 else: self._num_masks = self._num_classes num_mask_channels = self._num_masks * self._mask_height * self._mask_width if self._use_dropout: self._mask_predictor_layers.append( # The Dropout layer's `training` parameter for the call method must # be set implicitly by the Keras set_learning_phase. The object # detection training code takes care of this. 
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: self._mask_predictor_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='MaskPredictor_depthwise', **conv_hyperparams.params())) self._mask_predictor_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='MaskPredictor_depthwise_batchnorm')) self._mask_predictor_layers.append( conv_hyperparams.build_activation_layer( name='MaskPredictor_depthwise_activation')) self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [1, 1], name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) else: self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], padding='SAME', name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: mask_predictions: A float tensors of shape [batch_size, num_anchors, num_masks, mask_height, mask_width] representing the mask predictions for the proposals. """ mask_predictions = features for layer in self._mask_predictor_layers: mask_predictions = layer(mask_predictions) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) return mask_predictions class MaskRCNNMaskHead(head.KerasHead): """Mask RCNN mask prediction head. This is a piece of Mask RCNN which is responsible for predicting just the pixelwise foreground scores for regions within the boxes. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_classes, freeze_batchnorm, conv_hyperparams, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False, convolve_then_upsample=False, name=None): """Constructor. Args: is_training: Indicates whether the Mask head is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. mask_height: Desired output mask height. The default value is 14. mask_width: Desired output mask width. The default value is 14. mask_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. mask_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. 
masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. convolve_then_upsample: Whether to apply convolutions on mask features before upsampling using nearest neighbor resizing. Otherwise, mask features are resized to [`mask_height`, `mask_width`] using bilinear resizing before applying convolutions. name: A string name scope to assign to the mask head. If `None`, Keras will auto-generate one from the class name. """ super(MaskRCNNMaskHead, self).__init__(name=name) self._is_training = is_training self._freeze_batchnorm = freeze_batchnorm self._num_classes = num_classes self._conv_hyperparams = conv_hyperparams self._mask_height = mask_height self._mask_width = mask_width self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers self._mask_prediction_conv_depth = mask_prediction_conv_depth self._masks_are_class_agnostic = masks_are_class_agnostic self._convolve_then_upsample = convolve_then_upsample self._mask_predictor_layers = [] def build(self, input_shapes): num_conv_channels = self._mask_prediction_conv_depth if num_conv_channels == 0: num_feature_channels = input_shapes.as_list()[3] num_conv_channels = self._get_mask_predictor_conv_depth( num_feature_channels, self._num_classes) for i in range(self._mask_prediction_num_conv_layers - 1): self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_conv_channels, [3, 3], padding='SAME', name='MaskPredictor_conv2d_{}'.format(i), **self._conv_hyperparams.params())) self._mask_predictor_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='MaskPredictor_batchnorm_{}'.format(i))) self._mask_predictor_layers.append( self._conv_hyperparams.build_activation_layer( name='MaskPredictor_activation_{}'.format(i))) if self._convolve_then_upsample: # Replace Transposed Convolution with a Nearest Neighbor upsampling step # followed by 3x3 convolution. height_scale = self._mask_height // shape_utils.get_dim_as_int( input_shapes[1]) width_scale = self._mask_width // shape_utils.get_dim_as_int( input_shapes[2]) # pylint: disable=g-long-lambda self._mask_predictor_layers.append(tf.keras.layers.Lambda( lambda features: ops.nearest_neighbor_upsampling( features, height_scale=height_scale, width_scale=width_scale) )) # pylint: enable=g-long-lambda self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_conv_channels, [3, 3], padding='SAME', name='MaskPredictor_upsample_conv2d', **self._conv_hyperparams.params())) self._mask_predictor_layers.append( self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='MaskPredictor_upsample_batchnorm')) self._mask_predictor_layers.append( self._conv_hyperparams.build_activation_layer( name='MaskPredictor_upsample_activation')) num_masks = 1 if self._masks_are_class_agnostic else self._num_classes self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_masks, [3, 3], padding='SAME', name='MaskPredictor_last_conv2d', **self._conv_hyperparams.params(use_bias=True))) self.built = True def _get_mask_predictor_conv_depth(self, num_feature_channels, num_classes, class_weight=3.0, feature_weight=2.0): """Computes the depth of the mask predictor convolutions. Computes the depth of the mask predictor convolutions given feature channels and number of classes by performing a weighted average of the two in log space to compute the number of convolution channels. The weights that are used for computing the weighted average do not need to sum to 1. 
Args: num_feature_channels: An integer containing the number of feature channels. num_classes: An integer containing the number of classes. class_weight: Class weight used in computing the weighted average. feature_weight: Feature weight used in computing the weighted average. Returns: An integer containing the number of convolution channels used by mask predictor. """ num_feature_channels_log = math.log(float(num_feature_channels), 2.0) num_classes_log = math.log(float(num_classes), 2.0) weighted_num_feature_channels_log = ( num_feature_channels_log * feature_weight) weighted_num_classes_log = num_classes_log * class_weight total_weight = feature_weight + class_weight num_conv_channels_log = round( (weighted_num_feature_channels_log + weighted_num_classes_log) / total_weight) return int(math.pow(2.0, num_conv_channels_log)) def _predict(self, features): """Predicts pixelwise foreground scores for regions within the boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. Returns: instance_masks: A float tensor of shape [batch_size, 1, num_classes, mask_height, mask_width]. """ if not self._convolve_then_upsample: features = tf.image.resize_bilinear( features, [self._mask_height, self._mask_width], align_corners=True) mask_predictions = features for layer in self._mask_predictor_layers: mask_predictions = layer(mask_predictions) return tf.expand_dims( tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), axis=1, name='MaskPredictor') class WeightSharedConvolutionalMaskHead(head.KerasHead): """Weight shared convolutional mask prediction head based on Keras.""" def __init__(self, num_classes, num_predictions_per_location, conv_hyperparams, kernel_size=3, use_dropout=False, dropout_keep_prob=0.8, mask_height=7, mask_width=7, masks_are_class_agnostic=False, name=None): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. kernel_size: Size of final convolution kernel. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. 
""" super(WeightSharedConvolutionalMaskHead, self).__init__(name=name) self._num_classes = num_classes self._num_predictions_per_location = num_predictions_per_location self._kernel_size = kernel_size self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic self._mask_predictor_layers = [] if self._masks_are_class_agnostic: self._num_masks = 1 else: self._num_masks = self._num_classes num_mask_channels = self._num_masks * self._mask_height * self._mask_width if self._use_dropout: self._mask_predictor_layers.append( tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], padding='SAME', name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: mask_predictions: A tensor of shape [batch_size, num_anchors, num_classes, mask_height, mask_width] representing the mask predictions for the proposals. """ mask_predictions = features for layer in self._mask_predictor_layers: mask_predictions = layer(mask_predictions) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) return mask_predictions
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_mask_head.py
keras_mask_head.py
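The mask heads above size their intermediate convolutions with a weighted geometric mean of the number of feature channels and the number of classes (the `_get_mask_predictor_conv_depth` logic at the top of keras_mask_head.py). The snippet below is a minimal standalone sketch of that arithmetic; the weights class_weight=3.0 and feature_weight=2.0 are chosen here only for illustration.

import math

def approx_mask_conv_depth(num_feature_channels, num_classes,
                           class_weight=3.0, feature_weight=2.0):
  # Weighted average of log2(channels) and log2(classes), rounded and
  # re-exponentiated, mirroring the heuristic documented above.
  feature_log = math.log(float(num_feature_channels), 2.0)
  classes_log = math.log(float(num_classes), 2.0)
  total_weight = feature_weight + class_weight
  conv_channels_log = round(
      (feature_log * feature_weight + classes_log * class_weight) /
      total_weight)
  return int(math.pow(2.0, conv_channels_log))

# e.g. 1024 feature channels and 90 classes -> 256 conv channels.
print(approx_mask_conv_depth(1024, 90))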
import functools import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.predictors.heads import head class MaskRCNNBoxHead(head.Head): """Box prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_classes, fc_hyperparams_fn, use_dropout, dropout_keep_prob, box_code_size, share_box_across_classes=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. box_code_size: Size of encoding for each box. share_box_across_classes: Whether to share boxes across classes rather than use a different box for each class. """ super(MaskRCNNBoxHead, self).__init__() self._is_training = is_training self._num_classes = num_classes self._fc_hyperparams_fn = fc_hyperparams_fn self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._box_code_size = box_code_size self._share_box_across_classes = share_box_across_classes def predict(self, features, num_predictions_per_location=1): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: box_encodings: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. Raises: ValueError: If num_predictions_per_location is not 1. """ if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') flattened_roi_pooled_features = slim.flatten( spatial_averaged_roi_pooled_features) if self._use_dropout: flattened_roi_pooled_features = slim.dropout( flattened_roi_pooled_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training) number_of_boxes = 1 if not self._share_box_across_classes: number_of_boxes = self._num_classes with slim.arg_scope(self._fc_hyperparams_fn()): box_encodings = slim.fully_connected( flattened_roi_pooled_features, number_of_boxes * self._box_code_size, reuse=tf.AUTO_REUSE, activation_fn=None, scope='BoxEncodingPredictor') box_encodings = tf.reshape(box_encodings, [-1, 1, number_of_boxes, self._box_code_size]) return box_encodings class ConvolutionalBoxHead(head.Head): """Convolutional box prediction head.""" def __init__(self, is_training, box_code_size, kernel_size, use_depthwise=False, box_encodings_clip_range=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). 
use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping box_encodings. Raises: ValueError: if min_depth > max_depth. ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(ConvolutionalBoxHead, self).__init__() self._is_training = is_training self._box_code_size = box_code_size self._kernel_size = kernel_size self._use_depthwise = use_depthwise self._box_encodings_clip_range = box_encodings_clip_range def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. Returns: box_encodings: A float tensors of shape [batch_size, num_anchors, q, code_size] representing the location of the objects, where q is 1 or the number of classes. """ net = features if self._use_depthwise: box_encodings = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope='BoxEncodingPredictor_depthwise') box_encodings = slim.conv2d( box_encodings, num_predictions_per_location * self._box_code_size, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='BoxEncodingPredictor') else: box_encodings = slim.conv2d( net, num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='BoxEncodingPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) box_encodings = tf.reshape(box_encodings, [batch_size, -1, 1, self._box_code_size]) return box_encodings # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional box head. class WeightSharedConvolutionalBoxHead(head.Head): """Weight shared convolutional box prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, box_code_size, kernel_size=3, use_depthwise=False, box_encodings_clip_range=None, return_flat_predictions=True): """Constructor. Args: box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping box_encodings. return_flat_predictions: If true, returns flattened prediction tensor of shape [batch, height * width * num_predictions_per_location, box_coder]. Otherwise returns the prediction tensor before reshaping, whose shape is [batch, height, width, num_predictions_per_location * num_class_slots]. Raises: ValueError: if use_depthwise is True and kernel_size is 1. 
""" if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(WeightSharedConvolutionalBoxHead, self).__init__() self._box_code_size = box_code_size self._kernel_size = kernel_size self._use_depthwise = use_depthwise self._box_encodings_clip_range = box_encodings_clip_range self._return_flat_predictions = return_flat_predictions def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: box_encodings: A float tensor of shape [batch_size, num_anchors, code_size] representing the location of the objects, or a float tensor of shape [batch, height, width, num_predictions_per_location * box_code_size] representing grid box location predictions if self._return_flat_predictions is False. """ box_encodings_net = features if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d box_encodings = conv_op( box_encodings_net, num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, scope='BoxPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) if self._return_flat_predictions: box_encodings = tf.reshape(box_encodings, [batch_size, -1, self._box_code_size]) return box_encodings
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/box_head.py
box_head.py
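The slim box heads in box_head.py all end with the same bookkeeping: a convolution emits num_predictions_per_location * box_code_size channels per spatial cell, and the result is reshaped into per-anchor box encodings. A small sketch of just that reshape, using arbitrary example dimensions:

import tensorflow.compat.v1 as tf

batch, height, width = 2, 5, 5
num_predictions_per_location, box_code_size = 6, 4

# Stand-in for the BoxEncodingPredictor conv output: one flat vector of
# num_predictions_per_location * box_code_size values per spatial cell.
conv_output = tf.zeros(
    [batch, height, width, num_predictions_per_location * box_code_size])

# The reshape applied by ConvolutionalBoxHead.predict: each spatial cell
# contributes num_predictions_per_location anchors with one encoding each.
box_encodings = tf.reshape(conv_output, [batch, -1, 1, box_code_size])
print(box_encodings.shape)  # (2, 150, 1, 4); 5 * 5 * 6 = 150 anchors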
import tensorflow.compat.v1 as tf from object_detection.predictors.heads import head class ConvolutionalBoxHead(head.KerasHead): """Convolutional box prediction head.""" def __init__(self, is_training, box_code_size, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, use_depthwise=False, box_encodings_clip_range=None, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping box_encodings. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. ValueError: if use_depthwise is True and kernel_size is 1. """ if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(ConvolutionalBoxHead, self).__init__(name=name) self._is_training = is_training self._box_code_size = box_code_size self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._box_encodings_clip_range = box_encodings_clip_range self._box_encoder_layers = [] if self._use_depthwise: self._box_encoder_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='BoxEncodingPredictor_depthwise', **conv_hyperparams.params())) self._box_encoder_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='BoxEncodingPredictor_depthwise_batchnorm')) self._box_encoder_layers.append( conv_hyperparams.build_activation_layer( name='BoxEncodingPredictor_depthwise_activation')) self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [1, 1], name='BoxEncodingPredictor', **conv_hyperparams.params(use_bias=True))) else: self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], padding='SAME', name='BoxEncodingPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: box_encodings: A float tensor of shape [batch_size, num_anchors, q, code_size] representing the location of the objects, where q is 1 or the number of classes. 
""" box_encodings = features for layer in self._box_encoder_layers: box_encodings = layer(box_encodings) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) box_encodings = tf.reshape(box_encodings, [batch_size, -1, 1, self._box_code_size]) return box_encodings class MaskRCNNBoxHead(head.KerasHead): """Box prediction head. This is a piece of Mask RCNN which is responsible for predicting just the box encodings. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_classes, fc_hyperparams, freeze_batchnorm, use_dropout, dropout_keep_prob, box_code_size, share_box_across_classes=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for fully connected dense ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. box_code_size: Size of encoding for each box. share_box_across_classes: Whether to share boxes across classes rather than use a different box for each class. name: A string name scope to assign to the box head. If `None`, Keras will auto-generate one from the class name. """ super(MaskRCNNBoxHead, self).__init__(name=name) self._is_training = is_training self._num_classes = num_classes self._fc_hyperparams = fc_hyperparams self._freeze_batchnorm = freeze_batchnorm self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._box_code_size = box_code_size self._share_box_across_classes = share_box_across_classes self._box_encoder_layers = [tf.keras.layers.Flatten()] if self._use_dropout: self._box_encoder_layers.append( tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) self._number_of_boxes = 1 if not self._share_box_across_classes: self._number_of_boxes = self._num_classes self._box_encoder_layers.append( tf.keras.layers.Dense(self._number_of_boxes * self._box_code_size, name='BoxEncodingPredictor_dense')) self._box_encoder_layers.append( fc_hyperparams.build_batch_norm(training=(is_training and not freeze_batchnorm), name='BoxEncodingPredictor_batchnorm')) def _predict(self, features): """Predicts box encodings. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. Returns: box_encodings: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. 
""" spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') net = spatial_averaged_roi_pooled_features for layer in self._box_encoder_layers: net = layer(net) box_encodings = tf.reshape(net, [-1, 1, self._number_of_boxes, self._box_code_size]) return box_encodings # TODO(b/128922690): Unify the implementations of ConvolutionalBoxHead # and WeightSharedConvolutionalBoxHead class WeightSharedConvolutionalBoxHead(head.KerasHead): """Weight shared convolutional box prediction head based on Keras. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, box_code_size, num_predictions_per_location, conv_hyperparams, kernel_size=3, use_depthwise=False, apply_conv_hyperparams_to_heads=False, box_encodings_clip_range=None, return_flat_predictions=True, name=None): """Constructor. Args: box_code_size: Size of encoding for each box. num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. kernel_size: Size of final convolution kernel. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to depthwise seperable convolution layers in the box and class heads. By default, the conv_hyperparams are only applied to layers in the predictor tower when using depthwise separable convolutions. box_encodings_clip_range: Min and max values for clipping box_encodings. return_flat_predictions: If true, returns flattened prediction tensor of shape [batch, height * width * num_predictions_per_location, box_coder]. Otherwise returns the prediction tensor before reshaping, whose shape is [batch, height, width, num_predictions_per_location * num_class_slots]. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if use_depthwise is True and kernel_size is 1. 
""" if use_depthwise and (kernel_size == 1): raise ValueError('Should not use 1x1 kernel when using depthwise conv') super(WeightSharedConvolutionalBoxHead, self).__init__(name=name) self._box_code_size = box_code_size self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._apply_conv_hyperparams_to_heads = apply_conv_hyperparams_to_heads self._box_encodings_clip_range = box_encodings_clip_range self._return_flat_predictions = return_flat_predictions self._box_encoder_layers = [] if self._use_depthwise: kwargs = conv_hyperparams.params(use_bias=True) if self._apply_conv_hyperparams_to_heads: kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] kwargs['pointwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['pointwise_initializer'] = kwargs['kernel_initializer'] self._box_encoder_layers.append( tf.keras.layers.SeparableConv2D( num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], padding='SAME', name='BoxPredictor', **kwargs)) else: self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], padding='SAME', name='BoxPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: box_encodings: A float tensor of shape [batch_size, num_anchors, q, code_size] representing the location of the objects, where q is 1 or the number of classes. """ box_encodings = features for layer in self._box_encoder_layers: box_encodings = layer(box_encodings) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) if self._return_flat_predictions: box_encodings = tf.reshape(box_encodings, [batch_size, -1, self._box_code_size]) return box_encodings
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keras_box_head.py
keras_box_head.py
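keras_box_head.py builds the same prediction as a stack of Keras layers applied inside _predict. The sketch below is a simplified stand-in (plain tf.keras, with no conv_hyperparams, batch norm, depthwise option, or encoding clipping) that reproduces the layer-then-reshape flow:

import tensorflow as tf

num_predictions_per_location, box_code_size, kernel_size = 3, 4, 3

# Simplified analogue of the Keras ConvolutionalBoxHead: one Conv2D that
# emits all box encodings for a spatial cell, then the per-anchor reshape.
box_conv = tf.keras.layers.Conv2D(
    num_predictions_per_location * box_code_size,
    [kernel_size, kernel_size], padding='SAME', name='BoxEncodingPredictor')

features = tf.random.normal([2, 10, 10, 32])
box_encodings = box_conv(features)
batch_size = tf.shape(features)[0]
box_encodings = tf.reshape(box_encodings, [batch_size, -1, 1, box_code_size])
print(box_encodings.shape)  # (2, 300, 1, 4); 10 * 10 * 3 = 300 anchors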
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.predictors.heads import head class MaskRCNNKeypointHead(head.Head): """Mask RCNN keypoint prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, num_keypoints=17, conv_hyperparams_fn=None, keypoint_heatmap_height=56, keypoint_heatmap_width=56, keypoint_prediction_num_conv_layers=8, keypoint_prediction_conv_depth=512): """Constructor. Args: num_keypoints: (int scalar) number of keypoints. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. keypoint_heatmap_height: Desired output mask height. The default value is 14. keypoint_heatmap_width: Desired output mask width. The default value is 14. keypoint_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. keypoint_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. """ super(MaskRCNNKeypointHead, self).__init__() self._num_keypoints = num_keypoints self._conv_hyperparams_fn = conv_hyperparams_fn self._keypoint_heatmap_height = keypoint_heatmap_height self._keypoint_heatmap_width = keypoint_heatmap_width self._keypoint_prediction_num_conv_layers = ( keypoint_prediction_num_conv_layers) self._keypoint_prediction_conv_depth = keypoint_prediction_conv_depth def predict(self, features, num_predictions_per_location=1): """Performs keypoint prediction. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: instance_masks: A float tensor of shape [batch_size, 1, num_keypoints, heatmap_height, heatmap_width]. Raises: ValueError: If num_predictions_per_location is not 1. """ if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') with slim.arg_scope(self._conv_hyperparams_fn()): net = slim.conv2d( features, self._keypoint_prediction_conv_depth, [3, 3], scope='conv_1') for i in range(1, self._keypoint_prediction_num_conv_layers): net = slim.conv2d( net, self._keypoint_prediction_conv_depth, [3, 3], scope='conv_%d' % (i + 1)) net = slim.conv2d_transpose( net, self._num_keypoints, [2, 2], scope='deconv1') heatmaps_mask = tf.image.resize_bilinear( net, [self._keypoint_heatmap_height, self._keypoint_heatmap_width], align_corners=True, name='upsample') return tf.expand_dims( tf.transpose(heatmaps_mask, perm=[0, 3, 1, 2]), axis=1, name='KeypointPredictor')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/predictors/heads/keypoint_head.py
keypoint_head.py
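MaskRCNNKeypointHead.predict ends by converting per-keypoint heatmaps from channels-last layout into the [batch, 1, num_keypoints, heatmap_height, heatmap_width] layout the rest of the pipeline expects. A minimal sketch of just that layout change, with placeholder values:

import tensorflow.compat.v1 as tf

batch, heatmap_h, heatmap_w, num_keypoints = 2, 56, 56, 17

# Stand-in for the upsampled keypoint logits produced inside predict(),
# just before the final layout change.
heatmaps = tf.zeros([batch, heatmap_h, heatmap_w, num_keypoints])

# The same transpose + expand_dims the head applies before returning.
keypoint_predictions = tf.expand_dims(
    tf.transpose(heatmaps, perm=[0, 3, 1, 2]), axis=1)
print(keypoint_predictions.shape)  # (2, 1, 17, 56, 56)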
"""Python library for ssd model, tailored for TPU inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top # Checking TF version, because this module relies on TPUPartitionedCall # in tensorflow.python.tpu, which is not available until TF r1.14. major, minor, _ = tf.__version__.split('.') # pylint: disable=protected-access if int(major) < 1 or (int(major == 1) and int(minor) < 14): raise RuntimeError( 'TensorFlow version >= 1.14 is required. Found ({}).'.format( tf.__version__)) # pylint: disable=protected-access from tensorflow.python.framework import function from tensorflow.python.tpu import functional as tpu_functional from tensorflow.python.tpu import tpu from tensorflow.python.tpu.bfloat16 import bfloat16_scope from tensorflow.python.tpu.ops import tpu_ops from object_detection import exporter from object_detection.builders import model_builder from object_detection.tpu_exporters import utils ANCHORS = 'anchors' BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' def get_prediction_tensor_shapes(pipeline_config): """Gets static shapes of tensors by building the graph on CPU. This function builds the graph on CPU and obtain static shapes of output tensors from TPUPartitionedCall. Shapes information are later used for setting shapes of tensors when TPU graphs are built. This is necessary because tensors coming out of TPUPartitionedCall lose their shape information, which are needed for a lot of CPU operations later. Args: pipeline_config: A TrainEvalPipelineConfig proto. Returns: A python dict of tensors' names and their shapes. """ detection_model = model_builder.build( pipeline_config.model, is_training=False) _, input_tensors = exporter.input_placeholder_fn_map['image_tensor']() inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) return { BOX_ENCODINGS: prediction_dict[BOX_ENCODINGS].shape.as_list(), CLASS_PREDICTIONS_WITH_BACKGROUND: prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND].shape.as_list(), ANCHORS: prediction_dict[ANCHORS].shape.as_list(), } def recover_shape(preprocessed_inputs, prediction_outputs, shapes_info): """Recovers shape from TPUPartitionedCall. Args: preprocessed_inputs: 4D tensor, shaped (batch, channels, height, width) prediction_outputs: Python list of tensors, in the following order - box_encodings - 3D tensor, shaped (code_size, batch, num_anchors); class_predictions_with_background - 3D tensor, shaped (num_classes + 1, batch, num_anchors); anchors - 2D tensor, shaped (4, num_anchors) shapes_info: Python dict of tensor shapes as lists. 
Returns: preprocessed_inputs: 4D tensor, shaped (batch, height, width, channels) box_encodings: 3D tensor, shaped (batch, num_anchors, code_size) class_predictions_with_background: 3D tensor, shaped (batch, num_anchors, num_classes + 1) anchors: 2D tensor, shaped (num_anchors, 4) """ # Dimshuffle: (b, c, h, w) -> (b, h, w, c) preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1]) box_encodings = tf.transpose(prediction_outputs[0], perm=[1, 2, 0]) # [None, None, detection_model._box_coder.code_size] box_encodings.set_shape(shapes_info[BOX_ENCODINGS]) class_predictions_with_background = tf.transpose( prediction_outputs[1], perm=[1, 2, 0]) # [None, None, num_classes + 1] class_predictions_with_background.set_shape( shapes_info[CLASS_PREDICTIONS_WITH_BACKGROUND]) anchors = tf.transpose(prediction_outputs[2], perm=[1, 0]) # [None, 4] anchors.set_shape(shapes_info[ANCHORS]) return (preprocessed_inputs, box_encodings, class_predictions_with_background, anchors) def build_graph(pipeline_config, shapes_info, input_type='encoded_image_string_tensor', use_bfloat16=False): """Builds TPU serving graph of ssd to be exported. Args: pipeline_config: A TrainEvalPipelineConfig proto. shapes_info: A python dict of tensors' names and their shapes, returned by `get_prediction_tensor_shapes()`. input_type: One of 'encoded_image_string_tensor': a 1d tensor with dtype=tf.string 'image_tensor': a 4d tensor with dtype=tf.uint8 'tf_example': a 1d tensor with dtype=tf.string use_bfloat16: If true, use tf.bfloat16 on TPU. Returns: placeholder_tensor: A placeholder tensor, type determined by `input_type`. result_tensor_dict: A python dict of tensors' names and tensors. """ detection_model = model_builder.build( pipeline_config.model, is_training=False) placeholder_tensor, input_tensors = \ exporter.input_placeholder_fn_map[input_type]() inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) # Dimshuffle: (b, h, w, c) -> (b, c, h, w) # This is to avoid extra padding due to TPU memory layout: # We swap larger dimensions in and smaller dimensions out, so that small # dimensions don't get padded tens / hundreds times of its own size. # This trick is applied to other similar tensors below. preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 3, 1, 2]) if use_bfloat16: preprocessed_inputs = tf.cast(preprocessed_inputs, dtype=tf.bfloat16) def predict_tpu_subgraph(preprocessed_inputs, true_image_shapes): """Wraps over the CPU version of `predict()`. This builds a same graph as the original `predict()`, manipulates result tensors' dimensions to be memory efficient on TPU, and returns them as list of tensors. Args: preprocessed_inputs: A 4D tensor of shape (batch, channels, height, width) true_image_shapes: True image shapes tensor. 
Returns: A Python list of tensors: box_encodings: 3D tensor of shape (code_size, batch_size, num_anchors) class_predictions_with_background: 3D tensor, shape (num_classes + 1, batch_size, num_anchors) anchors: 2D tensor of shape (4, num_anchors) """ # Dimshuffle: (b, c, h, w) -> (b, h, w, c) preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1]) if use_bfloat16: with bfloat16_scope(): prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) else: prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) # Dimshuffle: (batch, anchors, depth) -> (depth, batch, anchors) return [ tf.transpose(prediction_dict[BOX_ENCODINGS], perm=[2, 0, 1]), tf.transpose( prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND], perm=[2, 0, 1]), tf.transpose(prediction_dict[ANCHORS], perm=[1, 0]), ] @function.Defun(capture_resource_var_by_value=False) def predict_tpu(): return tpu.rewrite(predict_tpu_subgraph, [preprocessed_inputs, true_image_shapes]) prediction_outputs = tpu_functional.TPUPartitionedCall( args=predict_tpu.captured_inputs, device_ordinal=tpu_ops.tpu_ordinal_selector(), Tout=[o.type for o in predict_tpu.definition.signature.output_arg], f=predict_tpu) (preprocessed_inputs, box_encodings, class_predictions_with_background, anchors) = recover_shape(preprocessed_inputs, prediction_outputs, shapes_info) output_tensors = { 'preprocessed_inputs': preprocessed_inputs, BOX_ENCODINGS: box_encodings, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background, ANCHORS: anchors, } if use_bfloat16: output_tensors = utils.bfloat16_to_float32_nested(output_tensors) postprocessed_tensors = detection_model.postprocess(output_tensors, true_image_shapes) result_tensor_dict = exporter.add_output_tensor_nodes(postprocessed_tensors, 'inference_op') return placeholder_tensor, result_tensor_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/ssd.py
ssd.py
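The SSD TPU exporter above leans on two small tricks: transposing (b, h, w, c) to (b, c, h, w) before entering the TPU subgraph so small dimensions are not padded hundreds of times over, and calling set_shape afterwards because TPUPartitionedCall drops static shapes. A toy illustration of both, with made-up dimensions:

import tensorflow.compat.v1 as tf

# Swap the large spatial dimensions inward before the TPU subgraph ...
images_nhwc = tf.zeros([1, 640, 640, 3])
images_nchw = tf.transpose(images_nhwc, perm=[0, 3, 1, 2])  # (b, c, h, w)

# ... then swap them back and re-attach the static shape on the way out,
# mirroring what recover_shape() does for the prediction tensors.
recovered = tf.transpose(images_nchw, perm=[0, 2, 3, 1])    # (b, h, w, c)
recovered.set_shape([1, 640, 640, 3])
print(recovered.shape)  # (1, 640, 640, 3)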
"""Python library for faster_rcnn model, tailored for TPU inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=protected-access import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top # Checking TF version, because this module relies on TPUPartitionedCall # in tensorflow.python.tpu, which is not available until TF r1.14. major, minor, _ = tf.__version__.split('.') # pylint: disable=protected-access if int(major) < 1 or (int(major == 1) and int(minor) < 14): raise RuntimeError( 'TensorFlow version >= 1.14 is required. Found ({}).'.format( tf.__version__)) from tensorflow.python.framework import function from tensorflow.python.tpu import functional as tpu_functional from tensorflow.python.tpu import tpu from tensorflow.python.tpu.bfloat16 import bfloat16_scope from tensorflow.python.tpu.ops import tpu_ops from object_detection import exporter from object_detection.builders import model_builder from object_detection.tpu_exporters import utils ANCHORS = 'anchors' BOX_CLASSIFIER_FEATURES = 'box_classifier_features' BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' IMAGE_SHAPE = 'image_shape' NUM_PROPOSALS = 'num_proposals' PROPOSAL_BOXES = 'proposal_boxes' PROPOSAL_BOXES_NORMALIZED = 'proposal_boxes_normalized' REFINED_BOX_ENCODINGS = 'refined_box_encodings' RPN_BOX_ENCODINGS = 'rpn_box_encodings' RPN_BOX_PREDICTOR_FEATURES = 'rpn_box_predictor_features' RPN_FEATURES_TO_CROP = 'rpn_features_to_crop' RPN_OBJECTNESS_PREDICTIONS_WITH_BACKGROUND = \ 'rpn_objectness_predictions_with_background' INPUT_BUILDER_UTIL_MAP = { 'model_build': model_builder.build, } def modify_config(pipeline_config): """Modifies pipeline config to build the correct graph for TPU.""" # faster_rcnn.use_static_shapes and faster_rcnn.use_static_shapes_for_eval # are set to True in order for detection_model.use_static_shapes to be True. # We need to set this so that clip_to_window in _predict_first_stage # can work on TPU. However as a side-effect, the flag forces the use of # padded version of NMS. pipeline_config.model.faster_rcnn.use_static_shapes = True pipeline_config.model.faster_rcnn.use_static_shapes_for_eval = True pipeline_config.model.faster_rcnn.use_matmul_crop_and_resize = True pipeline_config.model.faster_rcnn.clip_anchors_to_image = True return pipeline_config def get_prediction_tensor_shapes(pipeline_config): """Gets static shapes of tensors by building the graph on CPU. This function builds the graph on CPU and obtain static shapes of output tensors from TPUPartitionedCall. Shapes information are later used for setting shapes of tensors when TPU graphs are built. This is necessary because tensors coming out of TPUPartitionedCall lose their shape information, which are needed for a lot of CPU operations later. Args: pipeline_config: A TrainEvalPipelineConfig proto. Returns: A python dict of tensors' names and their shapes. 
""" pipeline_config = modify_config(pipeline_config) detection_model = INPUT_BUILDER_UTIL_MAP['model_build']( pipeline_config.model, is_training=False) _, input_tensors = exporter.input_placeholder_fn_map['image_tensor']() inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) shapes_info = {} for k, v in prediction_dict.items(): if isinstance(v, list): shapes_info[k] = [item.shape.as_list() for item in v] else: shapes_info[k] = v.shape.as_list() return shapes_info def build_graph(pipeline_config, shapes_info, input_type='encoded_image_string_tensor', use_bfloat16=True): """Builds serving graph of faster_rcnn to be exported. Args: pipeline_config: A TrainEvalPipelineConfig proto. shapes_info: A python dict of tensors' names and their shapes, returned by `get_prediction_tensor_shapes()`. input_type: One of 'encoded_image_string_tensor': a 1d tensor with dtype=tf.string 'image_tensor': a 4d tensor with dtype=tf.uint8 'tf_example': a 1d tensor with dtype=tf.string use_bfloat16: If true, use tf.bfloat16 on TPU. Returns: placeholder_tensor: A placeholder tensor, type determined by `input_type`. result_tensor_dict: A python dict of tensors' names and tensors. """ pipeline_config = modify_config(pipeline_config) detection_model = INPUT_BUILDER_UTIL_MAP['model_build']( pipeline_config.model, is_training=False) placeholder_tensor, input_tensors = \ exporter.input_placeholder_fn_map[input_type]() # CPU pre-processing inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) # Dimshuffle: [b, h, w, c] -> [b, c, h, w] preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 3, 1, 2]) if use_bfloat16: preprocessed_inputs = tf.cast(preprocessed_inputs, dtype=tf.bfloat16) # TPU feature extraction def tpu_subgraph_predict_fn(preprocessed_inputs, true_image_shapes): """Defines the first part of graph on TPU.""" # [b, c, h, w] -> [b, h, w, c] preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1]) prediction_dict = detection_model.predict(preprocessed_inputs, true_image_shapes) return ( # [batch, anchor, depth] -> [depth, batch, anchor] tf.transpose(prediction_dict[RPN_BOX_ENCODINGS], perm=[2, 0, 1]), # [batch, anchor, depth] -> [depth, batch, anchor] tf.transpose( prediction_dict[RPN_OBJECTNESS_PREDICTIONS_WITH_BACKGROUND], perm=[2, 0, 1]), # [anchors, depth] tf.transpose(prediction_dict[ANCHORS], perm=[1, 0]), # [num_proposals, num_classes, code_size] prediction_dict[REFINED_BOX_ENCODINGS], prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND], prediction_dict[NUM_PROPOSALS], prediction_dict[PROPOSAL_BOXES]) @function.Defun(capture_resource_var_by_value=False) def tpu_subgraph_predict(): if use_bfloat16: with bfloat16_scope(): return tpu.rewrite(tpu_subgraph_predict_fn, [preprocessed_inputs, true_image_shapes]) else: return tpu.rewrite(tpu_subgraph_predict_fn, [preprocessed_inputs, true_image_shapes]) (rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, refined_box_encodings, class_predictions_with_background, num_proposals, proposal_boxes) = tpu_functional.TPUPartitionedCall( args=tpu_subgraph_predict.captured_inputs, device_ordinal=tpu_ops.tpu_ordinal_selector(), Tout=[ o.type for o in tpu_subgraph_predict.definition.signature.output_arg ], f=tpu_subgraph_predict) prediction_dict = { RPN_BOX_ENCODINGS: tf.transpose(rpn_box_encodings, perm=[1, 
2, 0]), RPN_OBJECTNESS_PREDICTIONS_WITH_BACKGROUND: tf.transpose( rpn_objectness_predictions_with_background, perm=[1, 2, 0]), ANCHORS: tf.transpose(anchors, perm=[1, 0]), REFINED_BOX_ENCODINGS: refined_box_encodings, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background, NUM_PROPOSALS: num_proposals, PROPOSAL_BOXES: proposal_boxes } for k in prediction_dict: if isinstance(prediction_dict[k], list): prediction_dict[k] = [ prediction_dict[k][idx].set_shape(shapes_info[k][idx]) for idx in range(len(prediction_dict[k]))] else: prediction_dict[k].set_shape(shapes_info[k]) if use_bfloat16: prediction_dict = utils.bfloat16_to_float32_nested(prediction_dict) # CPU post-processing (NMS) postprocessed_tensors = detection_model.postprocess(prediction_dict, true_image_shapes) result_tensor_dict = exporter.add_output_tensor_nodes(postprocessed_tensors, 'inference_op') return placeholder_tensor, result_tensor_dict
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/faster_rcnn.py
faster_rcnn.py
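modify_config() above only flips a handful of faster_rcnn flags so the first stage can run with static shapes on TPU. A hypothetical minimal config showing those fields being set directly; the field names are exactly the ones modify_config touches, while num_classes is an arbitrary placeholder value:

from object_detection.protos import pipeline_pb2

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.faster_rcnn.num_classes = 90  # placeholder value
# Flags that modify_config() forces on for TPU export.
pipeline_config.model.faster_rcnn.use_static_shapes = True
pipeline_config.model.faster_rcnn.use_static_shapes_for_eval = True
pipeline_config.model.faster_rcnn.use_matmul_crop_and_resize = True
pipeline_config.model.faster_rcnn.clip_anchors_to_image = True
print(pipeline_config.model.WhichOneof('model'))  # 'faster_rcnn'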
"""Python library for exporting SavedModel, tailored for TPU inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf from google.protobuf import text_format # pylint: disable=g-direct-tensorflow-import from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import tag_constants from tensorflow.python.tpu import tpu # pylint: enable=g-direct-tensorflow-import from object_detection.protos import pipeline_pb2 from object_detection.tpu_exporters import faster_rcnn from object_detection.tpu_exporters import ssd model_map = { 'faster_rcnn': faster_rcnn, 'ssd': ssd, } def parse_pipeline_config(pipeline_config_file): """Returns pipeline config and meta architecture name.""" with tf.gfile.GFile(pipeline_config_file, 'r') as config_file: config_str = config_file.read() pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() text_format.Merge(config_str, pipeline_config) meta_arch = pipeline_config.model.WhichOneof('model') return pipeline_config, meta_arch def export(pipeline_config_file, ckpt_path, export_dir, input_placeholder_name='placeholder_tensor', input_type='encoded_image_string_tensor', use_bfloat16=False): """Exports as SavedModel. Args: pipeline_config_file: Pipeline config file name. ckpt_path: Training checkpoint path. export_dir: Directory to export SavedModel. input_placeholder_name: input placeholder's name in SavedModel signature. input_type: One of 'encoded_image_string_tensor': a 1d tensor with dtype=tf.string 'image_tensor': a 4d tensor with dtype=tf.uint8 'tf_example': a 1d tensor with dtype=tf.string use_bfloat16: If true, use tf.bfloat16 on TPU. """ pipeline_config, meta_arch = parse_pipeline_config(pipeline_config_file) shapes_info = model_map[meta_arch].get_prediction_tensor_shapes( pipeline_config) with tf.Graph().as_default(), tf.Session() as sess: placeholder_tensor, result_tensor_dict = model_map[meta_arch].build_graph( pipeline_config, shapes_info, input_type, use_bfloat16) saver = tf.train.Saver() init_op = tf.global_variables_initializer() sess.run(init_op) if ckpt_path is not None: saver.restore(sess, ckpt_path) # export saved model builder = tf.saved_model.builder.SavedModelBuilder(export_dir) tensor_info_inputs = { input_placeholder_name: tf.saved_model.utils.build_tensor_info(placeholder_tensor) } tensor_info_outputs = { k: tf.saved_model.utils.build_tensor_info(v) for k, v in result_tensor_dict.items() } detection_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs=tensor_info_inputs, outputs=tensor_info_outputs, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) tf.logging.info('Inputs:\n{}\nOutputs:{}\nPredict method name:{}'.format( tensor_info_inputs, tensor_info_outputs, tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) # Graph for TPU. builder.add_meta_graph_and_variables( sess, [ tf.saved_model.tag_constants.SERVING, tf.saved_model.tag_constants.TPU ], signature_def_map={ tf.saved_model.signature_constants .DEFAULT_SERVING_SIGNATURE_DEF_KEY: detection_signature, }, strip_default_attrs=True) # Graph for CPU, this is for passing infra validation. 
builder.add_meta_graph( [tf.saved_model.tag_constants.SERVING], signature_def_map={ tf.saved_model.signature_constants .DEFAULT_SERVING_SIGNATURE_DEF_KEY: detection_signature, }, strip_default_attrs=True) builder.save(as_text=False) tf.logging.info('Model saved to {}'.format(export_dir)) def run_inference(inputs, pipeline_config_file, ckpt_path, input_type='encoded_image_string_tensor', use_bfloat16=False, repeat=1): """Runs inference on TPU. Args: inputs: Input image with the same type as `input_type` pipeline_config_file: Pipeline config file name. ckpt_path: Training checkpoint path. input_type: One of 'encoded_image_string_tensor': a 1d tensor with dtype=tf.string 'image_tensor': a 4d tensor with dtype=tf.uint8 'tf_example': a 1d tensor with dtype=tf.string use_bfloat16: If true, use tf.bfloat16 on TPU. repeat: Number of times to repeat running the provided input for profiling. Returns: A dict of resulting tensors. """ pipeline_config, meta_arch = parse_pipeline_config(pipeline_config_file) shapes_info = model_map[meta_arch].get_prediction_tensor_shapes( pipeline_config) with tf.Graph().as_default(), tf.Session() as sess: placeholder_tensor, result_tensor_dict = model_map[meta_arch].build_graph( pipeline_config, shapes_info, input_type, use_bfloat16) saver = tf.train.Saver() init_op = tf.global_variables_initializer() sess.run(tpu.initialize_system()) sess.run(init_op) if ckpt_path is not None: saver.restore(sess, ckpt_path) for _ in range(repeat): tensor_dict_out = sess.run( result_tensor_dict, feed_dict={placeholder_tensor: [inputs]}) sess.run(tpu.shutdown_system()) return tensor_dict_out def run_inference_from_saved_model(inputs, saved_model_dir, input_placeholder_name='placeholder_tensor', repeat=1): """Loads saved model and run inference on TPU. Args: inputs: Input image with the same type as `input_type` saved_model_dir: The directory SavedModel being exported to. input_placeholder_name: input placeholder's name in SavedModel signature. repeat: Number of times to repeat running the provided input for profiling. Returns: A dict of resulting tensors. """ with tf.Graph().as_default(), tf.Session() as sess: meta_graph = loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], saved_model_dir) sess.run(tpu.initialize_system()) key_prediction = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY tensor_name_input = ( meta_graph.signature_def[key_prediction].inputs[input_placeholder_name] .name) tensor_name_output = { k: v.name for k, v in (meta_graph.signature_def[key_prediction].outputs.items()) } for _ in range(repeat): tensor_dict_out = sess.run( tensor_name_output, feed_dict={tensor_name_input: [inputs]}) sess.run(tpu.shutdown_system()) return tensor_dict_out
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/tpu_exporters/export_saved_model_tpu_lib.py
export_saved_model_tpu_lib.py
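A hedged usage sketch for the export() entry point defined above; all paths are placeholders, and the keyword arguments mirror the signature in export_saved_model_tpu_lib.py:

from object_detection.tpu_exporters import export_saved_model_tpu_lib

# Placeholder paths; substitute a real pipeline config, checkpoint, and
# output directory before running.
export_saved_model_tpu_lib.export(
    pipeline_config_file='/path/to/pipeline.config',
    ckpt_path='/path/to/model.ckpt',
    export_dir='/path/to/export_dir',
    input_type='encoded_image_string_tensor',
    use_bfloat16=False)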
import tensorflow.compat.v1 as tf from object_detection.core import preprocessor from object_detection.protos import preprocessor_pb2 def _get_step_config_from_proto(preprocessor_step_config, step_name): """Returns the value of a field named step_name from proto. Args: preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object. step_name: Name of the field to get value from. Returns: result_dict: a sub proto message from preprocessor_step_config which will be later converted to a dictionary. Raises: ValueError: If field does not exist in proto. """ for field, value in preprocessor_step_config.ListFields(): if field.name == step_name: return value raise ValueError('Could not get field %s from proto!' % step_name) def _get_dict_from_proto(config): """Helper function to put all proto fields into a dictionary. For many preprocessing steps, there's an trivial 1-1 mapping from proto fields to function arguments. This function automatically populates a dictionary with the arguments from the proto. Protos that CANNOT be trivially populated include: * nested messages. * steps that check if an optional field is set (ie. where None != 0). * protos that don't map 1-1 to arguments (ie. list should be reshaped). * fields requiring additional validation (ie. repeated field has n elements). Args: config: A protobuf object that does not violate the conditions above. Returns: result_dict: |config| converted into a python dictionary. """ result_dict = {} for field, value in config.ListFields(): result_dict[field.name] = value return result_dict # A map from a PreprocessingStep proto config field name to the preprocessing # function that should be used. The PreprocessingStep proto should be parsable # with _get_dict_from_proto. PREPROCESSING_FUNCTION_MAP = { 'normalize_image': preprocessor.normalize_image, 'random_pixel_value_scale': preprocessor.random_pixel_value_scale, 'random_image_scale': preprocessor.random_image_scale, 'random_rgb_to_gray': preprocessor.random_rgb_to_gray, 'random_adjust_brightness': preprocessor.random_adjust_brightness, 'random_adjust_contrast': preprocessor.random_adjust_contrast, 'random_adjust_hue': preprocessor.random_adjust_hue, 'random_adjust_saturation': preprocessor.random_adjust_saturation, 'random_distort_color': preprocessor.random_distort_color, 'random_crop_to_aspect_ratio': preprocessor.random_crop_to_aspect_ratio, 'random_black_patches': preprocessor.random_black_patches, 'random_jpeg_quality': preprocessor.random_jpeg_quality, 'random_downscale_to_target_pixels': preprocessor.random_downscale_to_target_pixels, 'random_patch_gaussian': preprocessor.random_patch_gaussian, 'rgb_to_gray': preprocessor.rgb_to_gray, 'scale_boxes_to_pixel_coordinates': (preprocessor.scale_boxes_to_pixel_coordinates), 'subtract_channel_mean': preprocessor.subtract_channel_mean, 'convert_class_logits_to_softmax': preprocessor.convert_class_logits_to_softmax, 'adjust_gamma': preprocessor.adjust_gamma, } # A map to convert from preprocessor_pb2.ResizeImage.Method enum to # tf.image.ResizeMethod. 
RESIZE_METHOD_MAP = { preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA, preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC, preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR, preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: ( tf.image.ResizeMethod.NEAREST_NEIGHBOR), } def get_random_jitter_kwargs(proto): return { 'ratio': proto.ratio, 'jitter_mode': preprocessor_pb2.RandomJitterBoxes.JitterMode.Name(proto.jitter_mode ).lower() } def build(preprocessor_step_config): """Builds preprocessing step based on the configuration. Args: preprocessor_step_config: PreprocessingStep configuration proto. Returns: function, argmap: A callable function and an argument map to call function with. Raises: ValueError: On invalid configuration. """ step_type = preprocessor_step_config.WhichOneof('preprocessing_step') if step_type in PREPROCESSING_FUNCTION_MAP: preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type] step_config = _get_step_config_from_proto(preprocessor_step_config, step_type) function_args = _get_dict_from_proto(step_config) return (preprocessing_function, function_args) if step_type == 'random_horizontal_flip': config = preprocessor_step_config.random_horizontal_flip return (preprocessor.random_horizontal_flip, { 'keypoint_flip_permutation': tuple( config.keypoint_flip_permutation) or None, 'probability': config.probability or None, }) if step_type == 'random_vertical_flip': config = preprocessor_step_config.random_vertical_flip return (preprocessor.random_vertical_flip, { 'keypoint_flip_permutation': tuple( config.keypoint_flip_permutation) or None, 'probability': config.probability or None, }) if step_type == 'random_rotation90': config = preprocessor_step_config.random_rotation90 return (preprocessor.random_rotation90, { 'keypoint_rot_permutation': tuple( config.keypoint_rot_permutation) or None, 'probability': config.probability or None, }) if step_type == 'random_crop_image': config = preprocessor_step_config.random_crop_image return (preprocessor.random_crop_image, { 'min_object_covered': config.min_object_covered, 'aspect_ratio_range': (config.min_aspect_ratio, config.max_aspect_ratio), 'area_range': (config.min_area, config.max_area), 'overlap_thresh': config.overlap_thresh, 'clip_boxes': config.clip_boxes, 'random_coef': config.random_coef, }) if step_type == 'random_pad_image': config = preprocessor_step_config.random_pad_image min_image_size = None if (config.HasField('min_image_height') != config.HasField('min_image_width')): raise ValueError('min_image_height and min_image_width should be either ' 'both set or both unset.') if config.HasField('min_image_height'): min_image_size = (config.min_image_height, config.min_image_width) max_image_size = None if (config.HasField('max_image_height') != config.HasField('max_image_width')): raise ValueError('max_image_height and max_image_width should be either ' 'both set or both unset.') if config.HasField('max_image_height'): max_image_size = (config.max_image_height, config.max_image_width) pad_color = config.pad_color or None if pad_color: if len(pad_color) != 3: tf.logging.warn('pad_color should have 3 elements (RGB) if set!') pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) return (preprocessor.random_pad_image, { 'min_image_size': min_image_size, 'max_image_size': max_image_size, 'pad_color': pad_color, }) if step_type == 'random_absolute_pad_image': config = preprocessor_step_config.random_absolute_pad_image max_height_padding = config.max_height_padding or 1 
max_width_padding = config.max_width_padding or 1 pad_color = config.pad_color or None if pad_color: if len(pad_color) != 3: tf.logging.warn('pad_color should have 3 elements (RGB) if set!') pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) return (preprocessor.random_absolute_pad_image, { 'max_height_padding': max_height_padding, 'max_width_padding': max_width_padding, 'pad_color': pad_color, }) if step_type == 'random_crop_pad_image': config = preprocessor_step_config.random_crop_pad_image min_padded_size_ratio = config.min_padded_size_ratio if min_padded_size_ratio and len(min_padded_size_ratio) != 2: raise ValueError('min_padded_size_ratio should have 2 elements if set!') max_padded_size_ratio = config.max_padded_size_ratio if max_padded_size_ratio and len(max_padded_size_ratio) != 2: raise ValueError('max_padded_size_ratio should have 2 elements if set!') pad_color = config.pad_color or None if pad_color: if len(pad_color) != 3: tf.logging.warn('pad_color should have 3 elements (RGB) if set!') pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) kwargs = { 'min_object_covered': config.min_object_covered, 'aspect_ratio_range': (config.min_aspect_ratio, config.max_aspect_ratio), 'area_range': (config.min_area, config.max_area), 'overlap_thresh': config.overlap_thresh, 'clip_boxes': config.clip_boxes, 'random_coef': config.random_coef, 'pad_color': pad_color, } if min_padded_size_ratio: kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) if max_padded_size_ratio: kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) return (preprocessor.random_crop_pad_image, kwargs) if step_type == 'random_resize_method': config = preprocessor_step_config.random_resize_method return (preprocessor.random_resize_method, { 'target_size': [config.target_height, config.target_width], }) if step_type == 'resize_image': config = preprocessor_step_config.resize_image method = RESIZE_METHOD_MAP[config.method] return (preprocessor.resize_image, { 'new_height': config.new_height, 'new_width': config.new_width, 'method': method }) if step_type == 'random_self_concat_image': config = preprocessor_step_config.random_self_concat_image return (preprocessor.random_self_concat_image, { 'concat_vertical_probability': config.concat_vertical_probability, 'concat_horizontal_probability': config.concat_horizontal_probability }) if step_type == 'ssd_random_crop': config = preprocessor_step_config.ssd_random_crop if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop, { 'min_object_covered': min_object_covered, 'aspect_ratio_range': aspect_ratio_range, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, }) return (preprocessor.ssd_random_crop, {}) if step_type == 'autoaugment_image': config = preprocessor_step_config.autoaugment_image return (preprocessor.autoaugment_image, { 'policy_name': config.policy_name, }) if step_type == 'drop_label_probabilistically': config = preprocessor_step_config.drop_label_probabilistically return (preprocessor.drop_label_probabilistically, { 'dropped_label': 
config.label, 'drop_probability': config.drop_probability, }) if step_type == 'remap_labels': config = preprocessor_step_config.remap_labels return (preprocessor.remap_labels, { 'original_labels': config.original_labels, 'new_label': config.new_label }) if step_type == 'ssd_random_crop_pad': config = preprocessor_step_config.ssd_random_crop_pad if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] min_padded_size_ratio = [tuple(op.min_padded_size_ratio) for op in config.operations] max_padded_size_ratio = [tuple(op.max_padded_size_ratio) for op in config.operations] pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b) for op in config.operations] return (preprocessor.ssd_random_crop_pad, { 'min_object_covered': min_object_covered, 'aspect_ratio_range': aspect_ratio_range, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, 'min_padded_size_ratio': min_padded_size_ratio, 'max_padded_size_ratio': max_padded_size_ratio, 'pad_color': pad_color, }) return (preprocessor.ssd_random_crop_pad, {}) if step_type == 'ssd_random_crop_fixed_aspect_ratio': config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop_fixed_aspect_ratio, { 'min_object_covered': min_object_covered, 'aspect_ratio': config.aspect_ratio, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, }) return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {}) if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio': config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio kwargs = {} aspect_ratio = config.aspect_ratio if aspect_ratio: kwargs['aspect_ratio'] = aspect_ratio min_padded_size_ratio = config.min_padded_size_ratio if min_padded_size_ratio: if len(min_padded_size_ratio) != 2: raise ValueError('min_padded_size_ratio should have 2 elements if set!') kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) max_padded_size_ratio = config.max_padded_size_ratio if max_padded_size_ratio: if len(max_padded_size_ratio) != 2: raise ValueError('max_padded_size_ratio should have 2 elements if set!') kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) if config.operations: kwargs['min_object_covered'] = [op.min_object_covered for op in config.operations] kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] kwargs['area_range'] = [(op.min_area, op.max_area) for op in config.operations] kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations] kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations] kwargs['random_coef'] = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs) if 
step_type == 'random_square_crop_by_scale': config = preprocessor_step_config.random_square_crop_by_scale return preprocessor.random_square_crop_by_scale, { 'scale_min': config.scale_min, 'scale_max': config.scale_max, 'max_border': config.max_border, 'num_scales': config.num_scales } if step_type == 'random_scale_crop_and_pad_to_square': config = preprocessor_step_config.random_scale_crop_and_pad_to_square return preprocessor.random_scale_crop_and_pad_to_square, { 'scale_min': config.scale_min, 'scale_max': config.scale_max, 'output_size': config.output_size, } if step_type == 'random_jitter_boxes': config = preprocessor_step_config.random_jitter_boxes kwargs = get_random_jitter_kwargs(config) return preprocessor.random_jitter_boxes, kwargs raise ValueError('Unknown preprocessing step.')
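# --- Usage sketch (added for illustration; not part of the original module).
# It shows how one text-format PreprocessingStep proto is turned into the
# (function, kwargs) pair returned above, assuming the surrounding dispatch
# code lives in this module's public build() function. The proto field names
# follow preprocessor.proto as referenced above; the dummy image shape and the
# direct call to the returned function are assumptions for the sketch.
def _example_build_random_resize():
  import tensorflow as tf  # Local imports so the sketch stays self-contained.
  from google.protobuf import text_format
  from object_detection.protos import preprocessor_pb2

  step_proto = text_format.Parse(
      """
      random_resize_method {
        target_height: 320
        target_width: 320
      }
      """, preprocessor_pb2.PreprocessingStep())
  fn, kwargs = build(step_proto)
  # In a training pipeline the (fn, kwargs) pairs are passed to
  # preprocessor.preprocess as data_augmentation_options; here the returned
  # function is applied to a dummy image directly.
  image = tf.zeros([240, 480, 3], dtype=tf.float32)
  return fn(image, **kwargs)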
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/preprocessor_builder.py
preprocessor_builder.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves import zip
from object_detection.anchor_generators import flexible_grid_anchor_generator
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.anchor_generators import multiscale_grid_anchor_generator
from object_detection.protos import anchor_generator_pb2


def build(anchor_generator_config):
  """Builds an anchor generator based on the config.

  Args:
    anchor_generator_config: An anchor_generator.proto object containing the
      config for the desired anchor generator.

  Returns:
    Anchor generator based on the config.

  Raises:
    ValueError: On empty anchor generator proto.
  """
  if not isinstance(anchor_generator_config,
                    anchor_generator_pb2.AnchorGenerator):
    raise ValueError('anchor_generator_config not of type '
                     'anchor_generator_pb2.AnchorGenerator')
  if anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'grid_anchor_generator':
    grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator
    return grid_anchor_generator.GridAnchorGenerator(
        scales=[float(scale)
                for scale in grid_anchor_generator_config.scales],
        aspect_ratios=[float(aspect_ratio)
                       for aspect_ratio
                       in grid_anchor_generator_config.aspect_ratios],
        base_anchor_size=[grid_anchor_generator_config.height,
                          grid_anchor_generator_config.width],
        anchor_stride=[grid_anchor_generator_config.height_stride,
                       grid_anchor_generator_config.width_stride],
        anchor_offset=[grid_anchor_generator_config.height_offset,
                       grid_anchor_generator_config.width_offset])
  elif anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'ssd_anchor_generator':
    ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator
    anchor_strides = None
    if ssd_anchor_generator_config.height_stride:
      anchor_strides = list(
          zip(ssd_anchor_generator_config.height_stride,
              ssd_anchor_generator_config.width_stride))
    anchor_offsets = None
    if ssd_anchor_generator_config.height_offset:
      anchor_offsets = list(
          zip(ssd_anchor_generator_config.height_offset,
              ssd_anchor_generator_config.width_offset))
    return multiple_grid_anchor_generator.create_ssd_anchors(
        num_layers=ssd_anchor_generator_config.num_layers,
        min_scale=ssd_anchor_generator_config.min_scale,
        max_scale=ssd_anchor_generator_config.max_scale,
        scales=[float(scale) for scale in ssd_anchor_generator_config.scales],
        aspect_ratios=ssd_anchor_generator_config.aspect_ratios,
        interpolated_scale_aspect_ratio=(
            ssd_anchor_generator_config.interpolated_scale_aspect_ratio),
        base_anchor_size=[
            ssd_anchor_generator_config.base_anchor_height,
            ssd_anchor_generator_config.base_anchor_width
        ],
        anchor_strides=anchor_strides,
        anchor_offsets=anchor_offsets,
        reduce_boxes_in_lowest_layer=(
            ssd_anchor_generator_config.reduce_boxes_in_lowest_layer))
  elif anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'multiscale_anchor_generator':
    cfg = anchor_generator_config.multiscale_anchor_generator
    return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator(
        cfg.min_level,
        cfg.max_level,
        cfg.anchor_scale,
        [float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios],
        cfg.scales_per_octave,
        cfg.normalize_coordinates)
  elif anchor_generator_config.WhichOneof(
      'anchor_generator_oneof') == 'flexible_grid_anchor_generator':
    cfg = anchor_generator_config.flexible_grid_anchor_generator
    base_sizes = []
    aspect_ratios = []
    strides = []
    offsets = []
    for anchor_grid in cfg.anchor_grid:
      base_sizes.append(tuple(anchor_grid.base_sizes))
      aspect_ratios.append(tuple(anchor_grid.aspect_ratios))
      strides.append((anchor_grid.height_stride, anchor_grid.width_stride))
      offsets.append((anchor_grid.height_offset, anchor_grid.width_offset))
    return flexible_grid_anchor_generator.FlexibleGridAnchorGenerator(
        base_sizes, aspect_ratios, strides, offsets, cfg.normalize_coordinates)
  else:
    raise ValueError('Empty anchor generator.')
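# --- Usage sketch (added for illustration; not part of the original module).
# It builds a grid anchor generator from a text-format AnchorGenerator proto
# and generates anchors for a single 38x38 feature map. The field names follow
# grid_anchor_generator.proto as referenced above; the concrete strides,
# scales, and feature-map shape are arbitrary choices for the sketch.
def _example_build_grid_anchors():
  from google.protobuf import text_format  # Local import for the sketch only.

  anchor_proto = text_format.Parse(
      """
      grid_anchor_generator {
        height_stride: 16
        width_stride: 16
        scales: [0.25, 0.5, 1.0, 2.0]
        aspect_ratios: [0.5, 1.0, 2.0]
      }
      """, anchor_generator_pb2.AnchorGenerator())
  anchor_generator = build(anchor_proto)
  # generate() takes a list of (height, width) feature-map shapes; the result
  # holds the generated anchors as BoxList(s).
  return anchor_generator.generate(feature_map_shape_list=[(38, 38)])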
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/anchor_generator_builder.py
anchor_generator_builder.py
import functools import sys from absl import logging from object_detection.builders import anchor_generator_builder from object_detection.builders import box_coder_builder from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.builders import image_resizer_builder from object_detection.builders import losses_builder from object_detection.builders import matcher_builder from object_detection.builders import post_processing_builder from object_detection.builders import region_similarity_calculator_builder as sim_calc from object_detection.core import balanced_positive_negative_sampler as sampler from object_detection.core import post_processing from object_detection.core import target_assigner from object_detection.meta_architectures import center_net_meta_arch from object_detection.meta_architectures import context_rcnn_meta_arch from object_detection.meta_architectures import deepmac_meta_arch from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.meta_architectures import rfcn_meta_arch from object_detection.meta_architectures import ssd_meta_arch from object_detection.predictors.heads import mask_head from object_detection.protos import losses_pb2 from object_detection.protos import model_pb2 from object_detection.utils import label_map_util from object_detection.utils import ops from object_detection.utils import spatial_transform_ops as spatial_ops from object_detection.utils import tf_version ## Feature Extractors for TF ## This section conditionally imports different feature extractors based on the ## Tensorflow version. ## # pylint: disable=g-import-not-at-top if tf_version.is_tf2(): from object_detection.models import center_net_hourglass_feature_extractor from object_detection.models import center_net_mobilenet_v2_feature_extractor from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor from object_detection.models import center_net_resnet_feature_extractor from object_detection.models import center_net_resnet_v1_fpn_feature_extractor from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res_keras from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_resnet_keras from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_v1_fpn_keras from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_resnet_fpn_keras from object_detection.models.ssd_mobilenet_v1_fpn_keras_feature_extractor import SSDMobileNetV1FpnKerasFeatureExtractor from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor from object_detection.models.ssd_mobilenet_v2_fpn_keras_feature_extractor import SSDMobileNetV2FpnKerasFeatureExtractor from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor from object_detection.predictors import rfcn_keras_box_predictor if sys.version_info[0] >= 3: from object_detection.models import ssd_efficientnet_bifpn_feature_extractor as ssd_efficientnet_bifpn if tf_version.is_tf1(): from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2 from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas from 
object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1 from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor from object_detection.models.ssd_mobilenet_v2_mnasfpn_feature_extractor import SSDMobileNetV2MnasFPNFeatureExtractor from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor from object_detection.models.ssd_mobilenet_edgetpu_feature_extractor import SSDMobileNetEdgeTPUFeatureExtractor from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3LargeFeatureExtractor from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3SmallFeatureExtractor from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetCPUFeatureExtractor from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetDSPFeatureExtractor from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetEdgeTPUFeatureExtractor from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetGPUFeatureExtractor from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor from object_detection.predictors import rfcn_box_predictor # pylint: enable=g-import-not-at-top if tf_version.is_tf2(): SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { 'ssd_mobilenet_v1_keras': SSDMobileNetV1KerasFeatureExtractor, 'ssd_mobilenet_v1_fpn_keras': SSDMobileNetV1FpnKerasFeatureExtractor, 'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor, 'ssd_mobilenet_v2_fpn_keras': SSDMobileNetV2FpnKerasFeatureExtractor, 'ssd_resnet50_v1_fpn_keras': ssd_resnet_v1_fpn_keras.SSDResNet50V1FpnKerasFeatureExtractor, 'ssd_resnet101_v1_fpn_keras': ssd_resnet_v1_fpn_keras.SSDResNet101V1FpnKerasFeatureExtractor, 'ssd_resnet152_v1_fpn_keras': ssd_resnet_v1_fpn_keras.SSDResNet152V1FpnKerasFeatureExtractor, 'ssd_efficientnet-b0_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB0BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b1_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB1BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b2_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB2BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b3_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB3BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b4_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB4BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b5_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB5BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b6_bifpn_keras': 
ssd_efficientnet_bifpn.SSDEfficientNetB6BiFPNKerasFeatureExtractor, 'ssd_efficientnet-b7_bifpn_keras': ssd_efficientnet_bifpn.SSDEfficientNetB7BiFPNKerasFeatureExtractor, } FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { 'faster_rcnn_resnet50_keras': frcnn_resnet_keras.FasterRCNNResnet50KerasFeatureExtractor, 'faster_rcnn_resnet101_keras': frcnn_resnet_keras.FasterRCNNResnet101KerasFeatureExtractor, 'faster_rcnn_resnet152_keras': frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor, 'faster_rcnn_inception_resnet_v2_keras': frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor, 'faster_rcnn_resnet50_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor, 'faster_rcnn_resnet101_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet101FpnKerasFeatureExtractor, 'faster_rcnn_resnet152_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet152FpnKerasFeatureExtractor, } CENTER_NET_EXTRACTOR_FUNCTION_MAP = { 'resnet_v2_50': center_net_resnet_feature_extractor.resnet_v2_50, 'resnet_v2_101': center_net_resnet_feature_extractor.resnet_v2_101, 'resnet_v1_18_fpn': center_net_resnet_v1_fpn_feature_extractor.resnet_v1_18_fpn, 'resnet_v1_34_fpn': center_net_resnet_v1_fpn_feature_extractor.resnet_v1_34_fpn, 'resnet_v1_50_fpn': center_net_resnet_v1_fpn_feature_extractor.resnet_v1_50_fpn, 'resnet_v1_101_fpn': center_net_resnet_v1_fpn_feature_extractor.resnet_v1_101_fpn, 'hourglass_10': center_net_hourglass_feature_extractor.hourglass_10, 'hourglass_20': center_net_hourglass_feature_extractor.hourglass_20, 'hourglass_32': center_net_hourglass_feature_extractor.hourglass_32, 'hourglass_52': center_net_hourglass_feature_extractor.hourglass_52, 'hourglass_104': center_net_hourglass_feature_extractor.hourglass_104, 'mobilenet_v2': center_net_mobilenet_v2_feature_extractor.mobilenet_v2, 'mobilenet_v2_fpn': center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn, 'mobilenet_v2_fpn_sep_conv': center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn, } FEATURE_EXTRACTOR_MAPS = [ CENTER_NET_EXTRACTOR_FUNCTION_MAP, FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP, SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP ] if tf_version.is_tf1(): SSD_FEATURE_EXTRACTOR_CLASS_MAP = { 'ssd_inception_v2': SSDInceptionV2FeatureExtractor, 'ssd_inception_v3': SSDInceptionV3FeatureExtractor, 'ssd_mobilenet_v1': SSDMobileNetV1FeatureExtractor, 'ssd_mobilenet_v1_fpn': SSDMobileNetV1FpnFeatureExtractor, 'ssd_mobilenet_v1_ppn': SSDMobileNetV1PpnFeatureExtractor, 'ssd_mobilenet_v2': SSDMobileNetV2FeatureExtractor, 'ssd_mobilenet_v2_fpn': SSDMobileNetV2FpnFeatureExtractor, 'ssd_mobilenet_v2_mnasfpn': SSDMobileNetV2MnasFPNFeatureExtractor, 'ssd_mobilenet_v3_large': SSDMobileNetV3LargeFeatureExtractor, 'ssd_mobilenet_v3_small': SSDMobileNetV3SmallFeatureExtractor, 'ssd_mobilenet_edgetpu': SSDMobileNetEdgeTPUFeatureExtractor, 'ssd_resnet50_v1_fpn': ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor, 'ssd_resnet101_v1_fpn': ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor, 'ssd_resnet152_v1_fpn': ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor, 'ssd_resnet50_v1_ppn': ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor, 'ssd_resnet101_v1_ppn': ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor, 'ssd_resnet152_v1_ppn': ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor, 'embedded_ssd_mobilenet_v1': EmbeddedSSDMobileNetV1FeatureExtractor, 'ssd_pnasnet': SSDPNASNetFeatureExtractor, 'ssd_mobiledet_cpu': SSDMobileDetCPUFeatureExtractor, 'ssd_mobiledet_dsp': SSDMobileDetDSPFeatureExtractor, 
'ssd_mobiledet_edgetpu': SSDMobileDetEdgeTPUFeatureExtractor, 'ssd_mobiledet_gpu': SSDMobileDetGPUFeatureExtractor, } FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = { 'faster_rcnn_nas': frcnn_nas.FasterRCNNNASFeatureExtractor, 'faster_rcnn_pnas': frcnn_pnas.FasterRCNNPNASFeatureExtractor, 'faster_rcnn_inception_resnet_v2': frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor, 'faster_rcnn_inception_v2': frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor, 'faster_rcnn_resnet50': frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, 'faster_rcnn_resnet101': frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, 'faster_rcnn_resnet152': frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor, } CENTER_NET_EXTRACTOR_FUNCTION_MAP = {} FEATURE_EXTRACTOR_MAPS = [ SSD_FEATURE_EXTRACTOR_CLASS_MAP, FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP, CENTER_NET_EXTRACTOR_FUNCTION_MAP ] def _check_feature_extractor_exists(feature_extractor_type): feature_extractors = set().union(*FEATURE_EXTRACTOR_MAPS) if feature_extractor_type not in feature_extractors: raise ValueError('{} is not supported. See `model_builder.py` for features ' 'extractors compatible with different versions of ' 'Tensorflow'.format(feature_extractor_type)) def _build_ssd_feature_extractor(feature_extractor_config, is_training, freeze_batchnorm, reuse_weights=None): """Builds a ssd_meta_arch.SSDFeatureExtractor based on config. Args: feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto. is_training: True if this feature extractor is being built for training. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. reuse_weights: if the feature extractor should reuse weights. Returns: ssd_meta_arch.SSDFeatureExtractor based on config. Raises: ValueError: On invalid feature extractor type. 
""" feature_type = feature_extractor_config.type depth_multiplier = feature_extractor_config.depth_multiplier min_depth = feature_extractor_config.min_depth pad_to_multiple = feature_extractor_config.pad_to_multiple use_explicit_padding = feature_extractor_config.use_explicit_padding use_depthwise = feature_extractor_config.use_depthwise is_keras = tf_version.is_tf2() if is_keras: conv_hyperparams = hyperparams_builder.KerasLayerHyperparams( feature_extractor_config.conv_hyperparams) else: conv_hyperparams = hyperparams_builder.build( feature_extractor_config.conv_hyperparams, is_training) override_base_feature_extractor_hyperparams = ( feature_extractor_config.override_base_feature_extractor_hyperparams) if not is_keras and feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP: raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type)) if is_keras: feature_extractor_class = SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[ feature_type] else: feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type] kwargs = { 'is_training': is_training, 'depth_multiplier': depth_multiplier, 'min_depth': min_depth, 'pad_to_multiple': pad_to_multiple, 'use_explicit_padding': use_explicit_padding, 'use_depthwise': use_depthwise, 'override_base_feature_extractor_hyperparams': override_base_feature_extractor_hyperparams } if feature_extractor_config.HasField('replace_preprocessor_with_placeholder'): kwargs.update({ 'replace_preprocessor_with_placeholder': feature_extractor_config.replace_preprocessor_with_placeholder }) if feature_extractor_config.HasField('num_layers'): kwargs.update({'num_layers': feature_extractor_config.num_layers}) if is_keras: kwargs.update({ 'conv_hyperparams': conv_hyperparams, 'inplace_batchnorm_update': False, 'freeze_batchnorm': freeze_batchnorm }) else: kwargs.update({ 'conv_hyperparams_fn': conv_hyperparams, 'reuse_weights': reuse_weights, }) if feature_extractor_config.HasField('fpn'): kwargs.update({ 'fpn_min_level': feature_extractor_config.fpn.min_level, 'fpn_max_level': feature_extractor_config.fpn.max_level, 'additional_layer_depth': feature_extractor_config.fpn.additional_layer_depth, }) if feature_extractor_config.HasField('bifpn'): kwargs.update({ 'bifpn_min_level': feature_extractor_config.bifpn.min_level, 'bifpn_max_level': feature_extractor_config.bifpn.max_level, 'bifpn_num_iterations': feature_extractor_config.bifpn.num_iterations, 'bifpn_num_filters': feature_extractor_config.bifpn.num_filters, 'bifpn_combine_method': feature_extractor_config.bifpn.combine_method, }) return feature_extractor_class(**kwargs) def _build_ssd_model(ssd_config, is_training, add_summaries): """Builds an SSD detection model based on the model config. Args: ssd_config: A ssd.proto object containing the config for the desired SSDMetaArch. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tf summaries in the model. Returns: SSDMetaArch based on the config. Raises: ValueError: If ssd_config.type is not recognized (i.e. not registered in model_class_map). 
""" num_classes = ssd_config.num_classes _check_feature_extractor_exists(ssd_config.feature_extractor.type) # Feature extractor feature_extractor = _build_ssd_feature_extractor( feature_extractor_config=ssd_config.feature_extractor, freeze_batchnorm=ssd_config.freeze_batchnorm, is_training=is_training) box_coder = box_coder_builder.build(ssd_config.box_coder) matcher = matcher_builder.build(ssd_config.matcher) region_similarity_calculator = sim_calc.build( ssd_config.similarity_calculator) encode_background_as_zeros = ssd_config.encode_background_as_zeros negative_class_weight = ssd_config.negative_class_weight anchor_generator = anchor_generator_builder.build( ssd_config.anchor_generator) if feature_extractor.is_keras_model: ssd_box_predictor = box_predictor_builder.build_keras( hyperparams_fn=hyperparams_builder.KerasLayerHyperparams, freeze_batchnorm=ssd_config.freeze_batchnorm, inplace_batchnorm_update=False, num_predictions_per_location_list=anchor_generator .num_anchors_per_location(), box_predictor_config=ssd_config.box_predictor, is_training=is_training, num_classes=num_classes, add_background_class=ssd_config.add_background_class) else: ssd_box_predictor = box_predictor_builder.build( hyperparams_builder.build, ssd_config.box_predictor, is_training, num_classes, ssd_config.add_background_class) image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build( ssd_config.post_processing) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, random_example_sampler, expected_loss_weights_fn) = losses_builder.build(ssd_config.loss) normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize equalization_loss_config = ops.EqualizationLossConfig( weight=ssd_config.loss.equalization_loss.weight, exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes) target_assigner_instance = target_assigner.TargetAssigner( region_similarity_calculator, matcher, box_coder, negative_class_weight=negative_class_weight) ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch kwargs = {} return ssd_meta_arch_fn( is_training=is_training, anchor_generator=anchor_generator, box_predictor=ssd_box_predictor, box_coder=box_coder, feature_extractor=feature_extractor, encode_background_as_zeros=encode_background_as_zeros, image_resizer_fn=image_resizer_fn, non_max_suppression_fn=non_max_suppression_fn, score_conversion_fn=score_conversion_fn, classification_loss=classification_loss, localization_loss=localization_loss, classification_loss_weight=classification_weight, localization_loss_weight=localization_weight, normalize_loss_by_num_matches=normalize_loss_by_num_matches, hard_example_miner=hard_example_miner, target_assigner_instance=target_assigner_instance, add_summaries=add_summaries, normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, freeze_batchnorm=ssd_config.freeze_batchnorm, inplace_batchnorm_update=ssd_config.inplace_batchnorm_update, add_background_class=ssd_config.add_background_class, explicit_background_class=ssd_config.explicit_background_class, random_example_sampler=random_example_sampler, expected_loss_weights_fn=expected_loss_weights_fn, use_confidences_as_targets=ssd_config.use_confidences_as_targets, implicit_example_weight=ssd_config.implicit_example_weight, equalization_loss_config=equalization_loss_config, return_raw_detections_during_predict=( 
ssd_config.return_raw_detections_during_predict), **kwargs) def _build_faster_rcnn_feature_extractor( feature_extractor_config, is_training, reuse_weights=True, inplace_batchnorm_update=False): """Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. Args: feature_extractor_config: A FasterRcnnFeatureExtractor proto config from faster_rcnn.proto. is_training: True if this feature extractor is being built for training. reuse_weights: if the feature extractor should reuse weights. inplace_batchnorm_update: Whether to update batch_norm inplace during training. This is required for batch norm to work correctly on TPUs. When this is false, user must add a control dependency on tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch norm moving average parameters. Returns: faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. Raises: ValueError: On invalid feature extractor type. """ if inplace_batchnorm_update: raise ValueError('inplace batchnorm updates not supported.') feature_type = feature_extractor_config.type first_stage_features_stride = ( feature_extractor_config.first_stage_features_stride) batch_norm_trainable = feature_extractor_config.batch_norm_trainable if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP: raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( feature_type)) feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[ feature_type] return feature_extractor_class( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights=reuse_weights) def _build_faster_rcnn_keras_feature_extractor( feature_extractor_config, is_training, inplace_batchnorm_update=False): """Builds a faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor from config. Args: feature_extractor_config: A FasterRcnnFeatureExtractor proto config from faster_rcnn.proto. is_training: True if this feature extractor is being built for training. inplace_batchnorm_update: Whether to update batch_norm inplace during training. This is required for batch norm to work correctly on TPUs. When this is false, user must add a control dependency on tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch norm moving average parameters. Returns: faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor based on config. Raises: ValueError: On invalid feature extractor type. 
""" if inplace_batchnorm_update: raise ValueError('inplace batchnorm updates not supported.') feature_type = feature_extractor_config.type first_stage_features_stride = ( feature_extractor_config.first_stage_features_stride) batch_norm_trainable = feature_extractor_config.batch_norm_trainable if feature_type not in FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP: raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( feature_type)) feature_extractor_class = FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[ feature_type] kwargs = {} if feature_extractor_config.HasField('conv_hyperparams'): kwargs.update({ 'conv_hyperparams': hyperparams_builder.KerasLayerHyperparams( feature_extractor_config.conv_hyperparams), 'override_base_feature_extractor_hyperparams': feature_extractor_config.override_base_feature_extractor_hyperparams }) if feature_extractor_config.HasField('fpn'): kwargs.update({ 'fpn_min_level': feature_extractor_config.fpn.min_level, 'fpn_max_level': feature_extractor_config.fpn.max_level, 'additional_layer_depth': feature_extractor_config.fpn.additional_layer_depth, }) return feature_extractor_class( is_training, first_stage_features_stride, batch_norm_trainable, **kwargs) def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries): """Builds a Faster R-CNN or R-FCN detection model based on the model config. Builds R-FCN model if the second_stage_box_predictor in the config is of type `rfcn_box_predictor` else builds a Faster R-CNN model. Args: frcnn_config: A faster_rcnn.proto object containing the config for the desired FasterRCNNMetaArch or RFCNMetaArch. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tf summaries in the model. Returns: FasterRCNNMetaArch based on the config. Raises: ValueError: If frcnn_config.type is not recognized (i.e. not registered in model_class_map). 
""" num_classes = frcnn_config.num_classes image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer) _check_feature_extractor_exists(frcnn_config.feature_extractor.type) is_keras = tf_version.is_tf2() if is_keras: feature_extractor = _build_faster_rcnn_keras_feature_extractor( frcnn_config.feature_extractor, is_training, inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) else: feature_extractor = _build_faster_rcnn_feature_extractor( frcnn_config.feature_extractor, is_training, inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) number_of_stages = frcnn_config.number_of_stages first_stage_anchor_generator = anchor_generator_builder.build( frcnn_config.first_stage_anchor_generator) first_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'proposal', use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate if is_keras: first_stage_box_predictor_arg_scope_fn = ( hyperparams_builder.KerasLayerHyperparams( frcnn_config.first_stage_box_predictor_conv_hyperparams)) else: first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build( frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training) first_stage_box_predictor_kernel_size = ( frcnn_config.first_stage_box_predictor_kernel_size) first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size use_static_shapes = frcnn_config.use_static_shapes and ( frcnn_config.use_static_shapes_for_eval or is_training) first_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=frcnn_config.first_stage_positive_balance_fraction, is_static=(frcnn_config.use_static_balanced_label_sampler and use_static_shapes)) first_stage_max_proposals = frcnn_config.first_stage_max_proposals if (frcnn_config.first_stage_nms_iou_threshold < 0 or frcnn_config.first_stage_nms_iou_threshold > 1.0): raise ValueError('iou_threshold not in [0, 1.0].') if (is_training and frcnn_config.second_stage_batch_size > first_stage_max_proposals): raise ValueError('second_stage_batch_size should be no greater than ' 'first_stage_max_proposals.') first_stage_non_max_suppression_fn = functools.partial( post_processing.batch_multiclass_non_max_suppression, score_thresh=frcnn_config.first_stage_nms_score_threshold, iou_thresh=frcnn_config.first_stage_nms_iou_threshold, max_size_per_class=frcnn_config.first_stage_max_proposals, max_total_size=frcnn_config.first_stage_max_proposals, use_static_shapes=use_static_shapes, use_partitioned_nms=frcnn_config.use_partitioned_nms_in_first_stage, use_combined_nms=frcnn_config.use_combined_nms_in_first_stage) first_stage_loc_loss_weight = ( frcnn_config.first_stage_localization_loss_weight) first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight initial_crop_size = frcnn_config.initial_crop_size maxpool_kernel_size = frcnn_config.maxpool_kernel_size maxpool_stride = frcnn_config.maxpool_stride second_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'detection', use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) if is_keras: second_stage_box_predictor = box_predictor_builder.build_keras( hyperparams_builder.KerasLayerHyperparams, freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[1], box_predictor_config=frcnn_config.second_stage_box_predictor, is_training=is_training, num_classes=num_classes) else: 
second_stage_box_predictor = box_predictor_builder.build( hyperparams_builder.build, frcnn_config.second_stage_box_predictor, is_training=is_training, num_classes=num_classes) second_stage_batch_size = frcnn_config.second_stage_batch_size second_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=frcnn_config.second_stage_balance_fraction, is_static=(frcnn_config.use_static_balanced_label_sampler and use_static_shapes)) (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn ) = post_processing_builder.build(frcnn_config.second_stage_post_processing) second_stage_localization_loss_weight = ( frcnn_config.second_stage_localization_loss_weight) second_stage_classification_loss = ( losses_builder.build_faster_rcnn_classification_loss( frcnn_config.second_stage_classification_loss)) second_stage_classification_loss_weight = ( frcnn_config.second_stage_classification_loss_weight) second_stage_mask_prediction_loss_weight = ( frcnn_config.second_stage_mask_prediction_loss_weight) hard_example_miner = None if frcnn_config.HasField('hard_example_miner'): hard_example_miner = losses_builder.build_hard_example_miner( frcnn_config.hard_example_miner, second_stage_classification_loss_weight, second_stage_localization_loss_weight) crop_and_resize_fn = ( spatial_ops.multilevel_matmul_crop_and_resize if frcnn_config.use_matmul_crop_and_resize else spatial_ops.multilevel_native_crop_and_resize) clip_anchors_to_image = ( frcnn_config.clip_anchors_to_image) common_kwargs = { 'is_training': is_training, 'num_classes': num_classes, 'image_resizer_fn': image_resizer_fn, 'feature_extractor': feature_extractor, 'number_of_stages': number_of_stages, 'first_stage_anchor_generator': first_stage_anchor_generator, 'first_stage_target_assigner': first_stage_target_assigner, 'first_stage_atrous_rate': first_stage_atrous_rate, 'first_stage_box_predictor_arg_scope_fn': first_stage_box_predictor_arg_scope_fn, 'first_stage_box_predictor_kernel_size': first_stage_box_predictor_kernel_size, 'first_stage_box_predictor_depth': first_stage_box_predictor_depth, 'first_stage_minibatch_size': first_stage_minibatch_size, 'first_stage_sampler': first_stage_sampler, 'first_stage_non_max_suppression_fn': first_stage_non_max_suppression_fn, 'first_stage_max_proposals': first_stage_max_proposals, 'first_stage_localization_loss_weight': first_stage_loc_loss_weight, 'first_stage_objectness_loss_weight': first_stage_obj_loss_weight, 'second_stage_target_assigner': second_stage_target_assigner, 'second_stage_batch_size': second_stage_batch_size, 'second_stage_sampler': second_stage_sampler, 'second_stage_non_max_suppression_fn': second_stage_non_max_suppression_fn, 'second_stage_score_conversion_fn': second_stage_score_conversion_fn, 'second_stage_localization_loss_weight': second_stage_localization_loss_weight, 'second_stage_classification_loss': second_stage_classification_loss, 'second_stage_classification_loss_weight': second_stage_classification_loss_weight, 'hard_example_miner': hard_example_miner, 'add_summaries': add_summaries, 'crop_and_resize_fn': crop_and_resize_fn, 'clip_anchors_to_image': clip_anchors_to_image, 'use_static_shapes': use_static_shapes, 'resize_masks': frcnn_config.resize_masks, 'return_raw_detections_during_predict': frcnn_config.return_raw_detections_during_predict, 'output_final_box_features': frcnn_config.output_final_box_features, 'output_final_box_rpn_features': frcnn_config.output_final_box_rpn_features, } if ((not is_keras and isinstance(second_stage_box_predictor, 
rfcn_box_predictor.RfcnBoxPredictor)) or (is_keras and isinstance(second_stage_box_predictor, rfcn_keras_box_predictor.RfcnKerasBoxPredictor))): return rfcn_meta_arch.RFCNMetaArch( second_stage_rfcn_box_predictor=second_stage_box_predictor, **common_kwargs) elif frcnn_config.HasField('context_config'): context_config = frcnn_config.context_config common_kwargs.update({ 'attention_bottleneck_dimension': context_config.attention_bottleneck_dimension, 'attention_temperature': context_config.attention_temperature, 'use_self_attention': context_config.use_self_attention, 'use_long_term_attention': context_config.use_long_term_attention, 'self_attention_in_sequence': context_config.self_attention_in_sequence, 'num_attention_heads': context_config.num_attention_heads, 'num_attention_layers': context_config.num_attention_layers, 'attention_position': context_config.attention_position }) return context_rcnn_meta_arch.ContextRCNNMetaArch( initial_crop_size=initial_crop_size, maxpool_kernel_size=maxpool_kernel_size, maxpool_stride=maxpool_stride, second_stage_mask_rcnn_box_predictor=second_stage_box_predictor, second_stage_mask_prediction_loss_weight=( second_stage_mask_prediction_loss_weight), **common_kwargs) else: return faster_rcnn_meta_arch.FasterRCNNMetaArch( initial_crop_size=initial_crop_size, maxpool_kernel_size=maxpool_kernel_size, maxpool_stride=maxpool_stride, second_stage_mask_rcnn_box_predictor=second_stage_box_predictor, second_stage_mask_prediction_loss_weight=( second_stage_mask_prediction_loss_weight), **common_kwargs) EXPERIMENTAL_META_ARCH_BUILDER_MAP = { } def _build_experimental_model(config, is_training, add_summaries=True): return EXPERIMENTAL_META_ARCH_BUILDER_MAP[config.name]( is_training, add_summaries) # The class ID in the groundtruth/model architecture is usually 0-based while # the ID in the label map is 1-based. The offset is used to convert between the # the two. 
CLASS_ID_OFFSET = 1 KEYPOINT_STD_DEV_DEFAULT = 1.0 def keypoint_proto_to_params(kp_config, keypoint_map_dict): """Converts CenterNet.KeypointEstimation proto to parameter namedtuple.""" label_map_item = keypoint_map_dict[kp_config.keypoint_class_name] classification_loss, localization_loss, _, _, _, _, _ = ( losses_builder.build(kp_config.loss)) keypoint_indices = [ keypoint.id for keypoint in label_map_item.keypoints ] keypoint_labels = [ keypoint.label for keypoint in label_map_item.keypoints ] keypoint_std_dev_dict = { label: KEYPOINT_STD_DEV_DEFAULT for label in keypoint_labels } if kp_config.keypoint_label_to_std: for label, value in kp_config.keypoint_label_to_std.items(): keypoint_std_dev_dict[label] = value keypoint_std_dev = [keypoint_std_dev_dict[label] for label in keypoint_labels] if kp_config.HasField('heatmap_head_params'): heatmap_head_num_filters = list(kp_config.heatmap_head_params.num_filters) heatmap_head_kernel_sizes = list(kp_config.heatmap_head_params.kernel_sizes) else: heatmap_head_num_filters = [256] heatmap_head_kernel_sizes = [3] if kp_config.HasField('offset_head_params'): offset_head_num_filters = list(kp_config.offset_head_params.num_filters) offset_head_kernel_sizes = list(kp_config.offset_head_params.kernel_sizes) else: offset_head_num_filters = [256] offset_head_kernel_sizes = [3] if kp_config.HasField('regress_head_params'): regress_head_num_filters = list(kp_config.regress_head_params.num_filters) regress_head_kernel_sizes = list( kp_config.regress_head_params.kernel_sizes) else: regress_head_num_filters = [256] regress_head_kernel_sizes = [3] return center_net_meta_arch.KeypointEstimationParams( task_name=kp_config.task_name, class_id=label_map_item.id - CLASS_ID_OFFSET, keypoint_indices=keypoint_indices, classification_loss=classification_loss, localization_loss=localization_loss, keypoint_labels=keypoint_labels, keypoint_std_dev=keypoint_std_dev, task_loss_weight=kp_config.task_loss_weight, keypoint_regression_loss_weight=kp_config.keypoint_regression_loss_weight, keypoint_heatmap_loss_weight=kp_config.keypoint_heatmap_loss_weight, keypoint_offset_loss_weight=kp_config.keypoint_offset_loss_weight, heatmap_bias_init=kp_config.heatmap_bias_init, keypoint_candidate_score_threshold=( kp_config.keypoint_candidate_score_threshold), num_candidates_per_keypoint=kp_config.num_candidates_per_keypoint, peak_max_pool_kernel_size=kp_config.peak_max_pool_kernel_size, unmatched_keypoint_score=kp_config.unmatched_keypoint_score, box_scale=kp_config.box_scale, candidate_search_scale=kp_config.candidate_search_scale, candidate_ranking_mode=kp_config.candidate_ranking_mode, offset_peak_radius=kp_config.offset_peak_radius, per_keypoint_offset=kp_config.per_keypoint_offset, predict_depth=kp_config.predict_depth, per_keypoint_depth=kp_config.per_keypoint_depth, keypoint_depth_loss_weight=kp_config.keypoint_depth_loss_weight, score_distance_offset=kp_config.score_distance_offset, clip_out_of_frame_keypoints=kp_config.clip_out_of_frame_keypoints, rescore_instances=kp_config.rescore_instances, heatmap_head_num_filters=heatmap_head_num_filters, heatmap_head_kernel_sizes=heatmap_head_kernel_sizes, offset_head_num_filters=offset_head_num_filters, offset_head_kernel_sizes=offset_head_kernel_sizes, regress_head_num_filters=regress_head_num_filters, regress_head_kernel_sizes=regress_head_kernel_sizes) def object_detection_proto_to_params(od_config): """Converts CenterNet.ObjectDetection proto to parameter namedtuple.""" loss = losses_pb2.Loss() # Add dummy classification loss to 
avoid the loss_builder throwing error. # TODO(yuhuic): update the loss builder to take the classification loss # directly. loss.classification_loss.weighted_sigmoid.CopyFrom( losses_pb2.WeightedSigmoidClassificationLoss()) loss.localization_loss.CopyFrom(od_config.localization_loss) _, localization_loss, _, _, _, _, _ = (losses_builder.build(loss)) if od_config.HasField('scale_head_params'): scale_head_num_filters = list(od_config.scale_head_params.num_filters) scale_head_kernel_sizes = list(od_config.scale_head_params.kernel_sizes) else: scale_head_num_filters = [256] scale_head_kernel_sizes = [3] if od_config.HasField('offset_head_params'): offset_head_num_filters = list(od_config.offset_head_params.num_filters) offset_head_kernel_sizes = list(od_config.offset_head_params.kernel_sizes) else: offset_head_num_filters = [256] offset_head_kernel_sizes = [3] return center_net_meta_arch.ObjectDetectionParams( localization_loss=localization_loss, scale_loss_weight=od_config.scale_loss_weight, offset_loss_weight=od_config.offset_loss_weight, task_loss_weight=od_config.task_loss_weight, scale_head_num_filters=scale_head_num_filters, scale_head_kernel_sizes=scale_head_kernel_sizes, offset_head_num_filters=offset_head_num_filters, offset_head_kernel_sizes=offset_head_kernel_sizes) def object_center_proto_to_params(oc_config): """Converts CenterNet.ObjectCenter proto to parameter namedtuple.""" loss = losses_pb2.Loss() # Add dummy localization loss to avoid the loss_builder throwing error. # TODO(yuhuic): update the loss builder to take the localization loss # directly. loss.localization_loss.weighted_l2.CopyFrom( losses_pb2.WeightedL2LocalizationLoss()) loss.classification_loss.CopyFrom(oc_config.classification_loss) classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) keypoint_weights_for_center = [] if oc_config.keypoint_weights_for_center: keypoint_weights_for_center = list(oc_config.keypoint_weights_for_center) if oc_config.HasField('center_head_params'): center_head_num_filters = list(oc_config.center_head_params.num_filters) center_head_kernel_sizes = list(oc_config.center_head_params.kernel_sizes) else: center_head_num_filters = [256] center_head_kernel_sizes = [3] return center_net_meta_arch.ObjectCenterParams( classification_loss=classification_loss, object_center_loss_weight=oc_config.object_center_loss_weight, heatmap_bias_init=oc_config.heatmap_bias_init, min_box_overlap_iou=oc_config.min_box_overlap_iou, max_box_predictions=oc_config.max_box_predictions, use_labeled_classes=oc_config.use_labeled_classes, keypoint_weights_for_center=keypoint_weights_for_center, center_head_num_filters=center_head_num_filters, center_head_kernel_sizes=center_head_kernel_sizes) def mask_proto_to_params(mask_config): """Converts CenterNet.MaskEstimation proto to parameter namedtuple.""" loss = losses_pb2.Loss() # Add dummy localization loss to avoid the loss_builder throwing error. 
loss.localization_loss.weighted_l2.CopyFrom( losses_pb2.WeightedL2LocalizationLoss()) loss.classification_loss.CopyFrom(mask_config.classification_loss) classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) if mask_config.HasField('mask_head_params'): mask_head_num_filters = list(mask_config.mask_head_params.num_filters) mask_head_kernel_sizes = list(mask_config.mask_head_params.kernel_sizes) else: mask_head_num_filters = [256] mask_head_kernel_sizes = [3] return center_net_meta_arch.MaskParams( classification_loss=classification_loss, task_loss_weight=mask_config.task_loss_weight, mask_height=mask_config.mask_height, mask_width=mask_config.mask_width, score_threshold=mask_config.score_threshold, heatmap_bias_init=mask_config.heatmap_bias_init, mask_head_num_filters=mask_head_num_filters, mask_head_kernel_sizes=mask_head_kernel_sizes) def densepose_proto_to_params(densepose_config): """Converts CenterNet.DensePoseEstimation proto to parameter namedtuple.""" classification_loss, localization_loss, _, _, _, _, _ = ( losses_builder.build(densepose_config.loss)) return center_net_meta_arch.DensePoseParams( class_id=densepose_config.class_id, classification_loss=classification_loss, localization_loss=localization_loss, part_loss_weight=densepose_config.part_loss_weight, coordinate_loss_weight=densepose_config.coordinate_loss_weight, num_parts=densepose_config.num_parts, task_loss_weight=densepose_config.task_loss_weight, upsample_to_input_res=densepose_config.upsample_to_input_res, heatmap_bias_init=densepose_config.heatmap_bias_init) def tracking_proto_to_params(tracking_config): """Converts CenterNet.TrackEstimation proto to parameter namedtuple.""" loss = losses_pb2.Loss() # Add dummy localization loss to avoid the loss_builder throwing error. # TODO(yuhuic): update the loss builder to take the localization loss # directly. loss.localization_loss.weighted_l2.CopyFrom( losses_pb2.WeightedL2LocalizationLoss()) loss.classification_loss.CopyFrom(tracking_config.classification_loss) classification_loss, _, _, _, _, _, _ = losses_builder.build(loss) return center_net_meta_arch.TrackParams( num_track_ids=tracking_config.num_track_ids, reid_embed_size=tracking_config.reid_embed_size, classification_loss=classification_loss, num_fc_layers=tracking_config.num_fc_layers, task_loss_weight=tracking_config.task_loss_weight) def temporal_offset_proto_to_params(temporal_offset_config): """Converts CenterNet.TemporalOffsetEstimation proto to param-tuple.""" loss = losses_pb2.Loss() # Add dummy classification loss to avoid the loss_builder throwing error. # TODO(yuhuic): update the loss builder to take the classification loss # directly. loss.classification_loss.weighted_sigmoid.CopyFrom( losses_pb2.WeightedSigmoidClassificationLoss()) loss.localization_loss.CopyFrom(temporal_offset_config.localization_loss) _, localization_loss, _, _, _, _, _ = losses_builder.build(loss) return center_net_meta_arch.TemporalOffsetParams( localization_loss=localization_loss, task_loss_weight=temporal_offset_config.task_loss_weight) def _build_center_net_model(center_net_config, is_training, add_summaries): """Build a CenterNet detection model. Args: center_net_config: A CenterNet proto object with model configuration. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tf summaries in the model. Returns: CenterNetMetaArch based on the config. 
""" image_resizer_fn = image_resizer_builder.build( center_net_config.image_resizer) _check_feature_extractor_exists(center_net_config.feature_extractor.type) feature_extractor = _build_center_net_feature_extractor( center_net_config.feature_extractor, is_training) object_center_params = object_center_proto_to_params( center_net_config.object_center_params) object_detection_params = None if center_net_config.HasField('object_detection_task'): object_detection_params = object_detection_proto_to_params( center_net_config.object_detection_task) if center_net_config.HasField('deepmac_mask_estimation'): logging.warn(('Building experimental DeepMAC meta-arch.' ' Some features may be omitted.')) deepmac_params = deepmac_meta_arch.deepmac_proto_to_params( center_net_config.deepmac_mask_estimation) return deepmac_meta_arch.DeepMACMetaArch( is_training=is_training, add_summaries=add_summaries, num_classes=center_net_config.num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=object_center_params, object_detection_params=object_detection_params, deepmac_params=deepmac_params) keypoint_params_dict = None if center_net_config.keypoint_estimation_task: label_map_proto = label_map_util.load_labelmap( center_net_config.keypoint_label_map_path) keypoint_map_dict = { item.name: item for item in label_map_proto.item if item.keypoints } keypoint_params_dict = {} keypoint_class_id_set = set() all_keypoint_indices = [] for task in center_net_config.keypoint_estimation_task: kp_params = keypoint_proto_to_params(task, keypoint_map_dict) keypoint_params_dict[task.task_name] = kp_params all_keypoint_indices.extend(kp_params.keypoint_indices) if kp_params.class_id in keypoint_class_id_set: raise ValueError(('Multiple keypoint tasks map to the same class id is ' 'not allowed: %d' % kp_params.class_id)) else: keypoint_class_id_set.add(kp_params.class_id) if len(all_keypoint_indices) > len(set(all_keypoint_indices)): raise ValueError('Some keypoint indices are used more than once.') mask_params = None if center_net_config.HasField('mask_estimation_task'): mask_params = mask_proto_to_params(center_net_config.mask_estimation_task) densepose_params = None if center_net_config.HasField('densepose_estimation_task'): densepose_params = densepose_proto_to_params( center_net_config.densepose_estimation_task) track_params = None if center_net_config.HasField('track_estimation_task'): track_params = tracking_proto_to_params( center_net_config.track_estimation_task) temporal_offset_params = None if center_net_config.HasField('temporal_offset_task'): temporal_offset_params = temporal_offset_proto_to_params( center_net_config.temporal_offset_task) non_max_suppression_fn = None if center_net_config.HasField('post_processing'): non_max_suppression_fn, _ = post_processing_builder.build( center_net_config.post_processing) return center_net_meta_arch.CenterNetMetaArch( is_training=is_training, add_summaries=add_summaries, num_classes=center_net_config.num_classes, feature_extractor=feature_extractor, image_resizer_fn=image_resizer_fn, object_center_params=object_center_params, object_detection_params=object_detection_params, keypoint_params_dict=keypoint_params_dict, mask_params=mask_params, densepose_params=densepose_params, track_params=track_params, temporal_offset_params=temporal_offset_params, use_depthwise=center_net_config.use_depthwise, compute_heatmap_sparse=center_net_config.compute_heatmap_sparse, non_max_suppression_fn=non_max_suppression_fn) def 
_build_center_net_feature_extractor(feature_extractor_config, is_training): """Build a CenterNet feature extractor from the given config.""" if feature_extractor_config.type not in CENTER_NET_EXTRACTOR_FUNCTION_MAP: raise ValueError('\'{}\' is not a known CenterNet feature extractor type' .format(feature_extractor_config.type)) # For backwards compatibility: use_separable_conv = ( feature_extractor_config.use_separable_conv or feature_extractor_config.type == 'mobilenet_v2_fpn_sep_conv') kwargs = { 'channel_means': list(feature_extractor_config.channel_means), 'channel_stds': list(feature_extractor_config.channel_stds), 'bgr_ordering': feature_extractor_config.bgr_ordering, 'depth_multiplier': feature_extractor_config.depth_multiplier, 'use_separable_conv': use_separable_conv, 'upsampling_interpolation': feature_extractor_config.upsampling_interpolation, } return CENTER_NET_EXTRACTOR_FUNCTION_MAP[feature_extractor_config.type]( **kwargs) META_ARCH_BUILDER_MAP = { 'ssd': _build_ssd_model, 'faster_rcnn': _build_faster_rcnn_model, 'experimental_model': _build_experimental_model, 'center_net': _build_center_net_model } def build(model_config, is_training, add_summaries=True): """Builds a DetectionModel based on the model config. Args: model_config: A model.proto object containing the config for the desired DetectionModel. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tensorflow summaries in the model graph. Returns: DetectionModel based on the config. Raises: ValueError: On invalid meta architecture or model. """ if not isinstance(model_config, model_pb2.DetectionModel): raise ValueError('model_config not of type model_pb2.DetectionModel.') meta_architecture = model_config.WhichOneof('model') if meta_architecture not in META_ARCH_BUILDER_MAP: raise ValueError('Unknown meta architecture: {}'.format(meta_architecture)) else: build_func = META_ARCH_BUILDER_MAP[meta_architecture] return build_func(getattr(model_config, meta_architecture), is_training, add_summaries)
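# --- Usage sketch (added for illustration; not part of the original module).
# It shows the usual route from a pipeline config file to a DetectionModel via
# build() above. The config_util helper is part of object_detection.utils; the
# pipeline_config_path argument is a placeholder supplied by the caller.
def _example_build_detection_model(pipeline_config_path):
  from object_detection.utils import config_util  # Local import for the sketch.

  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  model_config = configs['model']  # A model_pb2.DetectionModel message.
  # is_training=False builds the model for evaluation or inference; summaries
  # are typically only added during training.
  return build(model_config, is_training=False, add_summaries=False)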
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/model_builder.py
model_builder.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from object_detection.data_decoders import tf_example_decoder
from object_detection.data_decoders import tf_sequence_example_decoder
from object_detection.protos import input_reader_pb2


def build(input_reader_config):
  """Builds a DataDecoder based only on the open source config proto.

  Args:
    input_reader_config: An input_reader_pb2.InputReader object.

  Returns:
    A DataDecoder based on the input_reader_config.

  Raises:
    ValueError: On invalid input reader proto.
  """
  if not isinstance(input_reader_config, input_reader_pb2.InputReader):
    raise ValueError('input_reader_config not of type '
                     'input_reader_pb2.InputReader.')

  if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    label_map_proto_file = None
    if input_reader_config.HasField('label_map_path'):
      label_map_proto_file = input_reader_config.label_map_path
    input_type = input_reader_config.input_type
    if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'):
      decoder = tf_example_decoder.TfExampleDecoder(
          load_instance_masks=input_reader_config.load_instance_masks,
          load_multiclass_scores=input_reader_config.load_multiclass_scores,
          load_context_features=input_reader_config.load_context_features,
          instance_mask_type=input_reader_config.mask_type,
          label_map_proto_file=label_map_proto_file,
          use_display_name=input_reader_config.use_display_name,
          num_additional_channels=input_reader_config.num_additional_channels,
          num_keypoints=input_reader_config.num_keypoints,
          expand_hierarchy_labels=input_reader_config.expand_labels_hierarchy,
          load_dense_pose=input_reader_config.load_dense_pose,
          load_track_id=input_reader_config.load_track_id,
          load_keypoint_depth_features=input_reader_config
          .load_keypoint_depth_features)
      return decoder
    elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'):
      decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
          label_map_proto_file=label_map_proto_file,
          load_context_features=input_reader_config.load_context_features,
          load_context_image_ids=input_reader_config.load_context_image_ids)
      return decoder
    raise ValueError('Unsupported input_type in config.')
  raise ValueError('Unsupported input_reader_config.')
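# --- Usage sketch (added for illustration; not part of the original module).
# It drives build() above with a text-format InputReader proto and decodes one
# serialized tf.Example string tensor. The proto fields follow
# input_reader.proto as referenced above; the label map and record paths are
# placeholders supplied by the caller's setup.
def _example_build_and_decode(serialized_example):
  from google.protobuf import text_format  # Local import for the sketch only.

  input_reader_proto = text_format.Parse(
      """
      label_map_path: "path/to/label_map.pbtxt"
      tf_record_input_reader {
        input_path: "path/to/examples.record"
      }
      """, input_reader_pb2.InputReader())
  decoder = build(input_reader_proto)
  # decode() returns a dict of tensors keyed by the standard field names
  # (image, groundtruth_boxes, groundtruth_classes, ...).
  return decoder.decode(serialized_example)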
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/decoder_builder.py
decoder_builder.py
import functools from object_detection.core import balanced_positive_negative_sampler as sampler from object_detection.core import losses from object_detection.protos import losses_pb2 from object_detection.utils import ops def build(loss_config): """Build losses based on the config. Builds classification, localization losses and optionally a hard example miner based on the config. Args: loss_config: A losses_pb2.Loss object. Returns: classification_loss: Classification loss object. localization_loss: Localization loss object. classification_weight: Classification loss weight. localization_weight: Localization loss weight. hard_example_miner: Hard example miner object. random_example_sampler: BalancedPositiveNegativeSampler object. Raises: ValueError: If hard_example_miner is used with sigmoid_focal_loss. ValueError: If random_example_sampler is getting non-positive value as desired positive example fraction. """ classification_loss = _build_classification_loss( loss_config.classification_loss) localization_loss = _build_localization_loss( loss_config.localization_loss) classification_weight = loss_config.classification_weight localization_weight = loss_config.localization_weight hard_example_miner = None if loss_config.HasField('hard_example_miner'): if (loss_config.classification_loss.WhichOneof('classification_loss') == 'weighted_sigmoid_focal'): raise ValueError('HardExampleMiner should not be used with sigmoid focal ' 'loss') hard_example_miner = build_hard_example_miner( loss_config.hard_example_miner, classification_weight, localization_weight) random_example_sampler = None if loss_config.HasField('random_example_sampler'): if loss_config.random_example_sampler.positive_sample_fraction <= 0: raise ValueError('RandomExampleSampler should not use non-positive' 'value as positive sample fraction.') random_example_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=loss_config.random_example_sampler. positive_sample_fraction) if loss_config.expected_loss_weights == loss_config.NONE: expected_loss_weights_fn = None elif loss_config.expected_loss_weights == loss_config.EXPECTED_SAMPLING: expected_loss_weights_fn = functools.partial( ops.expected_classification_loss_by_expected_sampling, min_num_negative_samples=loss_config.min_num_negative_samples, desired_negative_sampling_ratio=loss_config .desired_negative_sampling_ratio) elif (loss_config.expected_loss_weights == loss_config .REWEIGHTING_UNMATCHED_ANCHORS): expected_loss_weights_fn = functools.partial( ops.expected_classification_loss_by_reweighting_unmatched_anchors, min_num_negative_samples=loss_config.min_num_negative_samples, desired_negative_sampling_ratio=loss_config .desired_negative_sampling_ratio) else: raise ValueError('Not a valid value for expected_classification_loss.') return (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, random_example_sampler, expected_loss_weights_fn) def build_hard_example_miner(config, classification_weight, localization_weight): """Builds hard example miner based on the config. Args: config: A losses_pb2.HardExampleMiner object. classification_weight: Classification loss weight. localization_weight: Localization loss weight. Returns: Hard example miner. 
""" loss_type = None if config.loss_type == losses_pb2.HardExampleMiner.BOTH: loss_type = 'both' if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION: loss_type = 'cls' if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION: loss_type = 'loc' max_negatives_per_positive = None num_hard_examples = None if config.max_negatives_per_positive > 0: max_negatives_per_positive = config.max_negatives_per_positive if config.num_hard_examples > 0: num_hard_examples = config.num_hard_examples hard_example_miner = losses.HardExampleMiner( num_hard_examples=num_hard_examples, iou_threshold=config.iou_threshold, loss_type=loss_type, cls_loss_weight=classification_weight, loc_loss_weight=localization_weight, max_negatives_per_positive=max_negatives_per_positive, min_negatives_per_image=config.min_negatives_per_image) return hard_example_miner def build_faster_rcnn_classification_loss(loss_config): """Builds a classification loss for Faster RCNN based on the loss config. Args: loss_config: A losses_pb2.ClassificationLoss object. Returns: Loss based on the config. Raises: ValueError: On invalid loss_config. """ if not isinstance(loss_config, losses_pb2.ClassificationLoss): raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.') loss_type = loss_config.WhichOneof('classification_loss') if loss_type == 'weighted_sigmoid': return losses.WeightedSigmoidClassificationLoss() if loss_type == 'weighted_softmax': config = loss_config.weighted_softmax return losses.WeightedSoftmaxClassificationLoss( logit_scale=config.logit_scale) if loss_type == 'weighted_logits_softmax': config = loss_config.weighted_logits_softmax return losses.WeightedSoftmaxClassificationAgainstLogitsLoss( logit_scale=config.logit_scale) if loss_type == 'weighted_sigmoid_focal': config = loss_config.weighted_sigmoid_focal alpha = None if config.HasField('alpha'): alpha = config.alpha return losses.SigmoidFocalClassificationLoss( gamma=config.gamma, alpha=alpha) # By default, Faster RCNN second stage classifier uses Softmax loss # with anchor-wise outputs. config = loss_config.weighted_softmax return losses.WeightedSoftmaxClassificationLoss( logit_scale=config.logit_scale) def _build_localization_loss(loss_config): """Builds a localization loss based on the loss config. Args: loss_config: A losses_pb2.LocalizationLoss object. Returns: Loss based on the config. Raises: ValueError: On invalid loss_config. """ if not isinstance(loss_config, losses_pb2.LocalizationLoss): raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.') loss_type = loss_config.WhichOneof('localization_loss') if loss_type == 'weighted_l2': return losses.WeightedL2LocalizationLoss() if loss_type == 'weighted_smooth_l1': return losses.WeightedSmoothL1LocalizationLoss( loss_config.weighted_smooth_l1.delta) if loss_type == 'weighted_iou': return losses.WeightedIOULocalizationLoss() if loss_type == 'l1_localization_loss': return losses.L1LocalizationLoss() if loss_type == 'weighted_giou': return losses.WeightedGIOULocalizationLoss() raise ValueError('Empty loss config.') def _build_classification_loss(loss_config): """Builds a classification loss based on the loss config. Args: loss_config: A losses_pb2.ClassificationLoss object. Returns: Loss based on the config. Raises: ValueError: On invalid loss_config. 
""" if not isinstance(loss_config, losses_pb2.ClassificationLoss): raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.') loss_type = loss_config.WhichOneof('classification_loss') if loss_type == 'weighted_sigmoid': return losses.WeightedSigmoidClassificationLoss() elif loss_type == 'weighted_sigmoid_focal': config = loss_config.weighted_sigmoid_focal alpha = None if config.HasField('alpha'): alpha = config.alpha return losses.SigmoidFocalClassificationLoss( gamma=config.gamma, alpha=alpha) elif loss_type == 'weighted_softmax': config = loss_config.weighted_softmax return losses.WeightedSoftmaxClassificationLoss( logit_scale=config.logit_scale) elif loss_type == 'weighted_logits_softmax': config = loss_config.weighted_logits_softmax return losses.WeightedSoftmaxClassificationAgainstLogitsLoss( logit_scale=config.logit_scale) elif loss_type == 'bootstrapped_sigmoid': config = loss_config.bootstrapped_sigmoid return losses.BootstrappedSigmoidClassificationLoss( alpha=config.alpha, bootstrap_type=('hard' if config.hard_bootstrap else 'soft')) elif loss_type == 'penalty_reduced_logistic_focal_loss': config = loss_config.penalty_reduced_logistic_focal_loss return losses.PenaltyReducedLogisticFocalLoss( alpha=config.alpha, beta=config.beta) elif loss_type == 'weighted_dice_classification_loss': config = loss_config.weighted_dice_classification_loss return losses.WeightedDiceClassificationLoss( squared_normalization=config.squared_normalization) else: raise ValueError('Empty loss config.')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/losses_builder.py
losses_builder.py
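A minimal sketch of driving `losses_builder.build` above from a text-format `losses_pb2.Loss` message. The particular loss choices and weights are illustrative only; since pairing a hard example miner with sigmoid focal loss raises a ValueError, none is configured here:

from google.protobuf import text_format

from object_detection.builders import losses_builder
from object_detection.protos import losses_pb2

# Illustrative config: smooth L1 localization loss plus sigmoid focal
# classification loss, with equal weights.
losses_text_proto = """
  localization_loss {
    weighted_smooth_l1 {
    }
  }
  classification_loss {
    weighted_sigmoid_focal {
      gamma: 2.0
      alpha: 0.25
    }
  }
  classification_weight: 1.0
  localization_weight: 1.0
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)

# build() returns a 7-tuple; with this config the miner, sampler and
# expected_loss_weights_fn entries come back as None.
(classification_loss, localization_loss, classification_weight,
 localization_weight, hard_example_miner, random_example_sampler,
 expected_loss_weights_fn) = losses_builder.build(losses_proto)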
import collections import tensorflow.compat.v1 as tf from object_detection.predictors import convolutional_box_predictor from object_detection.predictors import convolutional_keras_box_predictor from object_detection.predictors import mask_rcnn_box_predictor from object_detection.predictors import mask_rcnn_keras_box_predictor from object_detection.predictors import rfcn_box_predictor from object_detection.predictors import rfcn_keras_box_predictor from object_detection.predictors.heads import box_head from object_detection.predictors.heads import class_head from object_detection.predictors.heads import keras_box_head from object_detection.predictors.heads import keras_class_head from object_detection.predictors.heads import keras_mask_head from object_detection.predictors.heads import mask_head from object_detection.protos import box_predictor_pb2 def build_convolutional_box_predictor(is_training, num_classes, conv_hyperparams_fn, min_depth, max_depth, num_layers_before_predictor, use_dropout, dropout_keep_prob, kernel_size, box_code_size, apply_sigmoid_to_scores=False, add_background_class=True, class_prediction_bias_init=0.0, use_depthwise=False, box_encodings_clip_range=None): """Builds the ConvolutionalBoxPredictor from the arguments. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. num_layers_before_predictor: Number of the additional conv layers before the predictor. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). box_code_size: Size of encoding for each box. apply_sigmoid_to_scores: If True, apply the sigmoid on the output class_predictions. add_background_class: Whether to add an implicit background class. class_prediction_bias_init: Constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping the box_encodings. Returns: A ConvolutionalBoxPredictor class. 
""" box_prediction_head = box_head.ConvolutionalBoxHead( is_training=is_training, box_code_size=box_code_size, kernel_size=kernel_size, use_depthwise=use_depthwise, box_encodings_clip_range=box_encodings_clip_range) class_prediction_head = class_head.ConvolutionalClassHead( is_training=is_training, num_class_slots=num_classes + 1 if add_background_class else num_classes, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, kernel_size=kernel_size, apply_sigmoid_to_scores=apply_sigmoid_to_scores, class_prediction_bias_init=class_prediction_bias_init, use_depthwise=use_depthwise) other_heads = {} return convolutional_box_predictor.ConvolutionalBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, other_heads=other_heads, conv_hyperparams_fn=conv_hyperparams_fn, num_layers_before_predictor=num_layers_before_predictor, min_depth=min_depth, max_depth=max_depth) def build_convolutional_keras_box_predictor(is_training, num_classes, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, num_predictions_per_location_list, min_depth, max_depth, num_layers_before_predictor, use_dropout, dropout_keep_prob, kernel_size, box_code_size, add_background_class=True, class_prediction_bias_init=0.0, use_depthwise=False, box_encodings_clip_range=None, name='BoxPredictor'): """Builds the Keras ConvolutionalBoxPredictor from the arguments. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. num_layers_before_predictor: Number of the additional conv layers before the predictor. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). box_code_size: Size of encoding for each box. add_background_class: Whether to add an implicit background class. 
class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping the box_encodings. name: A string name scope to assign to the box predictor. If `None`, Keras will auto-generate one from the class name. Returns: A Keras ConvolutionalBoxPredictor class. """ box_prediction_heads = [] class_prediction_heads = [] other_heads = {} for stack_index, num_predictions_per_location in enumerate( num_predictions_per_location_list): box_prediction_heads.append( keras_box_head.ConvolutionalBoxHead( is_training=is_training, box_code_size=box_code_size, kernel_size=kernel_size, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, num_predictions_per_location=num_predictions_per_location, use_depthwise=use_depthwise, box_encodings_clip_range=box_encodings_clip_range, name='ConvolutionalBoxHead_%d' % stack_index)) class_prediction_heads.append( keras_class_head.ConvolutionalClassHead( is_training=is_training, num_class_slots=( num_classes + 1 if add_background_class else num_classes), use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, kernel_size=kernel_size, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, num_predictions_per_location=num_predictions_per_location, class_prediction_bias_init=class_prediction_bias_init, use_depthwise=use_depthwise, name='ConvolutionalClassHead_%d' % stack_index)) return convolutional_keras_box_predictor.ConvolutionalBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_heads=box_prediction_heads, class_prediction_heads=class_prediction_heads, other_heads=other_heads, conv_hyperparams=conv_hyperparams, num_layers_before_predictor=num_layers_before_predictor, min_depth=min_depth, max_depth=max_depth, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, name=name) def build_weight_shared_convolutional_box_predictor( is_training, num_classes, conv_hyperparams_fn, depth, num_layers_before_predictor, box_code_size, kernel_size=3, add_background_class=True, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, share_prediction_tower=False, apply_batch_norm=True, use_depthwise=False, score_converter_fn=tf.identity, box_encodings_clip_range=None, keyword_args=None): """Builds and returns a WeightSharedConvolutionalBoxPredictor class. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. add_background_class: Whether to add an implicit background class. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. 
share_prediction_tower: Whether to share the multi-layer tower between box prediction and class prediction heads. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. use_depthwise: Whether to use depthwise separable conv2d instead of conv2d. score_converter_fn: Callable score converter to perform elementwise op on class scores. box_encodings_clip_range: Min and max values for clipping the box_encodings. keyword_args: A dictionary with additional args. Returns: A WeightSharedConvolutionalBoxPredictor class. """ box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( box_code_size=box_code_size, kernel_size=kernel_size, use_depthwise=use_depthwise, box_encodings_clip_range=box_encodings_clip_range) class_prediction_head = ( class_head.WeightSharedConvolutionalClassHead( num_class_slots=( num_classes + 1 if add_background_class else num_classes), kernel_size=kernel_size, class_prediction_bias_init=class_prediction_bias_init, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, use_depthwise=use_depthwise, score_converter_fn=score_converter_fn)) other_heads = {} return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, other_heads=other_heads, conv_hyperparams_fn=conv_hyperparams_fn, depth=depth, num_layers_before_predictor=num_layers_before_predictor, kernel_size=kernel_size, apply_batch_norm=apply_batch_norm, share_prediction_tower=share_prediction_tower, use_depthwise=use_depthwise) def build_weight_shared_convolutional_keras_box_predictor( is_training, num_classes, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, num_predictions_per_location_list, depth, num_layers_before_predictor, box_code_size, kernel_size=3, add_background_class=True, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, share_prediction_tower=False, apply_batch_norm=True, use_depthwise=False, apply_conv_hyperparams_to_heads=False, apply_conv_hyperparams_pointwise=False, score_converter_fn=tf.identity, box_encodings_clip_range=None, name='WeightSharedConvolutionalBoxPredictor', keyword_args=None): """Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. box_code_size: Size of encoding for each box. 
kernel_size: Size of final convolution kernel. add_background_class: Whether to add an implicit background class. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. share_prediction_tower: Whether to share the multi-layer tower between box prediction and class prediction heads. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. use_depthwise: Whether to use depthwise separable conv2d instead of conv2d. apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to depthwise seperable convolution layers in the box and class heads. By default, the conv_hyperparams are only applied to layers in the predictor tower when using depthwise separable convolutions. apply_conv_hyperparams_pointwise: Whether to apply the conv_hyperparams to the pointwise_initializer and pointwise_regularizer when using depthwise separable convolutions. By default, conv_hyperparams are only applied to the depthwise initializer and regularizer when use_depthwise is true. score_converter_fn: Callable score converter to perform elementwise op on class scores. box_encodings_clip_range: Min and max values for clipping the box_encodings. name: A string name scope to assign to the box predictor. If `None`, Keras will auto-generate one from the class name. keyword_args: A dictionary with additional args. Returns: A Keras WeightSharedConvolutionalBoxPredictor class. """ if len(set(num_predictions_per_location_list)) > 1: raise ValueError('num predictions per location must be same for all' 'feature maps, found: {}'.format( num_predictions_per_location_list)) num_predictions_per_location = num_predictions_per_location_list[0] box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( box_code_size=box_code_size, kernel_size=kernel_size, conv_hyperparams=conv_hyperparams, num_predictions_per_location=num_predictions_per_location, use_depthwise=use_depthwise, apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads, box_encodings_clip_range=box_encodings_clip_range, name='WeightSharedConvolutionalBoxHead') class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( num_class_slots=( num_classes + 1 if add_background_class else num_classes), use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, kernel_size=kernel_size, conv_hyperparams=conv_hyperparams, num_predictions_per_location=num_predictions_per_location, class_prediction_bias_init=class_prediction_bias_init, use_depthwise=use_depthwise, apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads, score_converter_fn=score_converter_fn, name='WeightSharedConvolutionalClassHead') other_heads = {} return ( convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, other_heads=other_heads, conv_hyperparams=conv_hyperparams, depth=depth, num_layers_before_predictor=num_layers_before_predictor, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, kernel_size=kernel_size, apply_batch_norm=apply_batch_norm, share_prediction_tower=share_prediction_tower, use_depthwise=use_depthwise, apply_conv_hyperparams_pointwise=apply_conv_hyperparams_pointwise, name=name)) def build_mask_rcnn_keras_box_predictor(is_training, num_classes, 
fc_hyperparams, freeze_batchnorm, use_dropout, dropout_keep_prob, box_code_size, add_background_class=True, share_box_across_classes=False, predict_instance_masks=False, conv_hyperparams=None, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False, convolve_then_upsample_masks=False): """Builds and returns a MaskRCNNKerasBoxPredictor class. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for fully connected dense ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. box_code_size: Size of encoding for each box. add_background_class: Whether to add an implicit background class. share_box_across_classes: Whether to share boxes across classes rather than use a different box for each class. predict_instance_masks: If True, will add a third stage mask prediction to the returned class. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. mask_height: Desired output mask height. The default value is 14. mask_width: Desired output mask width. The default value is 14. mask_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. mask_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. convolve_then_upsample_masks: Whether to apply convolutions on mask features before upsampling using nearest neighbor resizing. Otherwise, mask features are resized to [`mask_height`, `mask_width`] using bilinear resizing before applying convolutions. Returns: A MaskRCNNKerasBoxPredictor class. """ box_prediction_head = keras_box_head.MaskRCNNBoxHead( is_training=is_training, num_classes=num_classes, fc_hyperparams=fc_hyperparams, freeze_batchnorm=freeze_batchnorm, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, box_code_size=box_code_size, share_box_across_classes=share_box_across_classes) class_prediction_head = keras_class_head.MaskRCNNClassHead( is_training=is_training, num_class_slots=num_classes + 1 if add_background_class else num_classes, fc_hyperparams=fc_hyperparams, freeze_batchnorm=freeze_batchnorm, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob) third_stage_heads = {} if predict_instance_masks: third_stage_heads[ mask_rcnn_box_predictor. 
MASK_PREDICTIONS] = keras_mask_head.MaskRCNNMaskHead( is_training=is_training, num_classes=num_classes, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, mask_height=mask_height, mask_width=mask_width, mask_prediction_num_conv_layers=mask_prediction_num_conv_layers, mask_prediction_conv_depth=mask_prediction_conv_depth, masks_are_class_agnostic=masks_are_class_agnostic, convolve_then_upsample=convolve_then_upsample_masks) return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor( is_training=is_training, num_classes=num_classes, freeze_batchnorm=freeze_batchnorm, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, third_stage_heads=third_stage_heads) def build_mask_rcnn_box_predictor(is_training, num_classes, fc_hyperparams_fn, use_dropout, dropout_keep_prob, box_code_size, add_background_class=True, share_box_across_classes=False, predict_instance_masks=False, conv_hyperparams_fn=None, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False, convolve_then_upsample_masks=False): """Builds and returns a MaskRCNNBoxPredictor class. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. box_code_size: Size of encoding for each box. add_background_class: Whether to add an implicit background class. share_box_across_classes: Whether to share boxes across classes rather than use a different box for each class. predict_instance_masks: If True, will add a third stage mask prediction to the returned class. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. mask_height: Desired output mask height. The default value is 14. mask_width: Desired output mask width. The default value is 14. mask_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. mask_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. convolve_then_upsample_masks: Whether to apply convolutions on mask features before upsampling using nearest neighbor resizing. Otherwise, mask features are resized to [`mask_height`, `mask_width`] using bilinear resizing before applying convolutions. Returns: A MaskRCNNBoxPredictor class. 
""" box_prediction_head = box_head.MaskRCNNBoxHead( is_training=is_training, num_classes=num_classes, fc_hyperparams_fn=fc_hyperparams_fn, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, box_code_size=box_code_size, share_box_across_classes=share_box_across_classes) class_prediction_head = class_head.MaskRCNNClassHead( is_training=is_training, num_class_slots=num_classes + 1 if add_background_class else num_classes, fc_hyperparams_fn=fc_hyperparams_fn, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob) third_stage_heads = {} if predict_instance_masks: third_stage_heads[ mask_rcnn_box_predictor. MASK_PREDICTIONS] = mask_head.MaskRCNNMaskHead( num_classes=num_classes, conv_hyperparams_fn=conv_hyperparams_fn, mask_height=mask_height, mask_width=mask_width, mask_prediction_num_conv_layers=mask_prediction_num_conv_layers, mask_prediction_conv_depth=mask_prediction_conv_depth, masks_are_class_agnostic=masks_are_class_agnostic, convolve_then_upsample=convolve_then_upsample_masks) return mask_rcnn_box_predictor.MaskRCNNBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, third_stage_heads=third_stage_heads) def build_score_converter(score_converter_config, is_training): """Builds score converter based on the config. Builds one of [tf.identity, tf.sigmoid] score converters based on the config and whether the BoxPredictor is for training or inference. Args: score_converter_config: box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter. is_training: Indicates whether the BoxPredictor is in training mode. Returns: Callable score converter op. Raises: ValueError: On unknown score converter. """ if score_converter_config == ( box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY): return tf.identity if score_converter_config == ( box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID): return tf.identity if is_training else tf.sigmoid raise ValueError('Unknown score converter.') BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange', ['min', 'max']) def build(argscope_fn, box_predictor_config, is_training, num_classes, add_background_class=True): """Builds box predictor based on the configuration. Builds box predictor based on the configuration. See box_predictor.proto for configurable options. Also, see box_predictor.py for more details. Args: argscope_fn: A function that takes the following inputs: * hyperparams_pb2.Hyperparams proto * a boolean indicating if the model is in training mode. and returns a tf slim argscope for Conv and FC hyperparameters. box_predictor_config: box_predictor_pb2.BoxPredictor proto containing configuration. is_training: Whether the models is in training mode. num_classes: Number of classes to predict. add_background_class: Whether to add an implicit background class. Returns: box_predictor: box_predictor.BoxPredictor object. Raises: ValueError: On unknown box predictor. 
""" if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor): raise ValueError('box_predictor_config not of type ' 'box_predictor_pb2.BoxPredictor.') box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof') if box_predictor_oneof == 'convolutional_box_predictor': config_box_predictor = box_predictor_config.convolutional_box_predictor conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, is_training) # Optionally apply clipping to box encodings, when box_encodings_clip_range # is set. box_encodings_clip_range = None if config_box_predictor.HasField('box_encodings_clip_range'): box_encodings_clip_range = BoxEncodingsClipRange( min=config_box_predictor.box_encodings_clip_range.min, max=config_box_predictor.box_encodings_clip_range.max) return build_convolutional_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, conv_hyperparams_fn=conv_hyperparams_fn, use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, box_code_size=config_box_predictor.box_code_size, kernel_size=config_box_predictor.kernel_size, num_layers_before_predictor=( config_box_predictor.num_layers_before_predictor), min_depth=config_box_predictor.min_depth, max_depth=config_box_predictor.max_depth, apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores, class_prediction_bias_init=( config_box_predictor.class_prediction_bias_init), use_depthwise=config_box_predictor.use_depthwise, box_encodings_clip_range=box_encodings_clip_range) if box_predictor_oneof == 'weight_shared_convolutional_box_predictor': config_box_predictor = ( box_predictor_config.weight_shared_convolutional_box_predictor) conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, is_training) apply_batch_norm = config_box_predictor.conv_hyperparams.HasField( 'batch_norm') # During training phase, logits are used to compute the loss. Only apply # sigmoid at inference to make the inference graph TPU friendly. score_converter_fn = build_score_converter( config_box_predictor.score_converter, is_training) # Optionally apply clipping to box encodings, when box_encodings_clip_range # is set. 
box_encodings_clip_range = None if config_box_predictor.HasField('box_encodings_clip_range'): box_encodings_clip_range = BoxEncodingsClipRange( min=config_box_predictor.box_encodings_clip_range.min, max=config_box_predictor.box_encodings_clip_range.max) keyword_args = None return build_weight_shared_convolutional_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, conv_hyperparams_fn=conv_hyperparams_fn, depth=config_box_predictor.depth, num_layers_before_predictor=( config_box_predictor.num_layers_before_predictor), box_code_size=config_box_predictor.box_code_size, kernel_size=config_box_predictor.kernel_size, class_prediction_bias_init=( config_box_predictor.class_prediction_bias_init), use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, share_prediction_tower=config_box_predictor.share_prediction_tower, apply_batch_norm=apply_batch_norm, use_depthwise=config_box_predictor.use_depthwise, score_converter_fn=score_converter_fn, box_encodings_clip_range=box_encodings_clip_range, keyword_args=keyword_args) if box_predictor_oneof == 'mask_rcnn_box_predictor': config_box_predictor = box_predictor_config.mask_rcnn_box_predictor fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams, is_training) conv_hyperparams_fn = None if config_box_predictor.HasField('conv_hyperparams'): conv_hyperparams_fn = argscope_fn( config_box_predictor.conv_hyperparams, is_training) return build_mask_rcnn_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, fc_hyperparams_fn=fc_hyperparams_fn, use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, box_code_size=config_box_predictor.box_code_size, share_box_across_classes=( config_box_predictor.share_box_across_classes), predict_instance_masks=config_box_predictor.predict_instance_masks, conv_hyperparams_fn=conv_hyperparams_fn, mask_height=config_box_predictor.mask_height, mask_width=config_box_predictor.mask_width, mask_prediction_num_conv_layers=( config_box_predictor.mask_prediction_num_conv_layers), mask_prediction_conv_depth=( config_box_predictor.mask_prediction_conv_depth), masks_are_class_agnostic=( config_box_predictor.masks_are_class_agnostic), convolve_then_upsample_masks=( config_box_predictor.convolve_then_upsample_masks)) if box_predictor_oneof == 'rfcn_box_predictor': config_box_predictor = box_predictor_config.rfcn_box_predictor conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, is_training) box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor( is_training=is_training, num_classes=num_classes, conv_hyperparams_fn=conv_hyperparams_fn, crop_size=[config_box_predictor.crop_height, config_box_predictor.crop_width], num_spatial_bins=[config_box_predictor.num_spatial_bins_height, config_box_predictor.num_spatial_bins_width], depth=config_box_predictor.depth, box_code_size=config_box_predictor.box_code_size) return box_predictor_object raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof)) def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update, num_predictions_per_location_list, box_predictor_config, is_training, num_classes, add_background_class=True): """Builds a Keras-based box predictor based on the configuration. Builds Keras-based box predictor based on the configuration. See box_predictor.proto for configurable options. 
Also, see box_predictor.py for more details. Args: hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams proto and returns a `hyperparams_builder.KerasLayerHyperparams` for Conv or FC hyperparameters. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. box_predictor_config: box_predictor_pb2.BoxPredictor proto containing configuration. is_training: Whether the models is in training mode. num_classes: Number of classes to predict. add_background_class: Whether to add an implicit background class. Returns: box_predictor: box_predictor.KerasBoxPredictor object. Raises: ValueError: On unknown box predictor, or one with no Keras box predictor. """ if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor): raise ValueError('box_predictor_config not of type ' 'box_predictor_pb2.BoxPredictor.') box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof') if box_predictor_oneof == 'convolutional_box_predictor': config_box_predictor = box_predictor_config.convolutional_box_predictor conv_hyperparams = hyperparams_fn( config_box_predictor.conv_hyperparams) # Optionally apply clipping to box encodings, when box_encodings_clip_range # is set. box_encodings_clip_range = None if config_box_predictor.HasField('box_encodings_clip_range'): box_encodings_clip_range = BoxEncodingsClipRange( min=config_box_predictor.box_encodings_clip_range.min, max=config_box_predictor.box_encodings_clip_range.max) return build_convolutional_keras_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, num_predictions_per_location_list=num_predictions_per_location_list, use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, box_code_size=config_box_predictor.box_code_size, kernel_size=config_box_predictor.kernel_size, num_layers_before_predictor=( config_box_predictor.num_layers_before_predictor), min_depth=config_box_predictor.min_depth, max_depth=config_box_predictor.max_depth, class_prediction_bias_init=( config_box_predictor.class_prediction_bias_init), use_depthwise=config_box_predictor.use_depthwise, box_encodings_clip_range=box_encodings_clip_range) if box_predictor_oneof == 'weight_shared_convolutional_box_predictor': config_box_predictor = ( box_predictor_config.weight_shared_convolutional_box_predictor) conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams) apply_batch_norm = config_box_predictor.conv_hyperparams.HasField( 'batch_norm') # During training phase, logits are used to compute the loss. Only apply # sigmoid at inference to make the inference graph TPU friendly. This is # required because during TPU inference, model.postprocess is not called. 
score_converter_fn = build_score_converter( config_box_predictor.score_converter, is_training) # Optionally apply clipping to box encodings, when box_encodings_clip_range # is set. box_encodings_clip_range = None if config_box_predictor.HasField('box_encodings_clip_range'): box_encodings_clip_range = BoxEncodingsClipRange( min=config_box_predictor.box_encodings_clip_range.min, max=config_box_predictor.box_encodings_clip_range.max) keyword_args = None return build_weight_shared_convolutional_keras_box_predictor( is_training=is_training, num_classes=num_classes, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, num_predictions_per_location_list=num_predictions_per_location_list, depth=config_box_predictor.depth, num_layers_before_predictor=( config_box_predictor.num_layers_before_predictor), box_code_size=config_box_predictor.box_code_size, kernel_size=config_box_predictor.kernel_size, add_background_class=add_background_class, class_prediction_bias_init=( config_box_predictor.class_prediction_bias_init), use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, share_prediction_tower=config_box_predictor.share_prediction_tower, apply_batch_norm=apply_batch_norm, use_depthwise=config_box_predictor.use_depthwise, apply_conv_hyperparams_to_heads=( config_box_predictor.apply_conv_hyperparams_to_heads), apply_conv_hyperparams_pointwise=( config_box_predictor.apply_conv_hyperparams_pointwise), score_converter_fn=score_converter_fn, box_encodings_clip_range=box_encodings_clip_range, keyword_args=keyword_args) if box_predictor_oneof == 'mask_rcnn_box_predictor': config_box_predictor = box_predictor_config.mask_rcnn_box_predictor fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams) conv_hyperparams = None if config_box_predictor.HasField('conv_hyperparams'): conv_hyperparams = hyperparams_fn( config_box_predictor.conv_hyperparams) return build_mask_rcnn_keras_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, fc_hyperparams=fc_hyperparams, freeze_batchnorm=freeze_batchnorm, use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, box_code_size=config_box_predictor.box_code_size, share_box_across_classes=( config_box_predictor.share_box_across_classes), predict_instance_masks=config_box_predictor.predict_instance_masks, conv_hyperparams=conv_hyperparams, mask_height=config_box_predictor.mask_height, mask_width=config_box_predictor.mask_width, mask_prediction_num_conv_layers=( config_box_predictor.mask_prediction_num_conv_layers), mask_prediction_conv_depth=( config_box_predictor.mask_prediction_conv_depth), masks_are_class_agnostic=( config_box_predictor.masks_are_class_agnostic), convolve_then_upsample_masks=( config_box_predictor.convolve_then_upsample_masks)) if box_predictor_oneof == 'rfcn_box_predictor': config_box_predictor = box_predictor_config.rfcn_box_predictor conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams) box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor( is_training=is_training, num_classes=num_classes, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, crop_size=[config_box_predictor.crop_height, config_box_predictor.crop_width], num_spatial_bins=[config_box_predictor.num_spatial_bins_height, config_box_predictor.num_spatial_bins_width], depth=config_box_predictor.depth, 
box_code_size=config_box_predictor.box_code_size) return box_predictor_object raise ValueError( 'Unknown box predictor for Keras: {}'.format(box_predictor_oneof))
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/box_predictor_builder.py
box_predictor_builder.py
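A minimal sketch of `box_predictor_builder.build` above for the convolutional box predictor case, using `hyperparams_builder.build` (shown later in this package) as the `argscope_fn`. The hyperparameter values and `num_classes` are illustrative assumptions, not recommended settings:

from google.protobuf import text_format

from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2

# Illustrative SSD-style convolutional box predictor config.
box_predictor_text_proto = """
  convolutional_box_predictor {
    conv_hyperparams {
      regularizer {
        l2_regularizer {
          weight: 0.0004
        }
      }
      initializer {
        truncated_normal_initializer {
          stddev: 0.03
        }
      }
    }
    use_dropout: true
    dropout_keep_probability: 0.8
    kernel_size: 3
    box_code_size: 4
  }
"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)

# Returns a convolutional_box_predictor.ConvolutionalBoxPredictor.
box_predictor = box_predictor_builder.build(
    argscope_fn=hyperparams_builder.build,
    box_predictor_config=box_predictor_proto,
    is_training=True,
    num_classes=90)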
"""A function to build an object detection box coder from configuration.""" from object_detection.box_coders import faster_rcnn_box_coder from object_detection.box_coders import keypoint_box_coder from object_detection.box_coders import mean_stddev_box_coder from object_detection.box_coders import square_box_coder from object_detection.protos import box_coder_pb2 def build(box_coder_config): """Builds a box coder object based on the box coder config. Args: box_coder_config: A box_coder.proto object containing the config for the desired box coder. Returns: BoxCoder based on the config. Raises: ValueError: On empty box coder proto. """ if not isinstance(box_coder_config, box_coder_pb2.BoxCoder): raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.') if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder': return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[ box_coder_config.faster_rcnn_box_coder.y_scale, box_coder_config.faster_rcnn_box_coder.x_scale, box_coder_config.faster_rcnn_box_coder.height_scale, box_coder_config.faster_rcnn_box_coder.width_scale ]) if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder': return keypoint_box_coder.KeypointBoxCoder( box_coder_config.keypoint_box_coder.num_keypoints, scale_factors=[ box_coder_config.keypoint_box_coder.y_scale, box_coder_config.keypoint_box_coder.x_scale, box_coder_config.keypoint_box_coder.height_scale, box_coder_config.keypoint_box_coder.width_scale ]) if (box_coder_config.WhichOneof('box_coder_oneof') == 'mean_stddev_box_coder'): return mean_stddev_box_coder.MeanStddevBoxCoder( stddev=box_coder_config.mean_stddev_box_coder.stddev) if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder': return square_box_coder.SquareBoxCoder(scale_factors=[ box_coder_config.square_box_coder.y_scale, box_coder_config.square_box_coder.x_scale, box_coder_config.square_box_coder.length_scale ]) raise ValueError('Empty box coder.')
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/box_coder_builder.py
box_coder_builder.py
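A minimal sketch of `box_coder_builder.build` above. The scale factors simply restate the conventional Faster R-CNN values and are illustrative:

from google.protobuf import text_format

from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2

box_coder_text_proto = """
  faster_rcnn_box_coder {
    y_scale: 10.0
    x_scale: 10.0
    height_scale: 5.0
    width_scale: 5.0
  }
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)

# Returns a faster_rcnn_box_coder.FasterRcnnBoxCoder instance.
box_coder = box_coder_builder.build(box_coder_proto)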
import tensorflow.compat.v1 as tf from object_detection.utils import learning_schedules from object_detection.utils import tf_version # pylint: disable=g-import-not-at-top if tf_version.is_tf2(): from official.modeling.optimization import ema_optimizer # pylint: enable=g-import-not-at-top try: from tensorflow.contrib import opt as tf_opt # pylint: disable=g-import-not-at-top except: # pylint: disable=bare-except pass def build_optimizers_tf_v1(optimizer_config, global_step=None): """Create a TF v1 compatible optimizer based on config. Args: optimizer_config: A Optimizer proto message. global_step: A variable representing the current step. If None, defaults to tf.train.get_or_create_global_step() Returns: An optimizer and a list of variables for summary. Raises: ValueError: when using an unsupported input data type. """ optimizer_type = optimizer_config.WhichOneof('optimizer') optimizer = None summary_vars = [] if optimizer_type == 'rms_prop_optimizer': config = optimizer_config.rms_prop_optimizer learning_rate = _create_learning_rate(config.learning_rate, global_step=global_step) summary_vars.append(learning_rate) optimizer = tf.train.RMSPropOptimizer( learning_rate, decay=config.decay, momentum=config.momentum_optimizer_value, epsilon=config.epsilon) if optimizer_type == 'momentum_optimizer': config = optimizer_config.momentum_optimizer learning_rate = _create_learning_rate(config.learning_rate, global_step=global_step) summary_vars.append(learning_rate) optimizer = tf.train.MomentumOptimizer( learning_rate, momentum=config.momentum_optimizer_value) if optimizer_type == 'adam_optimizer': config = optimizer_config.adam_optimizer learning_rate = _create_learning_rate(config.learning_rate, global_step=global_step) summary_vars.append(learning_rate) optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=config.epsilon) if optimizer is None: raise ValueError('Optimizer %s not supported.' % optimizer_type) if optimizer_config.use_moving_average: optimizer = tf_opt.MovingAverageOptimizer( optimizer, average_decay=optimizer_config.moving_average_decay) return optimizer, summary_vars def build_optimizers_tf_v2(optimizer_config, global_step=None): """Create a TF v2 compatible optimizer based on config. Args: optimizer_config: A Optimizer proto message. global_step: A variable representing the current step. If None, defaults to tf.train.get_or_create_global_step() Returns: An optimizer and a list of variables for summary. Raises: ValueError: when using an unsupported input data type. 
""" optimizer_type = optimizer_config.WhichOneof('optimizer') optimizer = None summary_vars = [] if optimizer_type == 'rms_prop_optimizer': config = optimizer_config.rms_prop_optimizer learning_rate = _create_learning_rate(config.learning_rate, global_step=global_step) summary_vars.append(learning_rate) optimizer = tf.keras.optimizers.RMSprop( learning_rate, decay=config.decay, momentum=config.momentum_optimizer_value, epsilon=config.epsilon) if optimizer_type == 'momentum_optimizer': config = optimizer_config.momentum_optimizer learning_rate = _create_learning_rate(config.learning_rate, global_step=global_step) summary_vars.append(learning_rate) optimizer = tf.keras.optimizers.SGD( learning_rate, momentum=config.momentum_optimizer_value) if optimizer_type == 'adam_optimizer': config = optimizer_config.adam_optimizer learning_rate = _create_learning_rate(config.learning_rate, global_step=global_step) summary_vars.append(learning_rate) optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=config.epsilon) if optimizer is None: raise ValueError('Optimizer %s not supported.' % optimizer_type) if optimizer_config.use_moving_average: optimizer = ema_optimizer.ExponentialMovingAverage( optimizer=optimizer, average_decay=optimizer_config.moving_average_decay) return optimizer, summary_vars def build(config, global_step=None): if tf.executing_eagerly(): return build_optimizers_tf_v2(config, global_step) else: return build_optimizers_tf_v1(config, global_step) def _create_learning_rate(learning_rate_config, global_step=None): """Create optimizer learning rate based on config. Args: learning_rate_config: A LearningRate proto message. global_step: A variable representing the current step. If None, defaults to tf.train.get_or_create_global_step() Returns: A learning rate. Raises: ValueError: when using an unsupported input data type. """ if global_step is None: global_step = tf.train.get_or_create_global_step() learning_rate = None learning_rate_type = learning_rate_config.WhichOneof('learning_rate') if learning_rate_type == 'constant_learning_rate': config = learning_rate_config.constant_learning_rate learning_rate = tf.constant(config.learning_rate, dtype=tf.float32, name='learning_rate') if learning_rate_type == 'exponential_decay_learning_rate': config = learning_rate_config.exponential_decay_learning_rate learning_rate = learning_schedules.exponential_decay_with_burnin( global_step, config.initial_learning_rate, config.decay_steps, config.decay_factor, burnin_learning_rate=config.burnin_learning_rate, burnin_steps=config.burnin_steps, min_learning_rate=config.min_learning_rate, staircase=config.staircase) if learning_rate_type == 'manual_step_learning_rate': config = learning_rate_config.manual_step_learning_rate if not config.schedule: raise ValueError('Empty learning rate schedule.') learning_rate_step_boundaries = [x.step for x in config.schedule] learning_rate_sequence = [config.initial_learning_rate] learning_rate_sequence += [x.learning_rate for x in config.schedule] learning_rate = learning_schedules.manual_stepping( global_step, learning_rate_step_boundaries, learning_rate_sequence, config.warmup) if learning_rate_type == 'cosine_decay_learning_rate': config = learning_rate_config.cosine_decay_learning_rate learning_rate = learning_schedules.cosine_decay_with_warmup( global_step, config.learning_rate_base, config.total_steps, config.warmup_learning_rate, config.warmup_steps, config.hold_base_rate_steps) if learning_rate is None: raise ValueError('Learning_rate %s not supported.' 
                     % learning_rate_type)

  return learning_rate
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/optimizer_builder.py
optimizer_builder.py
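A minimal sketch of `optimizer_builder.build` above: a momentum optimizer with a cosine-decay learning rate, built from a text-format `optimizer_pb2.Optimizer` message. The step counts and rates are illustrative, not recommended settings; whether a TF1 or Keras optimizer comes back depends on eager execution, as the build function above shows:

from google.protobuf import text_format

from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2

optimizer_text_proto = """
  momentum_optimizer {
    learning_rate {
      cosine_decay_learning_rate {
        learning_rate_base: 0.04
        total_steps: 25000
        warmup_learning_rate: 0.013
        warmup_steps: 2000
      }
    }
    momentum_optimizer_value: 0.9
  }
  use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)

# summary_vars holds the learning-rate tensor(s) for logging.
optimizer, summary_vars = optimizer_builder.build(optimizer_proto)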
"""Builder function to construct tf-slim arg_scope for convolution, fc ops.""" import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.core import freezable_batch_norm from object_detection.protos import hyperparams_pb2 from object_detection.utils import context_manager from object_detection.utils import tf_version # pylint: disable=g-import-not-at-top if tf_version.is_tf2(): from object_detection.core import freezable_sync_batch_norm # pylint: enable=g-import-not-at-top class KerasLayerHyperparams(object): """ A hyperparameter configuration object for Keras layers used in Object Detection models. """ def __init__(self, hyperparams_config): """Builds keras hyperparameter config for layers based on the proto config. It automatically converts from Slim layer hyperparameter configs to Keras layer hyperparameters. Namely, it: - Builds Keras initializers/regularizers instead of Slim ones - sets weights_regularizer/initializer to kernel_regularizer/initializer - converts batchnorm decay to momentum - converts Slim l2 regularizer weights to the equivalent Keras l2 weights Contains a hyperparameter configuration for ops that specifies kernel initializer, kernel regularizer, activation. Also contains parameters for batch norm operators based on the configuration. Note that if the batch_norm parameters are not specified in the config (i.e. left to default) then batch norm is excluded from the config. Args: hyperparams_config: hyperparams.proto object containing hyperparameters. Raises: ValueError: if hyperparams_config is not of type hyperparams.Hyperparams. """ if not isinstance(hyperparams_config, hyperparams_pb2.Hyperparams): raise ValueError('hyperparams_config not of type ' 'hyperparams_pb.Hyperparams.') self._batch_norm_params = None self._use_sync_batch_norm = False if hyperparams_config.HasField('batch_norm'): self._batch_norm_params = _build_keras_batch_norm_params( hyperparams_config.batch_norm) elif hyperparams_config.HasField('sync_batch_norm'): self._use_sync_batch_norm = True self._batch_norm_params = _build_keras_batch_norm_params( hyperparams_config.sync_batch_norm) self._force_use_bias = hyperparams_config.force_use_bias self._activation_fn = _build_activation_fn(hyperparams_config.activation) # TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv # (Those might use depthwise_* instead of kernel_*) # We should probably switch to using build_conv2d_layer and # build_depthwise_conv2d_layer methods instead. self._op_params = { 'kernel_regularizer': _build_keras_regularizer( hyperparams_config.regularizer), 'kernel_initializer': _build_initializer( hyperparams_config.initializer, build_for_keras=True), 'activation': _build_activation_fn(hyperparams_config.activation) } def use_batch_norm(self): return self._batch_norm_params is not None def use_sync_batch_norm(self): return self._use_sync_batch_norm def force_use_bias(self): return self._force_use_bias def use_bias(self): return (self._force_use_bias or not (self.use_batch_norm() and self.batch_norm_params()['center'])) def batch_norm_params(self, **overrides): """Returns a dict containing batchnorm layer construction hyperparameters. Optionally overrides values in the batchnorm hyperparam dict. Overrides only apply to individual calls of this method, and do not affect future calls. Args: **overrides: keyword arguments to override in the hyperparams dictionary Returns: dict containing the layer construction keyword arguments, with values overridden by the `overrides` keyword arguments. 
""" if self._batch_norm_params is None: new_batch_norm_params = dict() else: new_batch_norm_params = self._batch_norm_params.copy() new_batch_norm_params.update(overrides) return new_batch_norm_params def build_batch_norm(self, training=None, **overrides): """Returns a Batch Normalization layer with the appropriate hyperparams. If the hyperparams are configured to not use batch normalization, this will return a Keras Lambda layer that only applies tf.Identity, without doing any normalization. Optionally overrides values in the batch_norm hyperparam dict. Overrides only apply to individual calls of this method, and do not affect future calls. Args: training: if True, the normalization layer will normalize using the batch statistics. If False, the normalization layer will be frozen and will act as if it is being used for inference. If None, the layer will look up the Keras learning phase at `call` time to decide what to do. **overrides: batch normalization construction args to override from the batch_norm hyperparams dictionary. Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True), or a Keras Lambda layer that applies the identity (if use_batch_norm() is False) """ if self.use_batch_norm(): if self._use_sync_batch_norm: return freezable_sync_batch_norm.FreezableSyncBatchNorm( training=training, **self.batch_norm_params(**overrides)) else: return freezable_batch_norm.FreezableBatchNorm( training=training, **self.batch_norm_params(**overrides)) else: return tf.keras.layers.Lambda(tf.identity) def build_activation_layer(self, name='activation'): """Returns a Keras layer that applies the desired activation function. Args: name: The name to assign the Keras layer. Returns: A Keras lambda layer that applies the activation function specified in the hyperparam config, or applies the identity if the activation function is None. """ if self._activation_fn: return tf.keras.layers.Lambda(self._activation_fn, name=name) else: return tf.keras.layers.Lambda(tf.identity, name=name) def get_regularizer_weight(self): """Returns the l1 or l2 regularizer weight. Returns: A float value corresponding to the l1 or l2 regularization weight, or None if neither l1 or l2 regularization is defined. """ regularizer = self._op_params['kernel_regularizer'] if hasattr(regularizer, 'l1'): return float(regularizer.l1) elif hasattr(regularizer, 'l2'): return float(regularizer.l2) else: return None def params(self, include_activation=False, **overrides): """Returns a dict containing the layer construction hyperparameters to use. Optionally overrides values in the returned dict. Overrides only apply to individual calls of this method, and do not affect future calls. Args: include_activation: If False, activation in the returned dictionary will be set to `None`, and the activation must be applied via a separate layer created by `build_activation_layer`. If True, `activation` in the output param dictionary will be set to the activation function specified in the hyperparams config. **overrides: keyword arguments to override in the hyperparams dictionary. Returns: dict containing the layer construction keyword arguments, with values overridden by the `overrides` keyword arguments. 
""" new_params = self._op_params.copy() new_params['activation'] = None if include_activation: new_params['activation'] = self._activation_fn new_params['use_bias'] = self.use_bias() new_params.update(**overrides) return new_params def build(hyperparams_config, is_training): """Builds tf-slim arg_scope for convolution ops based on the config. Returns an arg_scope to use for convolution ops containing weights initializer, weights regularizer, activation function, batch norm function and batch norm parameters based on the configuration. Note that if no normalization parameters are specified in the config, (i.e. left to default) then both batch norm and group norm are excluded from the arg_scope. The batch norm parameters are set for updates based on `is_training` argument and conv_hyperparams_config.batch_norm.train parameter. During training, they are updated only if batch_norm.train parameter is true. However, during eval, no updates are made to the batch norm variables. In both cases, their current values are used during forward pass. Args: hyperparams_config: hyperparams.proto object containing hyperparameters. is_training: Whether the network is in training mode. Returns: arg_scope_fn: A function to construct tf-slim arg_scope containing hyperparameters for ops. Raises: ValueError: if hyperparams_config is not of type hyperparams.Hyperparams. """ if not isinstance(hyperparams_config, hyperparams_pb2.Hyperparams): raise ValueError('hyperparams_config not of type ' 'hyperparams_pb.Hyperparams.') if hyperparams_config.force_use_bias: raise ValueError('Hyperparams force_use_bias only supported by ' 'KerasLayerHyperparams.') if hyperparams_config.HasField('sync_batch_norm'): raise ValueError('Hyperparams sync_batch_norm only supported by ' 'KerasLayerHyperparams.') normalizer_fn = None batch_norm_params = None if hyperparams_config.HasField('batch_norm'): normalizer_fn = slim.batch_norm batch_norm_params = _build_batch_norm_params( hyperparams_config.batch_norm, is_training) if hyperparams_config.HasField('group_norm'): normalizer_fn = slim.group_norm affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose] if hyperparams_config.HasField('op') and ( hyperparams_config.op == hyperparams_pb2.Hyperparams.FC): affected_ops = [slim.fully_connected] def scope_fn(): with (slim.arg_scope([slim.batch_norm], **batch_norm_params) if batch_norm_params is not None else context_manager.IdentityContextManager()): with slim.arg_scope( affected_ops, weights_regularizer=_build_slim_regularizer( hyperparams_config.regularizer), weights_initializer=_build_initializer( hyperparams_config.initializer), activation_fn=_build_activation_fn(hyperparams_config.activation), normalizer_fn=normalizer_fn) as sc: return sc return scope_fn def _build_activation_fn(activation_fn): """Builds a callable activation from config. Args: activation_fn: hyperparams_pb2.Hyperparams.activation Returns: Callable activation function. Raises: ValueError: On unknown activation function. """ if activation_fn == hyperparams_pb2.Hyperparams.NONE: return None if activation_fn == hyperparams_pb2.Hyperparams.RELU: return tf.nn.relu if activation_fn == hyperparams_pb2.Hyperparams.RELU_6: return tf.nn.relu6 if activation_fn == hyperparams_pb2.Hyperparams.SWISH: return tf.nn.swish raise ValueError('Unknown activation function: {}'.format(activation_fn)) def _build_slim_regularizer(regularizer): """Builds a tf-slim regularizer from config. Args: regularizer: hyperparams_pb2.Hyperparams.regularizer proto. 
Returns: tf-slim regularizer. Raises: ValueError: On unknown regularizer. """ regularizer_oneof = regularizer.WhichOneof('regularizer_oneof') if regularizer_oneof == 'l1_regularizer': return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight)) if regularizer_oneof == 'l2_regularizer': return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight)) if regularizer_oneof is None: return None raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) def _build_keras_regularizer(regularizer): """Builds a keras regularizer from config. Args: regularizer: hyperparams_pb2.Hyperparams.regularizer proto. Returns: Keras regularizer. Raises: ValueError: On unknown regularizer. """ regularizer_oneof = regularizer.WhichOneof('regularizer_oneof') if regularizer_oneof == 'l1_regularizer': return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight)) if regularizer_oneof == 'l2_regularizer': # The Keras L2 regularizer weight differs from the Slim L2 regularizer # weight by a factor of 2 return tf.keras.regularizers.l2( float(regularizer.l2_regularizer.weight * 0.5)) if regularizer_oneof is None: return None raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) def _build_initializer(initializer, build_for_keras=False): """Build a tf initializer from config. Args: initializer: hyperparams_pb2.Hyperparams.regularizer proto. build_for_keras: Whether the initializers should be built for Keras operators. If false builds for Slim. Returns: tf initializer or string corresponding to the tf keras initializer name. Raises: ValueError: On unknown initializer. """ initializer_oneof = initializer.WhichOneof('initializer_oneof') if initializer_oneof == 'truncated_normal_initializer': return tf.truncated_normal_initializer( mean=initializer.truncated_normal_initializer.mean, stddev=initializer.truncated_normal_initializer.stddev) if initializer_oneof == 'random_normal_initializer': return tf.random_normal_initializer( mean=initializer.random_normal_initializer.mean, stddev=initializer.random_normal_initializer.stddev) if initializer_oneof == 'variance_scaling_initializer': enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer. DESCRIPTOR.enum_types_by_name['Mode']) mode = enum_descriptor.values_by_number[initializer. variance_scaling_initializer. mode].name if build_for_keras: if initializer.variance_scaling_initializer.uniform: return tf.variance_scaling_initializer( scale=initializer.variance_scaling_initializer.factor, mode=mode.lower(), distribution='uniform') else: # In TF 1.9 release and earlier, the truncated_normal distribution was # not supported correctly. So, in these earlier versions of tensorflow, # the ValueError will be raised, and we manually truncate the # distribution scale. # # It is insufficient to just set distribution to `normal` from the # start, because the `normal` distribution in newer Tensorflow versions # creates a truncated distribution, whereas it created untruncated # distributions in older versions. 
try: return tf.variance_scaling_initializer( scale=initializer.variance_scaling_initializer.factor, mode=mode.lower(), distribution='truncated_normal') except ValueError: truncate_constant = 0.87962566103423978 truncated_scale = initializer.variance_scaling_initializer.factor / ( truncate_constant * truncate_constant ) return tf.variance_scaling_initializer( scale=truncated_scale, mode=mode.lower(), distribution='normal') else: return slim.variance_scaling_initializer( factor=initializer.variance_scaling_initializer.factor, mode=mode, uniform=initializer.variance_scaling_initializer.uniform) if initializer_oneof == 'keras_initializer_by_name': if build_for_keras: return initializer.keras_initializer_by_name else: raise ValueError( 'Unsupported non-Keras usage of keras_initializer_by_name: {}'.format( initializer.keras_initializer_by_name)) if initializer_oneof is None: return None raise ValueError('Unknown initializer function: {}'.format( initializer_oneof)) def _build_batch_norm_params(batch_norm, is_training): """Build a dictionary of batch_norm params from config. Args: batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto. is_training: Whether the models is in training mode. Returns: A dictionary containing batch_norm parameters. """ batch_norm_params = { 'decay': batch_norm.decay, 'center': batch_norm.center, 'scale': batch_norm.scale, 'epsilon': batch_norm.epsilon, # Remove is_training parameter from here and deprecate it in the proto # once we refactor Faster RCNN models to set is_training through an outer # arg_scope in the meta architecture. 'is_training': is_training and batch_norm.train, } return batch_norm_params def _build_keras_batch_norm_params(batch_norm): """Build a dictionary of Keras BatchNormalization params from config. Args: batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto. Returns: A dictionary containing Keras BatchNormalization parameters. """ # Note: Although decay is defined to be 1 - momentum in batch_norm, # decay in the slim batch_norm layers was erroneously defined and is # actually the same as momentum in the Keras batch_norm layers. # For context, see: github.com/keras-team/keras/issues/6839 batch_norm_params = { 'momentum': batch_norm.decay, 'center': batch_norm.center, 'scale': batch_norm.scale, 'epsilon': batch_norm.epsilon, } return batch_norm_params
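# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): it shows how a
# text-format hyperparams_pb2.Hyperparams proto is typically parsed and turned
# into Keras layer construction kwargs. The numeric values below are
# hypothetical.
# ---------------------------------------------------------------------------
def _example_keras_hyperparams_usage():
  """Builds a conv + batch norm + activation stack from a hyperparams proto."""
  from google.protobuf import text_format  # pylint: disable=g-import-not-at-top

  conv_hyperparams_text_proto = """
    regularizer {
      l2_regularizer {
        weight: 0.0004
      }
    }
    initializer {
      truncated_normal_initializer {
        mean: 0.0
        stddev: 0.03
      }
    }
    activation: RELU_6
    batch_norm {
      decay: 0.997
      center: true
      scale: true
      epsilon: 0.001
    }
  """
  hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
  keras_hyperparams = KerasLayerHyperparams(hyperparams_proto)

  # params() supplies kernel_initializer, kernel_regularizer and use_bias;
  # batch norm and activation are built as separate layers.
  conv = tf.keras.layers.Conv2D(
      filters=64, kernel_size=3, padding='same', **keras_hyperparams.params())
  batch_norm = keras_hyperparams.build_batch_norm(training=True)
  activation = keras_hyperparams.build_activation_layer(name='relu6')
  return conv, batch_norm, activation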
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/hyperparams_builder.py
hyperparams_builder.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import math import tensorflow.compat.v1 as tf from object_detection.builders import decoder_builder from object_detection.protos import input_reader_pb2 def make_initializable_iterator(dataset): """Creates an iterator, and initializes tables. This is useful in cases where make_one_shot_iterator wouldn't work because the graph contains a hash table that needs to be initialized. Args: dataset: A `tf.data.Dataset` object. Returns: A `tf.data.Iterator`. """ iterator = dataset.make_initializable_iterator() tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) return iterator def _read_dataset_internal(file_read_func, input_files, num_readers, config, filename_shard_fn=None): """Reads a dataset, and handles repetition and shuffling. Args: file_read_func: Function to use in tf_data.parallel_interleave, to read every individual file into a tf.data.Dataset. input_files: A list of file paths to read. num_readers: Number of readers to use. config: A input_reader_builder.InputReader object. filename_shard_fn: optional, A function used to shard filenames across replicas. This function takes as input a TF dataset of filenames and is expected to return its sharded version. It is useful when the dataset is being loaded on one of possibly many replicas and we want to evenly shard the files between the replicas. Returns: A tf.data.Dataset of (undecoded) tf-records based on config. Raises: RuntimeError: If no files are found at the supplied path(s). """ filenames = tf.gfile.Glob(input_files) tf.logging.info('Reading record datasets for input file: %s' % input_files) tf.logging.info('Number of filenames to read: %s' % len(filenames)) if not filenames: raise RuntimeError('Did not find any input files matching the glob pattern ' '{}'.format(input_files)) if num_readers > len(filenames): num_readers = len(filenames) tf.logging.warning('num_readers has been reduced to %d to match input file ' 'shards.' % num_readers) filename_dataset = tf.data.Dataset.from_tensor_slices(filenames) if config.shuffle: filename_dataset = filename_dataset.shuffle( config.filenames_shuffle_buffer_size) elif num_readers > 1: tf.logging.warning('`shuffle` is false, but the input data stream is ' 'still slightly shuffled since `num_readers` > 1.') if filename_shard_fn: filename_dataset = filename_shard_fn(filename_dataset) filename_dataset = filename_dataset.repeat(config.num_epochs or None) records_dataset = filename_dataset.apply( tf.data.experimental.parallel_interleave( file_read_func, cycle_length=num_readers, block_length=config.read_block_length, sloppy=config.shuffle)) if config.shuffle: records_dataset = records_dataset.shuffle(config.shuffle_buffer_size) return records_dataset def read_dataset(file_read_func, input_files, config, filename_shard_fn=None): """Reads multiple datasets with sampling. Args: file_read_func: Function to use in tf_data.parallel_interleave, to read every individual file into a tf.data.Dataset. input_files: A list of file paths to read. config: A input_reader_builder.InputReader object. filename_shard_fn: optional, A function used to shard filenames across replicas. This function takes as input a TF dataset of filenames and is expected to return its sharded version. It is useful when the dataset is being loaded on one of possibly many replicas and we want to evenly shard the files between the replicas. 
Returns: A tf.data.Dataset of (undecoded) tf-records based on config. Raises: RuntimeError: If no files are found at the supplied path(s). """ if config.sample_from_datasets_weights: tf.logging.info('Reading weighted datasets: %s' % input_files) if len(input_files) != len(config.sample_from_datasets_weights): raise ValueError('Expected the number of input files to be the same as ' 'the number of dataset sample weights. But got ' '[input_files, sample_from_datasets_weights]: [' + input_files + ', ' + str(config.sample_from_datasets_weights) + ']') tf.logging.info('Sampling from datasets %s with weights %s' % (input_files, config.sample_from_datasets_weights)) records_datasets = [] dataset_weights = [] for i, input_file in enumerate(input_files): weight = config.sample_from_datasets_weights[i] num_readers = math.ceil(config.num_readers * weight / sum(config.sample_from_datasets_weights)) tf.logging.info( 'Num readers for dataset [%s]: %d', input_file, num_readers) if num_readers == 0: tf.logging.info('Skipping dataset due to zero weights: %s', input_file) continue tf.logging.info( 'Num readers for dataset [%s]: %d', input_file, num_readers) records_dataset = _read_dataset_internal(file_read_func, [input_file], num_readers, config, filename_shard_fn) dataset_weights.append(weight) records_datasets.append(records_dataset) return tf.data.experimental.sample_from_datasets(records_datasets, dataset_weights) else: tf.logging.info('Reading unweighted datasets: %s' % input_files) return _read_dataset_internal(file_read_func, input_files, config.num_readers, config, filename_shard_fn) def shard_function_for_context(input_context): """Returns a function that shards filenames based on the input context.""" if input_context is None: return None def shard_fn(dataset): return dataset.shard( input_context.num_input_pipelines, input_context.input_pipeline_id) return shard_fn def build(input_reader_config, batch_size=None, transform_input_data_fn=None, input_context=None, reduce_to_frame_fn=None): """Builds a tf.data.Dataset. Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all records. Applies a padded batch to the resulting dataset. Args: input_reader_config: A input_reader_pb2.InputReader object. batch_size: Batch size. If batch size is None, no batching is performed. transform_input_data_fn: Function to apply transformation to all records, or None if no extra decoding is required. input_context: optional, A tf.distribute.InputContext object used to shard filenames and compute per-replica batch_size when this function is being called per-replica. reduce_to_frame_fn: Function that extracts frames from tf.SequenceExample type input data. Returns: A tf.data.Dataset based on the input_reader_config. Raises: ValueError: On invalid input reader proto. ValueError: If no input paths are specified. """ if not isinstance(input_reader_config, input_reader_pb2.InputReader): raise ValueError('input_reader_config not of type ' 'input_reader_pb2.InputReader.') decoder = decoder_builder.build(input_reader_config) if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': config = input_reader_config.tf_record_input_reader if not config.input_path: raise ValueError('At least one input path must be specified in ' '`input_reader_config`.') def dataset_map_fn(dataset, fn_to_map, batch_size=None, input_reader_config=None): """Handles whether or not to use the legacy map function. Args: dataset: A tf.Dataset. fn_to_map: The function to be mapped for that dataset. batch_size: Batch size. 
If batch size is None, no batching is performed. input_reader_config: A input_reader_pb2.InputReader object. Returns: A tf.data.Dataset mapped with fn_to_map. """ if hasattr(dataset, 'map_with_legacy_function'): if batch_size: num_parallel_calls = batch_size * ( input_reader_config.num_parallel_batches) else: num_parallel_calls = input_reader_config.num_parallel_map_calls dataset = dataset.map_with_legacy_function( fn_to_map, num_parallel_calls=num_parallel_calls) else: dataset = dataset.map(fn_to_map, tf.data.experimental.AUTOTUNE) return dataset shard_fn = shard_function_for_context(input_context) if input_context is not None: batch_size = input_context.get_per_replica_batch_size(batch_size) dataset = read_dataset( functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000), config.input_path[:], input_reader_config, filename_shard_fn=shard_fn) if input_reader_config.sample_1_of_n_examples > 1: dataset = dataset.shard(input_reader_config.sample_1_of_n_examples, 0) # TODO(rathodv): make batch size a required argument once the old binaries # are deleted. dataset = dataset_map_fn(dataset, decoder.decode, batch_size, input_reader_config) if reduce_to_frame_fn: dataset = reduce_to_frame_fn(dataset, dataset_map_fn, batch_size, input_reader_config) if transform_input_data_fn is not None: dataset = dataset_map_fn(dataset, transform_input_data_fn, batch_size, input_reader_config) if batch_size: dataset = dataset.batch(batch_size, drop_remainder=input_reader_config.drop_remainder) dataset = dataset.prefetch(input_reader_config.num_prefetch_batches) return dataset raise ValueError('Unsupported input_reader_config.')
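# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): it shows how an
# input_reader_pb2.InputReader text proto is typically parsed and handed to
# `build`. The record path below is hypothetical.
# ---------------------------------------------------------------------------
def _example_dataset_builder_usage():
  """Returns a batched tf.data.Dataset built from a TFRecord input reader."""
  from google.protobuf import text_format  # pylint: disable=g-import-not-at-top

  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    tf_record_input_reader {
      input_path: '/path/to/examples.record'
    }
  """
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)

  # Each dataset element is the decoded tensor dictionary produced by the
  # decoder built in decoder_builder.build, batched to size 8.
  return build(input_reader_proto, batch_size=8)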
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/dataset_builder.py
dataset_builder.py
import tensorflow.compat.v1 as tf from object_detection.utils import shape_utils def _find_interval_containing_new_value(x, new_value): """Find the index of x (ascending-ordered) after which new_value occurs.""" new_value_shape = shape_utils.combined_static_and_dynamic_shape(new_value)[0] x_shape = shape_utils.combined_static_and_dynamic_shape(x)[0] compare = tf.cast(tf.reshape(new_value, shape=(new_value_shape, 1)) >= tf.reshape(x, shape=(1, x_shape)), dtype=tf.int32) diff = compare[:, 1:] - compare[:, :-1] interval_idx = tf.argmin(diff, axis=1) return interval_idx def _tf_linear_interp1d(x_to_interpolate, fn_x, fn_y): """Tensorflow implementation of 1d linear interpolation. Args: x_to_interpolate: tf.float32 Tensor of shape (num_examples,) over which 1d linear interpolation is performed. fn_x: Monotonically-increasing, non-repeating tf.float32 Tensor of shape (length,) used as the domain to approximate a function. fn_y: tf.float32 Tensor of shape (length,) used as the range to approximate a function. Returns: tf.float32 Tensor of shape (num_examples,) """ x_pad = tf.concat([fn_x[:1] - 1, fn_x, fn_x[-1:] + 1], axis=0) y_pad = tf.concat([fn_y[:1], fn_y, fn_y[-1:]], axis=0) interval_idx = _find_interval_containing_new_value(x_pad, x_to_interpolate) # Interpolate alpha = ( (x_to_interpolate - tf.gather(x_pad, interval_idx)) / (tf.gather(x_pad, interval_idx + 1) - tf.gather(x_pad, interval_idx))) interpolation = ((1 - alpha) * tf.gather(y_pad, interval_idx) + alpha * tf.gather(y_pad, interval_idx + 1)) return interpolation def _function_approximation_proto_to_tf_tensors(x_y_pairs_message): """Extracts (x,y) pairs from a XYPairs message. Args: x_y_pairs_message: calibration_pb2..XYPairs proto Returns: tf_x: tf.float32 tensor of shape (number_xy_pairs,) for function domain. tf_y: tf.float32 tensor of shape (number_xy_pairs,) for function range. """ tf_x = tf.convert_to_tensor([x_y_pair.x for x_y_pair in x_y_pairs_message.x_y_pair], dtype=tf.float32) tf_y = tf.convert_to_tensor([x_y_pair.y for x_y_pair in x_y_pairs_message.x_y_pair], dtype=tf.float32) return tf_x, tf_y def _get_class_id_function_dict(calibration_config): """Create a dictionary mapping class id to function approximations. Args: calibration_config: calibration_pb2 proto containing id_function_approximations. Returns: Dictionary mapping a class id to a tuple of TF tensors to be used for function approximation. """ class_id_function_dict = {} class_id_xy_pairs_map = ( calibration_config.class_id_function_approximations.class_id_xy_pairs_map) for class_id in class_id_xy_pairs_map: class_id_function_dict[class_id] = ( _function_approximation_proto_to_tf_tensors( class_id_xy_pairs_map[class_id])) return class_id_function_dict def build(calibration_config): """Returns a function that calibrates Tensorflow model scores. All returned functions are expected to apply positive monotonic transformations to inputs (i.e. score ordering is strictly preserved or adjacent scores are mapped to the same score, but an input of lower value should never be exceed an input of higher value after transformation). For class-agnostic calibration, positive monotonicity should hold across all scores. In class-specific cases, positive monotonicity should hold within each class. Args: calibration_config: calibration_pb2.CalibrationConfig proto. Returns: Function that that accepts class_predictions_with_background and calibrates the output based on calibration_config's parameters. 
Raises: ValueError: No calibration builder defined for "Oneof" in calibration_config. """ # Linear Interpolation (usually used as a result of calibration via # isotonic regression). if calibration_config.WhichOneof('calibrator') == 'function_approximation': def calibration_fn(class_predictions_with_background): """Calibrate predictions via 1-d linear interpolation. Predictions scores are linearly interpolated based on a class-agnostic function approximation. Note that the 0-indexed background class is also transformed. Args: class_predictions_with_background: tf.float32 tensor of shape [batch_size, num_anchors, num_classes + 1] containing scores on the interval [0,1]. This is usually produced by a sigmoid or softmax layer and the result of calling the `predict` method of a detection model. Returns: tf.float32 tensor of the same shape as the input with values on the interval [0, 1]. """ # Flattening Tensors and then reshaping at the end. flat_class_predictions_with_background = tf.reshape( class_predictions_with_background, shape=[-1]) fn_x, fn_y = _function_approximation_proto_to_tf_tensors( calibration_config.function_approximation.x_y_pairs) updated_scores = _tf_linear_interp1d( flat_class_predictions_with_background, fn_x, fn_y) # Un-flatten the scores original_detections_shape = shape_utils.combined_static_and_dynamic_shape( class_predictions_with_background) calibrated_class_predictions_with_background = tf.reshape( updated_scores, shape=original_detections_shape, name='calibrate_scores') return calibrated_class_predictions_with_background elif (calibration_config.WhichOneof('calibrator') == 'class_id_function_approximations'): def calibration_fn(class_predictions_with_background): """Calibrate predictions per class via 1-d linear interpolation. Prediction scores are linearly interpolated with class-specific function approximations. Note that after calibration, an anchor's class scores will not necessarily sum to 1, and score ordering may change, depending on each class' calibration parameters. Args: class_predictions_with_background: tf.float32 tensor of shape [batch_size, num_anchors, num_classes + 1] containing scores on the interval [0,1]. This is usually produced by a sigmoid or softmax layer and the result of calling the `predict` method of a detection model. Returns: tf.float32 tensor of the same shape as the input with values on the interval [0, 1]. Raises: KeyError: Calibration parameters are not present for a class. """ class_id_function_dict = _get_class_id_function_dict(calibration_config) # Tensors are split by class and then recombined at the end to recover # the input's original shape. If a class id does not have calibration # parameters, it is left unchanged. 
      class_tensors = tf.unstack(class_predictions_with_background, axis=-1)
      calibrated_class_tensors = []
      for class_id, class_tensor in enumerate(class_tensors):
        flat_class_tensor = tf.reshape(class_tensor, shape=[-1])
        if class_id in class_id_function_dict:
          output_tensor = _tf_linear_interp1d(
              x_to_interpolate=flat_class_tensor,
              fn_x=class_id_function_dict[class_id][0],
              fn_y=class_id_function_dict[class_id][1])
        else:
          tf.logging.info(
              'Calibration parameters for class id `%d` not found', class_id)
          output_tensor = flat_class_tensor
        calibrated_class_tensors.append(output_tensor)

      combined_calibrated_tensor = tf.stack(calibrated_class_tensors, axis=1)
      input_shape = shape_utils.combined_static_and_dynamic_shape(
          class_predictions_with_background)
      calibrated_class_predictions_with_background = tf.reshape(
          combined_calibrated_tensor,
          shape=input_shape,
          name='calibrate_scores')
      return calibrated_class_predictions_with_background

  elif (calibration_config.WhichOneof('calibrator') ==
        'temperature_scaling_calibration'):

    def calibration_fn(class_predictions_with_background):
      """Calibrate predictions via temperature scaling.

      Predictions logits scores are scaled by the temperature scaler. Note
      that the 0-indexed background class is also transformed.

      Args:
        class_predictions_with_background: tf.float32 tensor of shape
          [batch_size, num_anchors, num_classes + 1] containing logits scores.
          This is usually produced before a sigmoid or softmax layer.

      Returns:
        tf.float32 tensor of the same shape as the input.

      Raises:
        ValueError: If temperature scaler is of incorrect value.
      """
      scaler = calibration_config.temperature_scaling_calibration.scaler
      if scaler <= 0:
        raise ValueError('The scaler in temperature scaling must be positive.')
      calibrated_class_predictions_with_background = tf.math.divide(
          class_predictions_with_background, scaler, name='calibrate_score')
      return calibrated_class_predictions_with_background

  # TODO(zbeaver): Add sigmoid calibration.
  else:
    raise ValueError('No calibration builder defined for "Oneof" in '
                     'calibration_config.')

  return calibration_fn
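# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): it builds a
# class-agnostic calibration function from a text-format CalibrationConfig and
# applies it to a dummy score tensor. The (x, y) control points are
# hypothetical.
# ---------------------------------------------------------------------------
def _example_calibration_usage():
  """Calibrates dummy scores with class-agnostic 1-d linear interpolation."""
  # pylint: disable=g-import-not-at-top
  from google.protobuf import text_format
  from object_detection.protos import calibration_pb2
  # pylint: enable=g-import-not-at-top

  calibration_text_proto = """
    function_approximation {
      x_y_pairs {
        x_y_pair { x: 0.0 y: 0.0 }
        x_y_pair { x: 0.5 y: 0.25 }
        x_y_pair { x: 1.0 y: 1.0 }
      }
    }
  """
  calibration_config = calibration_pb2.CalibrationConfig()
  text_format.Merge(calibration_text_proto, calibration_config)
  calibration_fn = build(calibration_config)

  # Shape [batch_size=1, num_anchors=2, num_classes + 1 = 3], scores in [0, 1].
  class_predictions = tf.constant(
      [[[0.1, 0.5, 0.9], [0.3, 0.6, 0.8]]], dtype=tf.float32)
  return calibration_fn(class_predictions)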
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/calibration_builder.py
calibration_builder.py
"""Builder function for image resizing operations.""" import functools import tensorflow.compat.v1 as tf from object_detection.core import preprocessor from object_detection.protos import image_resizer_pb2 def _tf_resize_method(resize_method): """Maps image resize method from enumeration type to TensorFlow. Args: resize_method: The resize_method attribute of keep_aspect_ratio_resizer or fixed_shape_resizer. Returns: method: The corresponding TensorFlow ResizeMethod. Raises: ValueError: if `resize_method` is of unknown type. """ dict_method = { image_resizer_pb2.BILINEAR: tf.image.ResizeMethod.BILINEAR, image_resizer_pb2.NEAREST_NEIGHBOR: tf.image.ResizeMethod.NEAREST_NEIGHBOR, image_resizer_pb2.BICUBIC: tf.image.ResizeMethod.BICUBIC, image_resizer_pb2.AREA: tf.image.ResizeMethod.AREA } if resize_method in dict_method: return dict_method[resize_method] else: raise ValueError('Unknown resize_method') def build(image_resizer_config): """Builds callable for image resizing operations. Args: image_resizer_config: image_resizer.proto object containing parameters for an image resizing operation. Returns: image_resizer_fn: Callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. Raises: ValueError: if `image_resizer_config` is of incorrect type. ValueError: if `image_resizer_config.image_resizer_oneof` is of expected type. ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer is used. """ if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer): raise ValueError('image_resizer_config not of type ' 'image_resizer_pb2.ImageResizer.') image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof') if image_resizer_oneof == 'keep_aspect_ratio_resizer': keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer if not (keep_aspect_ratio_config.min_dimension <= keep_aspect_ratio_config.max_dimension): raise ValueError('min_dimension > max_dimension') method = _tf_resize_method(keep_aspect_ratio_config.resize_method) per_channel_pad_value = (0, 0, 0) if keep_aspect_ratio_config.per_channel_pad_value: per_channel_pad_value = tuple(keep_aspect_ratio_config. 
per_channel_pad_value) image_resizer_fn = functools.partial( preprocessor.resize_to_range, min_dimension=keep_aspect_ratio_config.min_dimension, max_dimension=keep_aspect_ratio_config.max_dimension, method=method, pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension, per_channel_pad_value=per_channel_pad_value) if not keep_aspect_ratio_config.convert_to_grayscale: return image_resizer_fn elif image_resizer_oneof == 'fixed_shape_resizer': fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer method = _tf_resize_method(fixed_shape_resizer_config.resize_method) image_resizer_fn = functools.partial( preprocessor.resize_image, new_height=fixed_shape_resizer_config.height, new_width=fixed_shape_resizer_config.width, method=method) if not fixed_shape_resizer_config.convert_to_grayscale: return image_resizer_fn elif image_resizer_oneof == 'identity_resizer': def image_resizer_fn(image, masks=None, **kwargs): del kwargs if masks is None: return [image, tf.shape(image)] else: return [image, masks, tf.shape(image)] return image_resizer_fn elif image_resizer_oneof == 'conditional_shape_resizer': conditional_shape_resize_config = ( image_resizer_config.conditional_shape_resizer) method = _tf_resize_method(conditional_shape_resize_config.resize_method) if conditional_shape_resize_config.condition == ( image_resizer_pb2.ConditionalShapeResizer.GREATER): image_resizer_fn = functools.partial( preprocessor.resize_to_max_dimension, max_dimension=conditional_shape_resize_config.size_threshold, method=method) elif conditional_shape_resize_config.condition == ( image_resizer_pb2.ConditionalShapeResizer.SMALLER): image_resizer_fn = functools.partial( preprocessor.resize_to_min_dimension, min_dimension=conditional_shape_resize_config.size_threshold, method=method) else: raise ValueError( 'Invalid image resizer condition option for ' 'ConditionalShapeResizer: \'%s\'.' % conditional_shape_resize_config.condition) if not conditional_shape_resize_config.convert_to_grayscale: return image_resizer_fn elif image_resizer_oneof == 'pad_to_multiple_resizer': pad_to_multiple_resizer_config = ( image_resizer_config.pad_to_multiple_resizer) if pad_to_multiple_resizer_config.multiple < 0: raise ValueError('`multiple` for pad_to_multiple_resizer should be > 0.') else: image_resizer_fn = functools.partial( preprocessor.resize_pad_to_multiple, multiple=pad_to_multiple_resizer_config.multiple) if not pad_to_multiple_resizer_config.convert_to_grayscale: return image_resizer_fn else: raise ValueError( 'Invalid image resizer option: \'%s\'.' % image_resizer_oneof) def grayscale_image_resizer(image, masks=None): """Convert to grayscale before applying image_resizer_fn. Args: image: A 3D tensor of shape [height, width, 3] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A 3D tensor of shape [new_height, new_width, 1], where the image has been resized (with bilinear interpolation) so that min(new_height, new_width) == min_dimension or max(new_height, new_width) == max_dimension. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width]. resized_image_shape: A 1D tensor of shape [3] containing shape of the resized image. 
""" # image_resizer_fn returns [resized_image, resized_image_shape] if # mask==None, otherwise it returns # [resized_image, resized_mask, resized_image_shape]. In either case, we # only deal with first and last element of the returned list. retval = image_resizer_fn(image, masks) resized_image = retval[0] resized_image_shape = retval[-1] retval[0] = preprocessor.rgb_to_gray(resized_image) retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0) return retval return functools.partial(grayscale_image_resizer)
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/image_resizer_builder.py
image_resizer_builder.py
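# ---------------------------------------------------------------------------
# Illustrative usage sketch for image_resizer_builder above (not part of the
# original modules): it parses a text-format ImageResizer proto and resizes a
# dummy image. The dimensions below are hypothetical.
# ---------------------------------------------------------------------------
def _example_image_resizer_usage():
  """Resizes a dummy image with a keep_aspect_ratio_resizer."""
  # pylint: disable=g-import-not-at-top
  import tensorflow.compat.v1 as tf
  from google.protobuf import text_format
  from object_detection.builders import image_resizer_builder
  from object_detection.protos import image_resizer_pb2
  # pylint: enable=g-import-not-at-top

  image_resizer_text_proto = """
    keep_aspect_ratio_resizer {
      min_dimension: 600
      max_dimension: 1024
    }
  """
  image_resizer_config = image_resizer_pb2.ImageResizer()
  text_format.Merge(image_resizer_text_proto, image_resizer_config)
  image_resizer_fn = image_resizer_builder.build(image_resizer_config)

  image = tf.zeros([480, 640, 3], dtype=tf.float32)
  # With no masks, the callable returns [resized_image, resized_image_shape].
  resized_image, resized_image_shape = image_resizer_fn(image)
  return resized_image, resized_image_shape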
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.data_decoders import tf_example_decoder from object_detection.data_decoders import tf_sequence_example_decoder from object_detection.protos import input_reader_pb2 parallel_reader = slim.parallel_reader def build(input_reader_config): """Builds a tensor dictionary based on the InputReader config. Args: input_reader_config: A input_reader_pb2.InputReader object. Returns: A tensor dict based on the input_reader_config. Raises: ValueError: On invalid input reader proto. ValueError: If no input paths are specified. """ if not isinstance(input_reader_config, input_reader_pb2.InputReader): raise ValueError('input_reader_config not of type ' 'input_reader_pb2.InputReader.') if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': config = input_reader_config.tf_record_input_reader if not config.input_path: raise ValueError('At least one input path must be specified in ' '`input_reader_config`.') _, string_tensor = parallel_reader.parallel_read( config.input_path[:], # Convert `RepeatedScalarContainer` to list. reader_class=tf.TFRecordReader, num_epochs=(input_reader_config.num_epochs if input_reader_config.num_epochs else None), num_readers=input_reader_config.num_readers, shuffle=input_reader_config.shuffle, dtypes=[tf.string, tf.string], capacity=input_reader_config.queue_capacity, min_after_dequeue=input_reader_config.min_after_dequeue) label_map_proto_file = None if input_reader_config.HasField('label_map_path'): label_map_proto_file = input_reader_config.label_map_path input_type = input_reader_config.input_type if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'): decoder = tf_example_decoder.TfExampleDecoder( load_instance_masks=input_reader_config.load_instance_masks, instance_mask_type=input_reader_config.mask_type, label_map_proto_file=label_map_proto_file, load_context_features=input_reader_config.load_context_features) return decoder.decode(string_tensor) elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'): decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( label_map_proto_file=label_map_proto_file, load_context_features=input_reader_config.load_context_features, load_context_image_ids=input_reader_config.load_context_image_ids) return decoder.decode(string_tensor) raise ValueError('Unsupported input_type.') raise ValueError('Unsupported input_reader_config.')
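# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): this builder is
# the TF1 queue-based counterpart of dataset_builder, so the returned tensor
# dictionary has to be evaluated in a session with queue runners started. The
# record path below is hypothetical.
# ---------------------------------------------------------------------------
def _example_input_reader_usage():
  """Decodes tf.Examples from a TFRecord via the slim parallel reader."""
  from google.protobuf import text_format  # pylint: disable=g-import-not-at-top

  input_reader_text_proto = """
    shuffle: false
    num_readers: 1
    tf_record_input_reader {
      input_path: '/path/to/examples.record'
    }
  """
  input_reader_proto = input_reader_pb2.InputReader()
  text_format.Merge(input_reader_text_proto, input_reader_proto)
  tensor_dict = build(input_reader_proto)

  # MonitoredSession starts the queue runners created by parallel_read.
  with tf.train.MonitoredSession() as sess:
    return sess.run(tensor_dict)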
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/input_reader_builder.py
input_reader_builder.py
"""Builder function for post processing operations.""" import functools import tensorflow.compat.v1 as tf from object_detection.builders import calibration_builder from object_detection.core import post_processing from object_detection.protos import post_processing_pb2 def build(post_processing_config): """Builds callables for post-processing operations. Builds callables for non-max suppression, score conversion, and (optionally) calibration based on the configuration. Non-max suppression callable takes `boxes`, `scores`, and optionally `clip_window`, `parallel_iterations` `masks, and `scope` as inputs. It returns `nms_boxes`, `nms_scores`, `nms_classes` `nms_masks` and `num_detections`. See post_processing.batch_multiclass_non_max_suppression for the type and shape of these tensors. Score converter callable should be called with `input` tensor. The callable returns the output from one of 3 tf operations based on the configuration - tf.identity, tf.sigmoid or tf.nn.softmax. If a calibration config is provided, score_converter also applies calibration transformations, as defined in calibration_builder.py. See tensorflow documentation for argument and return value descriptions. Args: post_processing_config: post_processing.proto object containing the parameters for the post-processing operations. Returns: non_max_suppressor_fn: Callable for non-max suppression. score_converter_fn: Callable for score conversion. Raises: ValueError: if the post_processing_config is of incorrect type. """ if not isinstance(post_processing_config, post_processing_pb2.PostProcessing): raise ValueError('post_processing_config not of type ' 'post_processing_pb2.Postprocessing.') non_max_suppressor_fn = _build_non_max_suppressor( post_processing_config.batch_non_max_suppression) score_converter_fn = _build_score_converter( post_processing_config.score_converter, post_processing_config.logit_scale) if post_processing_config.HasField('calibration_config'): score_converter_fn = _build_calibrated_score_converter( score_converter_fn, post_processing_config.calibration_config) return non_max_suppressor_fn, score_converter_fn def _build_non_max_suppressor(nms_config): """Builds non-max suppresson based on the nms config. Args: nms_config: post_processing_pb2.PostProcessing.BatchNonMaxSuppression proto. Returns: non_max_suppressor_fn: Callable non-max suppressor. Raises: ValueError: On incorrect iou_threshold or on incompatible values of max_total_detections and max_detections_per_class or on negative soft_nms_sigma. 
""" if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0: raise ValueError('iou_threshold not in [0, 1.0].') if nms_config.max_detections_per_class > nms_config.max_total_detections: raise ValueError('max_detections_per_class should be no greater than ' 'max_total_detections.') if nms_config.soft_nms_sigma < 0.0: raise ValueError('soft_nms_sigma should be non-negative.') if nms_config.use_combined_nms and nms_config.use_class_agnostic_nms: raise ValueError('combined_nms does not support class_agnostic_nms.') non_max_suppressor_fn = functools.partial( post_processing.batch_multiclass_non_max_suppression, score_thresh=nms_config.score_threshold, iou_thresh=nms_config.iou_threshold, max_size_per_class=nms_config.max_detections_per_class, max_total_size=nms_config.max_total_detections, use_static_shapes=nms_config.use_static_shapes, use_class_agnostic_nms=nms_config.use_class_agnostic_nms, max_classes_per_detection=nms_config.max_classes_per_detection, soft_nms_sigma=nms_config.soft_nms_sigma, use_partitioned_nms=nms_config.use_partitioned_nms, use_combined_nms=nms_config.use_combined_nms, change_coordinate_frame=nms_config.change_coordinate_frame, use_hard_nms=nms_config.use_hard_nms, use_cpu_nms=nms_config.use_cpu_nms) return non_max_suppressor_fn def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale): """Create a function to scale logits then apply a Tensorflow function.""" def score_converter_fn(logits): scaled_logits = tf.multiply(logits, 1.0 / logit_scale, name='scale_logits') return tf_score_converter_fn(scaled_logits, name='convert_scores') score_converter_fn.__name__ = '%s_with_logit_scale' % ( tf_score_converter_fn.__name__) return score_converter_fn def _build_score_converter(score_converter_config, logit_scale): """Builds score converter based on the config. Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on the config. Args: score_converter_config: post_processing_pb2.PostProcessing.score_converter. logit_scale: temperature to use for SOFTMAX score_converter. Returns: Callable score converter op. Raises: ValueError: On unknown score converter. """ if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY: return _score_converter_fn_with_logit_scale(tf.identity, logit_scale) if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID: return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale) if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX: return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale) raise ValueError('Unknown score converter.') def _build_calibrated_score_converter(score_converter_fn, calibration_config): """Wraps a score_converter_fn, adding a calibration step. Builds a score converter function with a calibration transformation according to calibration_builder.py. The score conversion function may be applied before or after the calibration transformation, depending on the calibration method. If the method is temperature scaling, the score conversion is after the calibration transformation. Otherwise, the score conversion is before the calibration transformation. Calibration applies positive monotonic transformations to inputs (i.e. score ordering is strictly preserved or adjacent scores are mapped to the same score). When calibration is class-agnostic, the highest-scoring class remains unchanged, unless two adjacent scores are mapped to the same value and one class arbitrarily selected to break the tie. 
  In per-class calibration, it's possible (though rare in practice) that the
  highest-scoring class will change, since positive monotonicity is only
  required to hold within each class.

  Args:
    score_converter_fn: callable that takes logit scores as input.
    calibration_config: post_processing_pb2.PostProcessing.calibration_config.

  Returns:
    Callable calibrated score converter op.
  """
  calibration_fn = calibration_builder.build(calibration_config)
  def calibrated_score_converter_fn(logits):
    if (calibration_config.WhichOneof('calibrator') ==
        'temperature_scaling_calibration'):
      calibrated_logits = calibration_fn(logits)
      return score_converter_fn(calibrated_logits)
    else:
      converted_logits = score_converter_fn(logits)
      return calibration_fn(converted_logits)
  calibrated_score_converter_fn.__name__ = (
      'calibrate_with_%s' % calibration_config.WhichOneof('calibrator'))
  return calibrated_score_converter_fn
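# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): it parses a
# text-format PostProcessing proto and builds the NMS and score-converter
# callables. The thresholds below are hypothetical.
# ---------------------------------------------------------------------------
def _example_post_processing_usage():
  """Builds NMS and score conversion callables from a text proto."""
  from google.protobuf import text_format  # pylint: disable=g-import-not-at-top

  post_processing_text_proto = """
    batch_non_max_suppression {
      score_threshold: 0.5
      iou_threshold: 0.6
      max_detections_per_class: 100
      max_total_detections: 300
    }
    score_converter: SIGMOID
  """
  post_processing_config = post_processing_pb2.PostProcessing()
  text_format.Merge(post_processing_text_proto, post_processing_config)
  non_max_suppressor_fn, score_converter_fn = build(post_processing_config)

  # score_converter_fn maps logits to scores; non_max_suppressor_fn is then
  # called with `boxes` and `scores` tensors (see
  # post_processing.batch_multiclass_non_max_suppression for exact shapes).
  scores = score_converter_fn(tf.constant([[[0.0, 2.0]]], dtype=tf.float32))
  return non_max_suppressor_fn, scores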
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/post_processing_builder.py
post_processing_builder.py
from abc import abstractmethod import tensorflow.compat.v1 as tf BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' MASK_PREDICTIONS = 'mask_predictions' class BoxPredictor(object): """BoxPredictor.""" def __init__(self, is_training, num_classes): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). """ self._is_training = is_training self._num_classes = num_classes @property def is_keras_model(self): return False @property def num_classes(self): return self._num_classes def predict(self, image_features, num_predictions_per_location, scope=None, **params): """Computes encoded object locations and corresponding confidences. Takes a list of high level image feature maps as input and produces a list of box encodings and a list of class scores where each element in the output lists correspond to the feature maps in the input list. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. scope: Variable and Op scope name. **params: Additional keyword arguments for specific implementations of BoxPredictor. Returns: A dictionary containing at least the following tensors. box_encodings: A list of float tensors. Each entry in the list corresponds to a feature map in the input `image_features` list. All tensors in the list have one of the two following shapes: a. [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. b. [batch_size, num_anchors_i, code_size]. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. Raises: ValueError: If length of `image_features` is not equal to length of `num_predictions_per_location`. """ if len(image_features) != len(num_predictions_per_location): raise ValueError('image_feature and num_predictions_per_location must ' 'be of same length, found: {} vs {}'. format(len(image_features), len(num_predictions_per_location))) if scope is not None: with tf.variable_scope(scope): return self._predict(image_features, num_predictions_per_location, **params) return self._predict(image_features, num_predictions_per_location, **params) # TODO(rathodv): num_predictions_per_location could be moved to constructor. # This is currently only used by ConvolutionalBoxPredictor. @abstractmethod def _predict(self, image_features, num_predictions_per_location, **params): """Implementations must override this method. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. **params: Additional keyword arguments for specific implementations of BoxPredictor. Returns: A dictionary containing at least the following tensors. 
box_encodings: A list of float tensors. Each entry in the list corresponds to a feature map in the input `image_features` list. All tensors in the list have one of the two following shapes: a. [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. b. [batch_size, num_anchors_i, code_size]. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ pass class KerasBoxPredictor(tf.keras.layers.Layer): """Keras-based BoxPredictor.""" def __init__(self, is_training, num_classes, freeze_batchnorm, inplace_batchnorm_update, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. """ super(KerasBoxPredictor, self).__init__(name=name) self._is_training = is_training self._num_classes = num_classes self._freeze_batchnorm = freeze_batchnorm self._inplace_batchnorm_update = inplace_batchnorm_update @property def is_keras_model(self): return True @property def num_classes(self): return self._num_classes def call(self, image_features, **kwargs): """Computes encoded object locations and corresponding confidences. Takes a list of high level image feature maps as input and produces a list of box encodings and a list of class scores where each element in the output lists correspond to the feature maps in the input list. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. **kwargs: Additional keyword arguments for specific implementations of BoxPredictor. Returns: A dictionary containing at least the following tensors. box_encodings: A list of float tensors. Each entry in the list corresponds to a feature map in the input `image_features` list. All tensors in the list have one of the two following shapes: a. [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. b. [batch_size, num_anchors_i, code_size]. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ return self._predict(image_features, **kwargs) @abstractmethod def _predict(self, image_features, **kwargs): """Implementations must override this method. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. 
**kwargs: Additional keyword arguments for specific implementations of BoxPredictor. Returns: A dictionary containing at least the following tensors. box_encodings: A list of float tensors. Each entry in the list corresponds to a feature map in the input `image_features` list. All tensors in the list have one of the two following shapes: a. [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. b. [batch_size, num_anchors_i, code_size]. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ raise NotImplementedError
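# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the minimal
# contract a concrete predictor must satisfy: subclass KerasBoxPredictor and
# implement _predict. The 1x1 convolutions, the box code size of 4 and the
# single prediction per spatial location are assumptions for this example.
# ---------------------------------------------------------------------------
class _ExampleKerasBoxPredictor(KerasBoxPredictor):
  """Toy predictor emitting one box and one score vector per location."""

  def __init__(self, is_training, num_classes, name=None):
    super(_ExampleKerasBoxPredictor, self).__init__(
        is_training, num_classes, freeze_batchnorm=False,
        inplace_batchnorm_update=False, name=name)
    self._box_encoder = tf.keras.layers.Conv2D(4, kernel_size=1)
    self._class_predictor = tf.keras.layers.Conv2D(
        num_classes + 1, kernel_size=1)

  def _predict(self, image_features, **kwargs):
    box_encodings = []
    class_predictions = []
    for feature_map in image_features:
      batch_size = tf.shape(feature_map)[0]
      # [batch_size, num_anchors_i, code_size] box encodings.
      box_encodings.append(
          tf.reshape(self._box_encoder(feature_map), [batch_size, -1, 4]))
      # [batch_size, num_anchors_i, num_classes + 1] class scores.
      class_predictions.append(
          tf.reshape(self._class_predictor(feature_map),
                     [batch_size, -1, self.num_classes + 1]))
    return {
        BOX_ENCODINGS: box_encodings,
        CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions
    }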
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/box_predictor.py
box_predictor.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import inspect import math import sys import six from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf from tensorflow.python.ops import control_flow_ops from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import densepose_ops from object_detection.core import keypoint_ops from object_detection.core import preprocessor_cache from object_detection.core import standard_fields as fields from object_detection.utils import autoaugment_utils from object_detection.utils import ops from object_detection.utils import patch_ops from object_detection.utils import shape_utils def _apply_with_random_selector(x, func, num_cases, preprocess_vars_cache=None, key=''): """Computes func(x, sel), with sel sampled from [0...num_cases-1]. If both preprocess_vars_cache AND key are the same between two calls, sel will be the same value in both calls. Args: x: input Tensor. func: Python function to apply. num_cases: Python int32, number of cases to sample sel from. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. key: variable identifier for preprocess_vars_cache. Returns: The result of func(x, sel), where func receives the value of the selector as a python integer, but sel is sampled dynamically. """ generator_func = functools.partial( tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) rand_sel = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.SELECTOR, preprocess_vars_cache, key) # Pass the real x only to one of the func calls. return control_flow_ops.merge([func( control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case) for case in range(num_cases)])[0] def _apply_with_random_selector_tuples(x, func, num_cases, preprocess_vars_cache=None, key=''): """Computes func(x, sel), with sel sampled from [0...num_cases-1]. If both preprocess_vars_cache AND key are the same between two calls, sel will be the same value in both calls. Args: x: A tuple of input tensors. func: Python function to apply. num_cases: Python int32, number of cases to sample sel from. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. key: variable identifier for preprocess_vars_cache. Returns: The result of func(x, sel), where func receives the value of the selector as a python integer, but sel is sampled dynamically. """ num_inputs = len(x) generator_func = functools.partial( tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) rand_sel = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.SELECTOR_TUPLES, preprocess_vars_cache, key) # Pass the real x only to one of the func calls. 
tuples = [list() for t in x] for case in range(num_cases): new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x] output = func(tuple(new_x), case) for j in range(num_inputs): tuples[j].append(output[j]) for i in range(num_inputs): tuples[i] = control_flow_ops.merge(tuples[i])[0] return tuple(tuples) def _get_or_create_preprocess_rand_vars(generator_func, function_id, preprocess_vars_cache, key=''): """Returns a tensor stored in preprocess_vars_cache or using generator_func. If the tensor was previously generated and appears in the PreprocessorCache, the previously generated tensor will be returned. Otherwise, a new tensor is generated using generator_func and stored in the cache. Args: generator_func: A 0-argument function that generates a tensor. function_id: identifier for the preprocessing function used. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. key: identifier for the variable stored. Returns: The generated tensor. """ if preprocess_vars_cache is not None: var = preprocess_vars_cache.get(function_id, key) if var is None: var = generator_func() preprocess_vars_cache.update(function_id, key, var) else: var = generator_func() return var def _random_integer(minval, maxval, seed): """Returns a random 0-D tensor between minval and maxval. Args: minval: minimum value of the random tensor. maxval: maximum value of the random tensor. seed: random seed. Returns: A random 0-D tensor between minval and maxval. """ return tf.random_uniform( [], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed) # TODO(mttang): This method is needed because the current # tf.image.rgb_to_grayscale method does not support quantization. Replace with # tf.image.rgb_to_grayscale after quantization support is added. def _rgb_to_grayscale(images, name=None): """Converts one or more images from RGB to Grayscale. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 1, containing the Grayscale value of the pixels. Args: images: The RGB tensor to convert. Last dimension must have size 3 and should contain RGB values. name: A name for the operation (optional). Returns: The converted grayscale image(s). """ with tf.name_scope(name, 'rgb_to_grayscale', [images]) as name: images = tf.convert_to_tensor(images, name='images') # Remember original dtype to so we can convert back if needed orig_dtype = images.dtype flt_image = tf.image.convert_image_dtype(images, tf.float32) # Reference for converting between RGB and grayscale. # https://en.wikipedia.org/wiki/Luma_%28video%29 rgb_weights = [0.2989, 0.5870, 0.1140] rank_1 = tf.expand_dims(tf.rank(images) - 1, 0) gray_float = tf.reduce_sum( flt_image * rgb_weights, rank_1, keep_dims=True) gray_float.set_shape(images.get_shape()[:-1].concatenate([1])) return tf.image.convert_image_dtype(gray_float, orig_dtype, name=name) def normalize_image(image, original_minval, original_maxval, target_minval, target_maxval): """Normalizes pixel values in the image. Moves the pixel values from the current [original_minval, original_maxval] range to a the [target_minval, target_maxval] range. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels]. original_minval: current image minimum value. original_maxval: current image maximum value. target_minval: target image minimum value. 
target_maxval: target image maximum value. Returns: image: image which is the same shape as input image. """ with tf.name_scope('NormalizeImage', values=[image]): original_minval = float(original_minval) original_maxval = float(original_maxval) target_minval = float(target_minval) target_maxval = float(target_maxval) image = tf.cast(image, dtype=tf.float32) image = tf.subtract(image, original_minval) image = tf.multiply(image, (target_maxval - target_minval) / (original_maxval - original_minval)) image = tf.add(image, target_minval) return image def retain_boxes_above_threshold(boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, threshold=0.0): """Retains boxes whose label weight is above a given threshold. If the label weight for a box is missing (represented by NaN), the box is retained. The boxes that don't pass the threshold will not appear in the returned tensor. Args: boxes: float32 tensor of shape [num_instance, 4] representing boxes location in normalized coordinates. labels: rank 1 int32 tensor of shape [num_instance] containing the object classes. label_weights: float32 tensor of shape [num_instance] representing the weight for each box. label_confidences: float32 tensor of shape [num_instance] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. threshold: scalar python float. Returns: retained_boxes: [num_retained_instance, 4] retianed_labels: [num_retained_instance] retained_label_weights: [num_retained_instance] If multiclass_scores, masks, or keypoints are not None, the function also returns: retained_multiclass_scores: [num_retained_instance, num_classes] retained_masks: [num_retained_instance, height, width] retained_keypoints: [num_retained_instance, num_keypoints, 2] """ with tf.name_scope('RetainBoxesAboveThreshold', values=[boxes, labels, label_weights]): indices = tf.where( tf.logical_or(label_weights > threshold, tf.is_nan(label_weights))) indices = tf.squeeze(indices, axis=1) retained_boxes = tf.gather(boxes, indices) retained_labels = tf.gather(labels, indices) retained_label_weights = tf.gather(label_weights, indices) result = [retained_boxes, retained_labels, retained_label_weights] if label_confidences is not None: retained_label_confidences = tf.gather(label_confidences, indices) result.append(retained_label_confidences) if multiclass_scores is not None: retained_multiclass_scores = tf.gather(multiclass_scores, indices) result.append(retained_multiclass_scores) if masks is not None: retained_masks = tf.gather(masks, indices) result.append(retained_masks) if keypoints is not None: retained_keypoints = tf.gather(keypoints, indices) result.append(retained_keypoints) return result def drop_label_probabilistically(boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, dropped_label=None, drop_probability=0.0, seed=None): """Drops boxes of a certain label with probability drop_probability. Boxes of the label dropped_label will not appear in the returned tensor. 
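  For example (illustrative), with dropped_label=5 and drop_probability=0.9,
  each box labeled 5 is independently removed with probability 0.9, while
  boxes carrying any other label are always retained.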
Args: boxes: float32 tensor of shape [num_instance, 4] representing boxes location in normalized coordinates. labels: rank 1 int32 tensor of shape [num_instance] containing the object classes. label_weights: float32 tensor of shape [num_instance] representing the weight for each box. label_confidences: float32 tensor of shape [num_instance] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. dropped_label: int32 id of label to drop. drop_probability: float32 probability of dropping a label. seed: random seed. Returns: retained_boxes: [num_retained_instance, 4] retianed_labels: [num_retained_instance] retained_label_weights: [num_retained_instance] If multiclass_scores, masks, or keypoints are not None, the function also returns: retained_multiclass_scores: [num_retained_instance, num_classes] retained_masks: [num_retained_instance, height, width] retained_keypoints: [num_retained_instance, num_keypoints, 2] """ with tf.name_scope('DropLabelProbabilistically', values=[boxes, labels]): indices = tf.where( tf.logical_or( tf.random_uniform(tf.shape(labels), seed=seed) > drop_probability, tf.not_equal(labels, dropped_label))) indices = tf.squeeze(indices, axis=1) retained_boxes = tf.gather(boxes, indices) retained_labels = tf.gather(labels, indices) retained_label_weights = tf.gather(label_weights, indices) result = [retained_boxes, retained_labels, retained_label_weights] if label_confidences is not None: retained_label_confidences = tf.gather(label_confidences, indices) result.append(retained_label_confidences) if multiclass_scores is not None: retained_multiclass_scores = tf.gather(multiclass_scores, indices) result.append(retained_multiclass_scores) if masks is not None: retained_masks = tf.gather(masks, indices) result.append(retained_masks) if keypoints is not None: retained_keypoints = tf.gather(keypoints, indices) result.append(retained_keypoints) return result def remap_labels(labels, original_labels=None, new_label=None): """Remaps labels that have an id in original_labels to new_label. Args: labels: rank 1 int32 tensor of shape [num_instance] containing the object classes. original_labels: int list of original labels that should be mapped from. new_label: int label to map to Returns: Remapped labels """ new_labels = labels for original_label in original_labels: change = tf.where( tf.equal(new_labels, original_label), tf.add(tf.zeros_like(new_labels), new_label - original_label), tf.zeros_like(new_labels)) new_labels = tf.add( new_labels, change) new_labels = tf.reshape(new_labels, tf.shape(labels)) return new_labels def _flip_boxes_left_right(boxes): """Left-right flip the boxes. Args: boxes: Float32 tensor containing the bounding boxes -> [..., 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each last dimension is in the form of [ymin, xmin, ymax, xmax]. Returns: Flipped boxes. 
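  Example (illustrative): a box [ymin, xmin, ymax, xmax] = [0.1, 0.2, 0.5, 0.6]
  becomes [0.1, 1.0 - 0.6, 0.5, 1.0 - 0.2] = [0.1, 0.4, 0.5, 0.8]; the y
  coordinates are unchanged by a left-right flip.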
""" ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=-1) flipped_xmin = tf.subtract(1.0, xmax) flipped_xmax = tf.subtract(1.0, xmin) flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], axis=-1) return flipped_boxes def _flip_boxes_up_down(boxes): """Up-down flip the boxes. Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. Returns: Flipped boxes. """ ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) flipped_ymin = tf.subtract(1.0, ymax) flipped_ymax = tf.subtract(1.0, ymin) flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1) return flipped_boxes def _rot90_boxes(boxes): """Rotate boxes counter-clockwise by 90 degrees. Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. Returns: Rotated boxes. """ ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) rotated_ymin = tf.subtract(1.0, xmax) rotated_ymax = tf.subtract(1.0, xmin) rotated_xmin = ymin rotated_xmax = ymax rotated_boxes = tf.concat( [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1) return rotated_boxes def _flip_masks_left_right(masks): """Left-right flip masks. Args: masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. Returns: flipped masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. """ return masks[:, :, ::-1] def _flip_masks_up_down(masks): """Up-down flip masks. Args: masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. Returns: flipped masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. """ return masks[:, ::-1, :] def _rot90_masks(masks): """Rotate masks counter-clockwise by 90 degrees. Args: masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. Returns: rotated masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. """ masks = tf.transpose(masks, [0, 2, 1]) return masks[:, ::-1, :] def random_horizontal_flip(image, boxes=None, masks=None, keypoints=None, keypoint_visibilities=None, densepose_part_ids=None, densepose_surface_coords=None, keypoint_depths=None, keypoint_depth_weights=None, keypoint_flip_permutation=None, probability=0.5, seed=None, preprocess_vars_cache=None): """Randomly flips the image and detections horizontally. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. keypoint_visibilities: (optional) rank 2 bool tensor with shape [num_instances, num_keypoints]. 
    densepose_part_ids: (optional) rank 2 int32 tensor with shape
      [num_instances, num_points] holding the part id for each sampled point.
      These part_ids are 0-indexed, where the first non-background part has
      index 0.
    densepose_surface_coords: (optional) rank 3 float32 tensor with shape
      [num_instances, num_points, 4]. The DensePose coordinates are of the form
      (y, x, v, u) where (y, x) are the normalized image coordinates for a
      sampled point, and (v, u) is the surface coordinate for the part.
    keypoint_depths: (optional) rank 2 float32 tensor with shape
      [num_instances, num_keypoints] representing the relative depth of the
      keypoints.
    keypoint_depth_weights: (optional) rank 2 float32 tensor with shape
      [num_instances, num_keypoints] representing the weights of the relative
      depth of the keypoints.
    keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
      permutation.
    probability: the probability of performing this augmentation.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
      performed augmentations. Updated in-place. If this function is called
      multiple times with the same non-null cache, it will perform
      deterministically.

  Returns:
    image: image which is the same shape as input image. If boxes, masks,
      keypoints, keypoint_visibilities, keypoint_flip_permutation,
      densepose_part_ids, or densepose_surface_coords are not None, the
      function also returns the following tensors.
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
      Boxes are in normalized form meaning their coordinates vary
      between [0, 1].
    masks: rank 3 float32 tensor with shape [num_instances, height, width]
      containing instance masks.
    keypoints: rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2].
    keypoint_visibilities: rank 2 bool tensor with shape
      [num_instances, num_keypoints].
    densepose_part_ids: rank 2 int32 tensor with shape
      [num_instances, num_points].
    densepose_surface_coords: rank 3 float32 tensor with shape
      [num_instances, num_points, 4].
    keypoint_depths: rank 2 float32 tensor with shape
      [num_instances, num_keypoints].
    keypoint_depth_weights: rank 2 float32 tensor with shape
      [num_instances, num_keypoints].

  Raises:
    ValueError: if keypoints are provided but keypoint_flip_permutation is not.
    ValueError: if only one of densepose_part_ids and densepose_surface_coords
      is provided.
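  Example usage (illustrative sketch; `image`, `boxes` and `keypoints` are
  assumed to already be tensors in the formats documented above, and the
  4-keypoint flip permutation below is hypothetical):

    flip_perm = (0, 2, 1, 3)  # swaps the left/right keypoints 1 and 2
    image, boxes, keypoints = random_horizontal_flip(
        image, boxes=boxes, keypoints=keypoints,
        keypoint_flip_permutation=flip_perm)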
""" def _flip_image(image): # flip image image_flipped = tf.image.flip_left_right(image) return image_flipped if keypoints is not None and keypoint_flip_permutation is None: raise ValueError( 'keypoints are provided but keypoints_flip_permutation is not provided') if ((densepose_part_ids is not None and densepose_surface_coords is None) or (densepose_part_ids is None and densepose_surface_coords is not None)): raise ValueError( 'Must provide both `densepose_part_ids` and `densepose_surface_coords`') with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]): result = [] # random variable defining whether to do flip or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_flip_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.HORIZONTAL_FLIP, preprocess_vars_cache) do_a_flip_random = tf.less(do_a_flip_random, probability) # flip image image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None and keypoint_flip_permutation is not None: permutation = keypoint_flip_permutation keypoints = tf.cond( do_a_flip_random, lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation), lambda: keypoints) result.append(keypoints) # flip keypoint visibilities if (keypoint_visibilities is not None and keypoint_flip_permutation is not None): kpt_flip_perm = keypoint_flip_permutation keypoint_visibilities = tf.cond( do_a_flip_random, lambda: tf.gather(keypoint_visibilities, kpt_flip_perm, axis=1), lambda: keypoint_visibilities) result.append(keypoint_visibilities) # flip DensePose parts and coordinates if densepose_part_ids is not None: flip_densepose_fn = functools.partial( densepose_ops.flip_horizontal, densepose_part_ids, densepose_surface_coords) densepose_tensors = tf.cond( do_a_flip_random, flip_densepose_fn, lambda: (densepose_part_ids, densepose_surface_coords)) result.extend(densepose_tensors) # flip keypoint depths and weights. if (keypoint_depths is not None and keypoint_flip_permutation is not None): kpt_flip_perm = keypoint_flip_permutation keypoint_depths = tf.cond( do_a_flip_random, lambda: tf.gather(keypoint_depths, kpt_flip_perm, axis=1), lambda: keypoint_depths) keypoint_depth_weights = tf.cond( do_a_flip_random, lambda: tf.gather(keypoint_depth_weights, kpt_flip_perm, axis=1), lambda: keypoint_depth_weights) result.append(keypoint_depths) result.append(keypoint_depth_weights) return tuple(result) def random_vertical_flip(image, boxes=None, masks=None, keypoints=None, keypoint_flip_permutation=None, probability=0.5, seed=None, preprocess_vars_cache=None): """Randomly flips the image and detections vertically. The probability of flipping the image is 50%. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. 
keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip permutation. probability: the probability of performing this augmentation. seed: random seed preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. If boxes, masks, keypoints, and keypoint_flip_permutation are not None, the function also returns the following tensors. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] Raises: ValueError: if keypoints are provided but keypoint_flip_permutation is not. """ def _flip_image(image): # flip image image_flipped = tf.image.flip_up_down(image) return image_flipped if keypoints is not None and keypoint_flip_permutation is None: raise ValueError( 'keypoints are provided but keypoints_flip_permutation is not provided') with tf.name_scope('RandomVerticalFlip', values=[image, boxes]): result = [] # random variable defining whether to do flip or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_flip_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.VERTICAL_FLIP, preprocess_vars_cache) do_a_flip_random = tf.less(do_a_flip_random, probability) # flip image image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None and keypoint_flip_permutation is not None: permutation = keypoint_flip_permutation keypoints = tf.cond( do_a_flip_random, lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation), lambda: keypoints) result.append(keypoints) return tuple(result) def random_rotation90(image, boxes=None, masks=None, keypoints=None, keypoint_rot_permutation=None, probability=0.5, seed=None, preprocess_vars_cache=None): """Randomly rotates the image and detections 90 degrees counter-clockwise. The probability of rotating the image is 50%. This can be combined with random_horizontal_flip and random_vertical_flip to produce an output with a uniform distribution of the eight possible 90 degree rotation / reflection combinations. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. 
keypoint_rot_permutation: rank 1 int32 tensor containing the keypoint flip permutation. probability: the probability of performing this augmentation. seed: random seed preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. If boxes, masks, and keypoints, are not None, the function also returns the following tensors. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ def _rot90_image(image): # flip image image_rotated = tf.image.rot90(image) return image_rotated with tf.name_scope('RandomRotation90', values=[image, boxes]): result = [] # random variable defining whether to rotate by 90 degrees or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_rot90_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ROTATION90, preprocess_vars_cache) do_a_rot90_random = tf.less(do_a_rot90_random, probability) # flip image image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None: keypoints = tf.cond( do_a_rot90_random, lambda: keypoint_ops.rot90(keypoints, keypoint_rot_permutation), lambda: keypoints) result.append(keypoints) return tuple(result) def random_pixel_value_scale(image, minval=0.9, maxval=1.1, seed=None, preprocess_vars_cache=None): """Scales each value in the pixels of the image. This function scales each pixel independent of the other ones. For each value in image tensor, draws a random number between minval and maxval and multiples the values with them. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. minval: lower ratio of scaling pixel values. maxval: upper ratio of scaling pixel values. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ with tf.name_scope('RandomPixelValueScale', values=[image]): generator_func = functools.partial( tf.random_uniform, tf.shape(image), minval=minval, maxval=maxval, dtype=tf.float32, seed=seed) color_coef = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.PIXEL_VALUE_SCALE, preprocess_vars_cache) image = tf.multiply(image, color_coef) image = tf.clip_by_value(image, 0.0, 255.0) return image def random_image_scale(image, masks=None, min_scale_ratio=0.5, max_scale_ratio=2.0, seed=None, preprocess_vars_cache=None): """Scales the image size. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels]. 
masks: (optional) rank 3 float32 tensor containing masks with size [height, width, num_masks]. The value is set to None if there are no masks. min_scale_ratio: minimum scaling ratio. max_scale_ratio: maximum scaling ratio. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. masks: If masks is not none, resized masks which are the same rank as input masks will be returned. """ with tf.name_scope('RandomImageScale', values=[image]): result = [] image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] generator_func = functools.partial( tf.random_uniform, [], minval=min_scale_ratio, maxval=max_scale_ratio, dtype=tf.float32, seed=seed) size_coef = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.IMAGE_SCALE, preprocess_vars_cache) image_newysize = tf.cast( tf.multiply(tf.cast(image_height, dtype=tf.float32), size_coef), dtype=tf.int32) image_newxsize = tf.cast( tf.multiply(tf.cast(image_width, dtype=tf.float32), size_coef), dtype=tf.int32) image = tf.image.resize_images( image, [image_newysize, image_newxsize], align_corners=True) result.append(image) if masks is not None: masks = tf.image.resize_images( masks, [image_newysize, image_newxsize], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=True) result.append(masks) return tuple(result) def _augment_only_rgb_channels(image, augment_function): """Augments only the RGB slice of an image with additional channels.""" rgb_slice = image[:, :, :3] augmented_rgb_slice = augment_function(rgb_slice) image = tf.concat([augmented_rgb_slice, image[:, :, 3:]], -1) return image def random_rgb_to_gray(image, probability=0.1, seed=None, preprocess_vars_cache=None): """Changes the image from RGB to Grayscale with the given probability. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. probability: the probability of returning a grayscale image. The probability should be a number between [0, 1]. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ def _image_to_gray(image): image_gray1 = _rgb_to_grayscale(image) image_gray3 = tf.image.grayscale_to_rgb(image_gray1) return image_gray3 with tf.name_scope('RandomRGBtoGray', values=[image]): # random variable defining whether to change to grayscale or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_gray_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.RGB_TO_GRAY, preprocess_vars_cache) image = tf.cond( tf.greater(do_gray_random, probability), lambda: image, lambda: _augment_only_rgb_channels(image, _image_to_gray)) return image def adjust_gamma(image, gamma=1.0, gain=1.0): """Adjusts the gamma. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. gamma: the gamma value. Must be a non-negative real number. gain: a constant multiplier. Returns: image: image which is the same shape as input image. 
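  The transform applied to the RGB channels (per the code below) is
  output = gain * (input / 255) ** gamma * 255, clipped to [0, 255]. For
  example, with gamma=2.0 and gain=1.0 a pixel value of 128 maps to roughly
  (128 / 255) ** 2 * 255 ~= 64.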
""" with tf.name_scope('AdjustGamma', values=[image]): def _adjust_gamma(image): image = tf.image.adjust_gamma(image / 255, gamma, gain) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image image = _augment_only_rgb_channels(image, _adjust_gamma) return image def random_adjust_brightness(image, max_delta=0.2, seed=None, preprocess_vars_cache=None): """Randomly adjusts brightness. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. max_delta: how much to change the brightness. A value between [0, 1). seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ with tf.name_scope('RandomAdjustBrightness', values=[image]): generator_func = functools.partial(tf.random_uniform, [], -max_delta, max_delta, seed=seed) delta = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_BRIGHTNESS, preprocess_vars_cache) def _adjust_brightness(image): image = tf.image.adjust_brightness(image / 255, delta) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image image = _augment_only_rgb_channels(image, _adjust_brightness) return image def random_adjust_contrast(image, min_delta=0.8, max_delta=1.25, seed=None, preprocess_vars_cache=None): """Randomly adjusts contrast. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. min_delta: see max_delta. max_delta: how much to change the contrast. Contrast will change with a value between min_delta and max_delta. This value will be multiplied to the current contrast of the image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ with tf.name_scope('RandomAdjustContrast', values=[image]): generator_func = functools.partial(tf.random_uniform, [], min_delta, max_delta, seed=seed) contrast_factor = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_CONTRAST, preprocess_vars_cache) def _adjust_contrast(image): image = tf.image.adjust_contrast(image / 255, contrast_factor) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image image = _augment_only_rgb_channels(image, _adjust_contrast) return image def random_adjust_hue(image, max_delta=0.02, seed=None, preprocess_vars_cache=None): """Randomly adjusts hue. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. max_delta: change hue randomly with a value between 0 and max_delta. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. 
Returns: image: image which is the same shape as input image. """ with tf.name_scope('RandomAdjustHue', values=[image]): generator_func = functools.partial(tf.random_uniform, [], -max_delta, max_delta, seed=seed) delta = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_HUE, preprocess_vars_cache) def _adjust_hue(image): image = tf.image.adjust_hue(image / 255, delta) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image image = _augment_only_rgb_channels(image, _adjust_hue) return image def random_adjust_saturation(image, min_delta=0.8, max_delta=1.25, seed=None, preprocess_vars_cache=None): """Randomly adjusts saturation. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. min_delta: see max_delta. max_delta: how much to change the saturation. Saturation will change with a value between min_delta and max_delta. This value will be multiplied to the current saturation of the image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ with tf.name_scope('RandomAdjustSaturation', values=[image]): generator_func = functools.partial(tf.random_uniform, [], min_delta, max_delta, seed=seed) saturation_factor = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_SATURATION, preprocess_vars_cache) def _adjust_saturation(image): image = tf.image.adjust_saturation(image / 255, saturation_factor) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image image = _augment_only_rgb_channels(image, _adjust_saturation) return image def random_distort_color(image, color_ordering=0, preprocess_vars_cache=None): """Randomly distorts color. Randomly distorts color using a combination of brightness, hue, contrast and saturation changes. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. color_ordering: Python int, a type of distortion (valid values: 0, 1). preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. Raises: ValueError: if color_ordering is not in {0, 1}. """ with tf.name_scope('RandomDistortColor', values=[image]): if color_ordering == 0: image = random_adjust_brightness( image, max_delta=32. / 255., preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_saturation( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_hue( image, max_delta=0.2, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_contrast( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) elif color_ordering == 1: image = random_adjust_brightness( image, max_delta=32. 
/ 255., preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_contrast( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_saturation( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_hue( image, max_delta=0.2, preprocess_vars_cache=preprocess_vars_cache) else: raise ValueError('color_ordering must be in {0, 1}') return image def random_jitter_boxes(boxes, ratio=0.05, jitter_mode='default', seed=None): """Randomly jitters boxes in image. Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. ratio: The ratio of the box width and height that the corners can jitter. For example if the width is 100 pixels and ratio is 0.05, the corners can jitter up to 5 pixels in the x direction. jitter_mode: One of shrink - Only shrinks boxes. expand - Only expands boxes. expand_symmetric - Expands the boxes symmetrically along height and width dimensions without changing the box center. The ratios of expansion along X, Y dimensions are independent shrink_symmetric - Shrinks the boxes symmetrically along height and width dimensions without changing the box center. The ratios of shrinking along X, Y dimensions are independent expand_symmetric_xy - Expands the boxes symetrically along height and width dimensions and the ratio of expansion is same for both. shrink_symmetric_xy - Shrinks the boxes symetrically along height and width dimensions and the ratio of shrinking is same for both. default - Randomly and independently perturbs each box boundary. seed: random seed. Returns: boxes: boxes which is the same shape as input boxes. """ with tf.name_scope('RandomJitterBoxes'): ymin, xmin, ymax, xmax = (boxes[:, i] for i in range(4)) blist = box_list.BoxList(boxes) ycenter, xcenter, height, width = blist.get_center_coordinates_and_sizes() height = tf.maximum(tf.abs(height), 1e-6) width = tf.maximum(tf.abs(width), 1e-6) if jitter_mode in ['shrink', 'shrink_symmetric', 'shrink_symmetric_xy']: min_ratio, max_ratio = -ratio, 0 elif jitter_mode in ['expand', 'expand_symmetric', 'expand_symmetric_xy']: min_ratio, max_ratio = 0, ratio elif jitter_mode == 'default': min_ratio, max_ratio = -ratio, ratio else: raise ValueError('Unknown jitter mode - %s' % jitter_mode) num_boxes = tf.shape(boxes)[0] if jitter_mode in ['expand_symmetric', 'shrink_symmetric', 'expand_symmetric_xy', 'shrink_symmetric_xy']: distortion = 1.0 + tf.random.uniform( [num_boxes, 2], minval=min_ratio, maxval=max_ratio, dtype=tf.float32, seed=seed) height_distortion = distortion[:, 0] width_distortion = distortion[:, 1] # This is to ensure that all boxes are augmented symmetrically. We clip # each boundary to lie within the image, and when doing so, we also # adjust its symmetric counterpart. 
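      # Derivation of the bound: with centre ycenter and height h, the jittered
      # box ycenter +/- d * h / 2 stays inside [0, 1] iff
      # |d| <= min(2 * ycenter / h, 2 * (1 - ycenter) / h); the analogous bound
      # holds for the width, and both are computed below.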
max_height_distortion = tf.abs(tf.minimum( (2.0 * ycenter) / height, 2.0 * (1 - ycenter) / height)) max_width_distortion = tf.abs(tf.minimum( (2.0 * xcenter) / width, 2.0 * (1 - xcenter) / width)) if jitter_mode in ['expand_symmetric_xy', 'shrink_symmetric_xy']: height_distortion = width_distortion = distortion[:, 0] max_height_distortion = max_width_distortion = ( tf.minimum(max_width_distortion, max_height_distortion)) height_distortion = tf.clip_by_value( height_distortion, -max_height_distortion, max_height_distortion) width_distortion = tf.clip_by_value( width_distortion, -max_width_distortion, max_width_distortion) ymin = ycenter - (height * height_distortion / 2.0) ymax = ycenter + (height * height_distortion / 2.0) xmin = xcenter - (width * width_distortion / 2.0) xmax = xcenter + (width * width_distortion / 2.0) elif jitter_mode in ['expand', 'shrink', 'default']: distortion = 1.0 + tf.random.uniform( [num_boxes, 4], minval=min_ratio, maxval=max_ratio, dtype=tf.float32, seed=seed) ymin_jitter = height * distortion[:, 0] xmin_jitter = width * distortion[:, 1] ymax_jitter = height * distortion[:, 2] xmax_jitter = width * distortion[:, 3] ymin, ymax = ycenter - (ymin_jitter / 2.0), ycenter + (ymax_jitter / 2.0) xmin, xmax = xcenter - (xmin_jitter / 2.0), xcenter + (xmax_jitter / 2.0) else: raise ValueError('Unknown jitter mode - %s' % jitter_mode) boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1) return tf.clip_by_value(boxes, 0.0, 1.0) def _strict_random_crop_image(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, mask_weights=None, keypoints=None, keypoint_visibilities=None, densepose_num_points=None, densepose_part_ids=None, densepose_surface_coords=None, min_object_covered=1.0, aspect_ratio_range=(0.75, 1.33), area_range=(0.1, 1.0), overlap_thresh=0.3, clip_boxes=True, preprocess_vars_cache=None): """Performs random crop. Note: Keypoint coordinates that are outside the crop will be set to NaN, which is consistent with the original keypoint encoding for non-existing keypoints. This function always crops the image and is supposed to be used by `random_crop_image` function which sometimes returns the image unchanged. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes with shape [num_instances, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. mask_weights: (optional) rank 1 float32 tensor with shape [num_instances] with instance masks weights. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. keypoint_visibilities: (optional) rank 2 bool tensor with shape [num_instances, num_keypoints]. 
densepose_num_points: (optional) rank 1 int32 tensor with shape [num_instances] with the number of sampled points per instance. densepose_part_ids: (optional) rank 2 int32 tensor with shape [num_instances, num_points] holding the part id for each sampled point. These part_ids are 0-indexed, where the first non-background part has index 0. densepose_surface_coords: (optional) rank 3 float32 tensor with shape [num_instances, num_points, 4]. The DensePose coordinates are of the form (y, x, v, u) where (y, x) are the normalized image coordinates for a sampled point, and (v, u) is the surface coordinate for the part. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, multiclass_scores, masks, mask_weights, keypoints, keypoint_visibilities, densepose_num_points, densepose_part_ids, or densepose_surface_coords is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. mask_weights: rank 1 float32 tensor with shape [num_instances] with mask weights. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] keypoint_visibilities: rank 2 bool tensor with shape [num_instances, num_keypoints] densepose_num_points: rank 1 int32 tensor with shape [num_instances]. densepose_part_ids: rank 2 int32 tensor with shape [num_instances, num_points]. densepose_surface_coords: rank 3 float32 tensor with shape [num_instances, num_points, 4]. Raises: ValueError: If some but not all of the DensePose tensors are provided. """ with tf.name_scope('RandomCropImage', values=[image, boxes]): densepose_tensors = [densepose_num_points, densepose_part_ids, densepose_surface_coords] if (any(t is not None for t in densepose_tensors) and not all(t is not None for t in densepose_tensors)): raise ValueError('If cropping DensePose labels, must provide ' '`densepose_num_points`, `densepose_part_ids`, and ' '`densepose_surface_coords`') image_shape = tf.shape(image) # boxes are [N, 4]. Lets first make them [N, 1, 4]. 
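    # tf.image.sample_distorted_bounding_box expects a 3-D tensor of boxes in
    # normalized [0, 1] coordinates, hence the expand_dims and the clipping to
    # [0.0, 1.0] below.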
boxes_expanded = tf.expand_dims( tf.clip_by_value( boxes, clip_value_min=0.0, clip_value_max=1.0), 1) generator_func = functools.partial( tf.image.sample_distorted_bounding_box, image_shape, bounding_boxes=boxes_expanded, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=100, use_image_if_no_bounding_boxes=True) # for ssd cropping, each value of min_object_covered has its own # cached random variable sample_distorted_bounding_box = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.STRICT_CROP_IMAGE, preprocess_vars_cache, key=min_object_covered) im_box_begin, im_box_size, im_box = sample_distorted_bounding_box im_box_end = im_box_begin + im_box_size new_image = image[im_box_begin[0]:im_box_end[0], im_box_begin[1]:im_box_end[1], :] new_image.set_shape([None, None, image.get_shape()[2]]) # [1, 4] im_box_rank2 = tf.squeeze(im_box, axis=[0]) # [4] im_box_rank1 = tf.squeeze(im_box) boxlist = box_list.BoxList(boxes) boxlist.add_field('labels', labels) if label_weights is not None: boxlist.add_field('label_weights', label_weights) if label_confidences is not None: boxlist.add_field('label_confidences', label_confidences) if multiclass_scores is not None: boxlist.add_field('multiclass_scores', multiclass_scores) im_boxlist = box_list.BoxList(im_box_rank2) # remove boxes that are outside cropped image boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window( boxlist, im_box_rank1) # remove boxes that are outside image overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes( boxlist, im_boxlist, overlap_thresh) # change the coordinate of the remaining boxes new_labels = overlapping_boxlist.get_field('labels') new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist, im_box_rank1) new_boxes = new_boxlist.get() if clip_boxes: new_boxes = tf.clip_by_value( new_boxes, clip_value_min=0.0, clip_value_max=1.0) result = [new_image, new_boxes, new_labels] if label_weights is not None: new_label_weights = overlapping_boxlist.get_field('label_weights') result.append(new_label_weights) if label_confidences is not None: new_label_confidences = overlapping_boxlist.get_field('label_confidences') result.append(new_label_confidences) if multiclass_scores is not None: new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores') result.append(new_multiclass_scores) if masks is not None: masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids) masks_of_boxes_completely_inside_window = tf.gather( masks_of_boxes_inside_window, keep_ids) new_masks = masks_of_boxes_completely_inside_window[:, im_box_begin[ 0]:im_box_end[0], im_box_begin[1]:im_box_end[1]] result.append(new_masks) if mask_weights is not None: mask_weights_inside_window = tf.gather(mask_weights, inside_window_ids) mask_weights_completely_inside_window = tf.gather( mask_weights_inside_window, keep_ids) result.append(mask_weights_completely_inside_window) if keypoints is not None: keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids) keypoints_of_boxes_completely_inside_window = tf.gather( keypoints_of_boxes_inside_window, keep_ids) new_keypoints = keypoint_ops.change_coordinate_frame( keypoints_of_boxes_completely_inside_window, im_box_rank1) if clip_boxes: new_keypoints = keypoint_ops.prune_outside_window(new_keypoints, [0.0, 0.0, 1.0, 1.0]) result.append(new_keypoints) if keypoint_visibilities is not None: kpt_vis_of_boxes_inside_window = 
tf.gather(keypoint_visibilities, inside_window_ids) kpt_vis_of_boxes_completely_inside_window = tf.gather( kpt_vis_of_boxes_inside_window, keep_ids) if clip_boxes: # Set any keypoints with NaN coordinates to invisible. new_kpt_visibilities = keypoint_ops.set_keypoint_visibilities( new_keypoints, kpt_vis_of_boxes_completely_inside_window) result.append(new_kpt_visibilities) if densepose_num_points is not None: filtered_dp_tensors = [] for dp_tensor in densepose_tensors: dp_tensor_inside_window = tf.gather(dp_tensor, inside_window_ids) dp_tensor_completely_inside_window = tf.gather(dp_tensor_inside_window, keep_ids) filtered_dp_tensors.append(dp_tensor_completely_inside_window) new_dp_num_points = filtered_dp_tensors[0] new_dp_point_ids = filtered_dp_tensors[1] new_dp_surf_coords = densepose_ops.change_coordinate_frame( filtered_dp_tensors[2], im_box_rank1) if clip_boxes: new_dp_num_points, new_dp_point_ids, new_dp_surf_coords = ( densepose_ops.prune_outside_window( new_dp_num_points, new_dp_point_ids, new_dp_surf_coords, window=[0.0, 0.0, 1.0, 1.0])) result.extend([new_dp_num_points, new_dp_point_ids, new_dp_surf_coords]) return tuple(result) def random_crop_image(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, mask_weights=None, keypoints=None, keypoint_visibilities=None, densepose_num_points=None, densepose_part_ids=None, densepose_surface_coords=None, min_object_covered=1.0, aspect_ratio_range=(0.75, 1.33), area_range=(0.1, 1.0), overlap_thresh=0.3, clip_boxes=True, random_coef=0.0, seed=None, preprocess_vars_cache=None): """Randomly crops the image. Given the input image and its bounding boxes, this op randomly crops a subimage. Given a user-provided set of input constraints, the crop window is resampled until it satisfies these constraints. If within 100 trials it is unable to find a valid crop, the original image is returned. See the Args section for a description of the input constraints. Both input boxes and returned Boxes are in normalized form (e.g., lie in the unit square [0, 1]). This function will return the original image with probability random_coef. Note: Keypoint coordinates that are outside the crop will be set to NaN, which is consistent with the original keypoint encoding for non-existing keypoints. Also, the keypoint visibility will be set to False. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes with shape [num_instances, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances]. representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. mask_weights: (optional) rank 1 float32 tensor with shape [num_instances] containing weights for each instance mask. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. 
keypoint_visibilities: (optional) rank 2 bool tensor with shape [num_instances, num_keypoints]. densepose_num_points: (optional) rank 1 int32 tensor with shape [num_instances] with the number of sampled points per instance. densepose_part_ids: (optional) rank 2 int32 tensor with shape [num_instances, num_points] holding the part id for each sampled point. These part_ids are 0-indexed, where the first non-background part has index 0. densepose_surface_coords: (optional) rank 3 float32 tensor with shape [num_instances, num_points, 4]. The DensePose coordinates are of the form (y, x, v, u) where (y, x) are the normalized image coordinates for a sampled point, and (v, u) is the surface coordinate for the part. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: Image shape will be [new_height, new_width, channels]. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, multiclass_scores, masks, keypoints, keypoint_visibilities, densepose_num_points, densepose_part_ids, densepose_surface_coords is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. mask_weights: rank 1 float32 tensor with shape [num_instances]. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] keypoint_visibilities: rank 2 bool tensor with shape [num_instances, num_keypoints] densepose_num_points: rank 1 int32 tensor with shape [num_instances]. densepose_part_ids: rank 2 int32 tensor with shape [num_instances, num_points]. densepose_surface_coords: rank 3 float32 tensor with shape [num_instances, num_points, 4]. """ def strict_random_crop_image_fn(): return _strict_random_crop_image( image, boxes, labels, label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, masks=masks, mask_weights=mask_weights, keypoints=keypoints, keypoint_visibilities=keypoint_visibilities, densepose_num_points=densepose_num_points, densepose_part_ids=densepose_part_ids, densepose_surface_coords=densepose_surface_coords, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, preprocess_vars_cache=preprocess_vars_cache) # avoids tf.cond to make faster RCNN training on borg. See b/140057645. 
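  # When random_coef is effectively zero the original image can never be
  # returned, so the strict crop is called directly rather than wrapping it in
  # a tf.cond whose other branch would be dead.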
if random_coef < sys.float_info.min: result = strict_random_crop_image_fn() else: generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_crop_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.CROP_IMAGE, preprocess_vars_cache) do_a_crop_random = tf.greater(do_a_crop_random, random_coef) outputs = [image, boxes, labels] if label_weights is not None: outputs.append(label_weights) if label_confidences is not None: outputs.append(label_confidences) if multiclass_scores is not None: outputs.append(multiclass_scores) if masks is not None: outputs.append(masks) if mask_weights is not None: outputs.append(mask_weights) if keypoints is not None: outputs.append(keypoints) if keypoint_visibilities is not None: outputs.append(keypoint_visibilities) if densepose_num_points is not None: outputs.extend([densepose_num_points, densepose_part_ids, densepose_surface_coords]) result = tf.cond(do_a_crop_random, strict_random_crop_image_fn, lambda: tuple(outputs)) return result def random_pad_image(image, boxes, masks=None, keypoints=None, densepose_surface_coords=None, min_image_size=None, max_image_size=None, pad_color=None, center_pad=False, seed=None, preprocess_vars_cache=None): """Randomly pads the image. This function randomly pads the image with zeros. The final size of the padded image will be between min_image_size and max_image_size. if min_image_size is smaller than the input image size, min_image_size will be set to the input image size. The same for max_image_size. The input image will be located at a uniformly random location inside the padded image. The relative location of the boxes to the original image will remain the same. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [N, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [N, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. densepose_surface_coords: (optional) rank 3 float32 tensor with shape [N, num_points, 4]. The DensePose coordinates are of the form (y, x, v, u) where (y, x) are the normalized image coordinates for a sampled point, and (v, u) is the surface coordinate for the part. min_image_size: a tensor of size [min_height, min_width], type tf.int32. If passed as None, will be set to image size [height, width]. max_image_size: a tensor of size [max_height, max_width], type tf.int32. If passed as None, will be set to twice the image [height * 2, width * 2]. pad_color: padding color. A rank 1 tensor of [channels] with dtype= tf.float32. if set as None, it will be set to average color of the input image. center_pad: whether the original image will be padded to the center, or randomly padded (which is default). seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: Image shape will be [new_height, new_width, channels]. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. 
if masks is not None, the function also returns: masks: rank 3 float32 tensor with shape [N, new_height, new_width] if keypoints is not None, the function also returns: keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2] if densepose_surface_coords is not None, the function also returns: densepose_surface_coords: rank 3 float32 tensor with shape [num_instances, num_points, 4] """ if pad_color is None: pad_color = tf.reduce_mean(image, axis=[0, 1]) image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] if max_image_size is None: max_image_size = tf.stack([image_height * 2, image_width * 2]) max_image_size = tf.maximum(max_image_size, tf.stack([image_height, image_width])) if min_image_size is None: min_image_size = tf.stack([image_height, image_width]) min_image_size = tf.maximum(min_image_size, tf.stack([image_height, image_width])) target_height = tf.cond( max_image_size[0] > min_image_size[0], lambda: _random_integer(min_image_size[0], max_image_size[0], seed), lambda: max_image_size[0]) target_width = tf.cond( max_image_size[1] > min_image_size[1], lambda: _random_integer(min_image_size[1], max_image_size[1], seed), lambda: max_image_size[1]) offset_height = tf.cond( target_height > image_height, lambda: _random_integer(0, target_height - image_height, seed), lambda: tf.constant(0, dtype=tf.int32)) offset_width = tf.cond( target_width > image_width, lambda: _random_integer(0, target_width - image_width, seed), lambda: tf.constant(0, dtype=tf.int32)) if center_pad: offset_height = tf.cast(tf.floor((target_height - image_height) / 2), tf.int32) offset_width = tf.cast(tf.floor((target_width - image_width) / 2), tf.int32) gen_func = lambda: (target_height, target_width, offset_height, offset_width) params = _get_or_create_preprocess_rand_vars( gen_func, preprocessor_cache.PreprocessorCache.PAD_IMAGE, preprocess_vars_cache) target_height, target_width, offset_height, offset_width = params new_image = tf.image.pad_to_bounding_box( image, offset_height=offset_height, offset_width=offset_width, target_height=target_height, target_width=target_width) # Setting color of the padded pixels image_ones = tf.ones_like(image) image_ones_padded = tf.image.pad_to_bounding_box( image_ones, offset_height=offset_height, offset_width=offset_width, target_height=target_height, target_width=target_width) image_color_padded = (1.0 - image_ones_padded) * pad_color new_image += image_color_padded # setting boxes new_window = tf.cast( tf.stack([ -offset_height, -offset_width, target_height - offset_height, target_width - offset_width ]), dtype=tf.float32) new_window /= tf.cast( tf.stack([image_height, image_width, image_height, image_width]), dtype=tf.float32) boxlist = box_list.BoxList(boxes) new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window) new_boxes = new_boxlist.get() result = [new_image, new_boxes] if masks is not None: new_masks = tf.image.pad_to_bounding_box( masks[:, :, :, tf.newaxis], offset_height=offset_height, offset_width=offset_width, target_height=target_height, target_width=target_width)[:, :, :, 0] result.append(new_masks) if keypoints is not None: new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window) result.append(new_keypoints) if densepose_surface_coords is not None: new_densepose_surface_coords = densepose_ops.change_coordinate_frame( densepose_surface_coords, new_window) result.append(new_densepose_surface_coords) return tuple(result) def random_absolute_pad_image(image, boxes, masks=None, 
keypoints=None, densepose_surface_coords=None, max_height_padding=None, max_width_padding=None, pad_color=None, seed=None, preprocess_vars_cache=None): """Randomly pads the image by small absolute amounts. As random_pad_image above, but the padding is of size [0, max_height_padding] or [0, max_width_padding] instead of padding to a fixed size of max_height_padding for all images. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [N, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [N, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. densepose_surface_coords: (optional) rank 3 float32 tensor with shape [N, num_points, 4]. The DensePose coordinates are of the form (y, x, v, u) where (y, x) are the normalized image coordinates for a sampled point, and (v, u) is the surface coordinate for the part. max_height_padding: a scalar tf.int32 tensor denoting the maximum amount of height padding. The padding will be chosen uniformly at random from [0, max_height_padding). max_width_padding: a scalar tf.int32 tensor denoting the maximum amount of width padding. The padding will be chosen uniformly at random from [0, max_width_padding). pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. if set as None, it will be set to average color of the input image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: Image shape will be [new_height, new_width, channels]. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. if masks is not None, the function also returns: masks: rank 3 float32 tensor with shape [N, new_height, new_width] if keypoints is not None, the function also returns: keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2] """ min_image_size = tf.shape(image)[:2] max_image_size = min_image_size + tf.cast( [max_height_padding, max_width_padding], dtype=tf.int32) return random_pad_image( image, boxes, masks=masks, keypoints=keypoints, densepose_surface_coords=densepose_surface_coords, min_image_size=min_image_size, max_image_size=max_image_size, pad_color=pad_color, seed=seed, preprocess_vars_cache=preprocess_vars_cache) def random_crop_pad_image(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, min_object_covered=1.0, aspect_ratio_range=(0.75, 1.33), area_range=(0.1, 1.0), overlap_thresh=0.3, clip_boxes=True, random_coef=0.0, min_padded_size_ratio=(1.0, 1.0), max_padded_size_ratio=(2.0, 2.0), pad_color=None, seed=None, preprocess_vars_cache=None): """Randomly crops and pads the image. Given an input image and its bounding boxes, this op first randomly crops the image and then randomly pads the image with background values. Parameters min_padded_size_ratio and max_padded_size_ratio, determine the range of the final output image size. 
Specifically, the final image size will have a size in the range of min_padded_size_ratio * tf.shape(image) and max_padded_size_ratio * tf.shape(image). Note that these ratios are with respect to the size of the original image, so we can't capture the same effect easily by independently applying RandomCropImage followed by RandomPadImage. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: rank 1 float32 containing the label weights. label_confidences: rank 1 float32 containing the label confidences. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. if set as None, it will be set to average color of the randomly cropped image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: padded_image: padded image. padded_boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. cropped_labels: cropped labels. if label_weights is not None also returns: cropped_label_weights: cropped label weights. if multiclass_scores is not None also returns: cropped_multiclass_scores: cropped_multiclass_scores. 
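Example usage (an illustrative sketch; the input shapes and values are arbitrary placeholders):

    image = tf.random_uniform([480, 640, 3])
    boxes = tf.constant([[0.1, 0.1, 0.6, 0.6]], dtype=tf.float32)
    labels = tf.constant([1], dtype=tf.int32)
    weights = tf.constant([1.0], dtype=tf.float32)
    padded_image, padded_boxes, new_labels, new_weights = random_crop_pad_image(
        image, boxes, labels, weights)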
""" image_size = tf.shape(image) image_height = image_size[0] image_width = image_size[1] result = random_crop_image( image=image, boxes=boxes, labels=labels, label_weights=label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, random_coef=random_coef, seed=seed, preprocess_vars_cache=preprocess_vars_cache) cropped_image, cropped_boxes, cropped_labels = result[:3] min_image_size = tf.cast( tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) * min_padded_size_ratio, dtype=tf.int32) max_image_size = tf.cast( tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) * max_padded_size_ratio, dtype=tf.int32) padded_image, padded_boxes = random_pad_image( # pylint: disable=unbalanced-tuple-unpacking cropped_image, cropped_boxes, min_image_size=min_image_size, max_image_size=max_image_size, pad_color=pad_color, seed=seed, preprocess_vars_cache=preprocess_vars_cache) cropped_padded_output = (padded_image, padded_boxes, cropped_labels) index = 3 if label_weights is not None: cropped_label_weights = result[index] cropped_padded_output += (cropped_label_weights,) index += 1 if label_confidences is not None: cropped_label_confidences = result[index] cropped_padded_output += (cropped_label_confidences,) index += 1 if multiclass_scores is not None: cropped_multiclass_scores = result[index] cropped_padded_output += (cropped_multiclass_scores,) return cropped_padded_output def random_crop_to_aspect_ratio(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, aspect_ratio=1.0, overlap_thresh=0.3, clip_boxes=True, center_crop=False, seed=None, preprocess_vars_cache=None): """Randomly crops an image to the specified aspect ratio. Randomly crops the a portion of the image such that the crop is of the specified aspect ratio, and the crop is as large as possible. If the specified aspect ratio is larger than the aspect ratio of the image, this op will randomly remove rows from the top and bottom of the image. If the specified aspect ratio is less than the aspect ratio of the image, this op will randomly remove cols from the left and right of the image. If the specified aspect ratio is the same as the aspect ratio of the image, this op will return the image. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. aspect_ratio: the aspect ratio of cropped image. 
overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. center_crop: whether to take the center crop or a random crop. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, masks, keypoints, or multiclass_scores is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] Raises: ValueError: If image is not a 3D tensor. """ if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('RandomCropToAspectRatio', values=[image]): image_shape = tf.shape(image) orig_height = image_shape[0] orig_width = image_shape[1] orig_aspect_ratio = tf.cast( orig_width, dtype=tf.float32) / tf.cast( orig_height, dtype=tf.float32) new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) def target_height_fn(): return tf.cast( tf.round(tf.cast(orig_width, dtype=tf.float32) / new_aspect_ratio), dtype=tf.int32) target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio, lambda: orig_height, target_height_fn) def target_width_fn(): return tf.cast( tf.round(tf.cast(orig_height, dtype=tf.float32) * new_aspect_ratio), dtype=tf.int32) target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio, lambda: orig_width, target_width_fn) # either offset_height = 0 and offset_width is randomly chosen from # [0, orig_width - target_width], or else offset_width = 0 and # offset_height is randomly chosen from [0, orig_height - target_height] if center_crop: offset_height = tf.cast(tf.math.floor((orig_height - target_height) / 2), tf.int32) offset_width = tf.cast(tf.math.floor((orig_width - target_width) / 2), tf.int32) else: offset_height = _random_integer(0, orig_height - target_height + 1, seed) offset_width = _random_integer(0, orig_width - target_width + 1, seed) generator_func = lambda: (offset_height, offset_width) offset_height, offset_width = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.CROP_TO_ASPECT_RATIO, preprocess_vars_cache) new_image = tf.image.crop_to_bounding_box( image, offset_height, offset_width, target_height, target_width) im_box = tf.stack([ tf.cast(offset_height, dtype=tf.float32) / tf.cast(orig_height, dtype=tf.float32), tf.cast(offset_width, dtype=tf.float32) / tf.cast(orig_width, dtype=tf.float32), tf.cast(offset_height + target_height, dtype=tf.float32) / tf.cast(orig_height, dtype=tf.float32), tf.cast(offset_width + target_width, dtype=tf.float32) / tf.cast(orig_width, dtype=tf.float32) ]) boxlist = box_list.BoxList(boxes) boxlist.add_field('labels', labels) boxlist.add_field('label_weights', label_weights) if label_confidences is not None: boxlist.add_field('label_confidences', label_confidences) if multiclass_scores is not None: boxlist.add_field('multiclass_scores', multiclass_scores) im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0)) # remove boxes whose
overlap with the image is less than overlap_thresh overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes( boxlist, im_boxlist, overlap_thresh) # change the coordinate of the remaining boxes new_labels = overlapping_boxlist.get_field('labels') new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist, im_box) if clip_boxes: new_boxlist = box_list_ops.clip_to_window( new_boxlist, tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32)) new_boxes = new_boxlist.get() result = [new_image, new_boxes, new_labels] new_label_weights = overlapping_boxlist.get_field('label_weights') result.append(new_label_weights) if label_confidences is not None: new_label_confidences = ( overlapping_boxlist.get_field('label_confidences')) result.append(new_label_confidences) if multiclass_scores is not None: new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores') result.append(new_multiclass_scores) if masks is not None: masks_inside_window = tf.gather(masks, keep_ids) masks_box_begin = tf.stack([0, offset_height, offset_width]) masks_box_size = tf.stack([-1, target_height, target_width]) new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size) result.append(new_masks) if keypoints is not None: keypoints_inside_window = tf.gather(keypoints, keep_ids) new_keypoints = keypoint_ops.change_coordinate_frame( keypoints_inside_window, im_box) if clip_boxes: new_keypoints = keypoint_ops.prune_outside_window(new_keypoints, [0.0, 0.0, 1.0, 1.0]) result.append(new_keypoints) return tuple(result) def random_pad_to_aspect_ratio(image, boxes, masks=None, keypoints=None, aspect_ratio=1.0, min_padded_size_ratio=(1.0, 1.0), max_padded_size_ratio=(2.0, 2.0), seed=None, preprocess_vars_cache=None): """Randomly zero pads an image to the specified aspect ratio. Pads the image so that the resulting image will have the specified aspect ratio without scaling less than the min_padded_size_ratio or more than the max_padded_size_ratio. If the min_padded_size_ratio or max_padded_size_ratio is lower than what is possible to maintain the aspect ratio, then this method will use the least padding to achieve the specified aspect ratio. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. aspect_ratio: aspect ratio of the final image. min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. 
If masks, or keypoints is not None, the function also returns: masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] Raises: ValueError: If image is not a 3D tensor. """ if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('RandomPadToAspectRatio', values=[image]): image_shape = tf.shape(image) image_height = tf.cast(image_shape[0], dtype=tf.float32) image_width = tf.cast(image_shape[1], dtype=tf.float32) image_aspect_ratio = image_width / image_height new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) target_height = tf.cond( image_aspect_ratio <= new_aspect_ratio, lambda: image_height, lambda: image_width / new_aspect_ratio) target_width = tf.cond( image_aspect_ratio >= new_aspect_ratio, lambda: image_width, lambda: image_height * new_aspect_ratio) min_height = tf.maximum( min_padded_size_ratio[0] * image_height, target_height) min_width = tf.maximum( min_padded_size_ratio[1] * image_width, target_width) max_height = tf.maximum( max_padded_size_ratio[0] * image_height, target_height) max_width = tf.maximum( max_padded_size_ratio[1] * image_width, target_width) max_scale = tf.minimum(max_height / target_height, max_width / target_width) min_scale = tf.minimum( max_scale, tf.maximum(min_height / target_height, min_width / target_width)) generator_func = functools.partial(tf.random_uniform, [], min_scale, max_scale, seed=seed) scale = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.PAD_TO_ASPECT_RATIO, preprocess_vars_cache) target_height = tf.round(scale * target_height) target_width = tf.round(scale * target_width) new_image = tf.image.pad_to_bounding_box( image, 0, 0, tf.cast(target_height, dtype=tf.int32), tf.cast(target_width, dtype=tf.int32)) im_box = tf.stack([ 0.0, 0.0, target_height / image_height, target_width / image_width ]) boxlist = box_list.BoxList(boxes) new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box) new_boxes = new_boxlist.get() result = [new_image, new_boxes] if masks is not None: new_masks = tf.expand_dims(masks, -1) new_masks = tf.image.pad_to_bounding_box( new_masks, 0, 0, tf.cast(target_height, dtype=tf.int32), tf.cast(target_width, dtype=tf.int32)) new_masks = tf.squeeze(new_masks, [-1]) result.append(new_masks) if keypoints is not None: new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box) result.append(new_keypoints) return tuple(result) def random_black_patches(image, max_black_patches=10, probability=0.5, size_to_image_ratio=0.1, random_seed=None, preprocess_vars_cache=None): """Randomly adds some black patches to the image. This op adds up to max_black_patches square black patches of a fixed size to the image where size is specified via the size_to_image_ratio parameter. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. max_black_patches: number of times that the function tries to add a black box to the image. probability: at each try, what is the chance of adding a box. size_to_image_ratio: Determines the ratio of the size of the black patches to the size of the image. box_size = size_to_image_ratio * min(image_width, image_height) random_seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. 
If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image """ def add_black_patch_to_image(image, idx): """Function for adding one patch to the image. Args: image: image idx: counter for number of patches that could have been added Returns: image with a randomly added black box """ image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] box_size = tf.cast( tf.multiply( tf.minimum( tf.cast(image_height, dtype=tf.float32), tf.cast(image_width, dtype=tf.float32)), size_to_image_ratio), dtype=tf.int32) generator_func = functools.partial(tf.random_uniform, [], minval=0.0, maxval=(1.0 - size_to_image_ratio), seed=random_seed) normalized_y_min = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH, preprocess_vars_cache, key=str(idx) + 'y') normalized_x_min = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH, preprocess_vars_cache, key=str(idx) + 'x') y_min = tf.cast( normalized_y_min * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32) x_min = tf.cast( normalized_x_min * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32) black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32) mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min, image_height, image_width) image = tf.multiply(image, mask) return image with tf.name_scope('RandomBlackPatchInImage', values=[image]): for idx in range(max_black_patches): generator_func = functools.partial(tf.random_uniform, [], minval=0.0, maxval=1.0, dtype=tf.float32, seed=random_seed) random_prob = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.BLACK_PATCHES, preprocess_vars_cache, key=idx) image = tf.cond( tf.greater(random_prob, probability), lambda: image, functools.partial(add_black_patch_to_image, image=image, idx=idx)) return image def random_jpeg_quality(image, min_jpeg_quality=0, max_jpeg_quality=100, random_coef=0.0, seed=None, preprocess_vars_cache=None): """Randomly encode the image to a random JPEG quality level. Args: image: rank 3 float32 tensor with shape [height, width, channels] and values in the range [0, 255]. min_jpeg_quality: An int for the lower bound for selecting a random jpeg quality level. max_jpeg_quality: An int for the upper bound for selecting a random jpeg quality level. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the encoded image, and if it is 1.0, we will always get the original image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ def _adjust_jpeg_quality(): """Encodes the image as jpeg with a random quality and then decodes.""" generator_func = functools.partial( tf.random_uniform, [], minval=min_jpeg_quality, maxval=max_jpeg_quality, dtype=tf.int32, seed=seed) quality = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY, preprocess_vars_cache, key='quality') # Need to convert to uint8 before calling adjust_jpeg_quality since it # assumes that float features are in the range [0, 1], where herein the # range is [0, 255]. 
image_uint8 = tf.cast(image, tf.uint8) adjusted_image = tf.image.adjust_jpeg_quality(image_uint8, quality) return tf.cast(adjusted_image, tf.float32) with tf.name_scope('RandomJpegQuality', values=[image]): generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_encoding_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY, preprocess_vars_cache) do_encoding_random = tf.greater_equal(do_encoding_random, random_coef) image = tf.cond(do_encoding_random, _adjust_jpeg_quality, lambda: tf.cast(image, tf.float32)) return image def random_downscale_to_target_pixels(image, masks=None, min_target_pixels=300000, max_target_pixels=800000, random_coef=0.0, seed=None, preprocess_vars_cache=None): """Randomly downscales the image to a target number of pixels. If the image contains less than the chosen target number of pixels, it will not be downscaled. Args: image: Rank 3 float32 tensor with shape [height, width, channels] and values in the range [0, 255]. masks: (optional) Rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. min_target_pixels: Integer. An inclusive lower bound for the target number of pixels. max_target_pixels: Integer. An exclusive upper bound for the target number of pixels. random_coef: Float. Random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always apply downscaling, and if it is 1.0, we will always get the original image. seed: (optional) Integer. Random seed. preprocess_vars_cache: (optional) PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: Tuple with elements: image: Resized image which is the same rank as input image. masks: If masks is not None, resized masks which are the same rank as the input masks. Raises: ValueError: If min_target_pixels or max_target_pixels are not positive.
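Example usage (a minimal sketch; the input tensor and pixel budget are arbitrary):

    image = 255.0 * tf.random_uniform([1024, 1024, 3])
    (downscaled_image,) = random_downscale_to_target_pixels(
        image, min_target_pixels=300000, max_target_pixels=500000)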
""" if min_target_pixels <= 0: raise ValueError('Minimum target pixels must be positive') if max_target_pixels <= 0: raise ValueError('Maximum target pixels must be positive') def _resize_image_to_target(target_height, target_width): # pylint: disable=unbalanced-tuple-unpacking new_image, _ = resize_image(image, None, target_height, target_width) return (new_image,) def _resize_image_and_masks_to_target(target_height, target_width): # pylint: disable=unbalanced-tuple-unpacking new_image, new_masks, _ = resize_image(image, masks, target_height, target_width) return new_image, new_masks with tf.name_scope('RandomDownscaleToTargetPixels', values=[image]): generator_fn = functools.partial(tf.random_uniform, [], seed=seed) do_downscale_random = _get_or_create_preprocess_rand_vars( generator_fn, preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS, preprocess_vars_cache) do_downscale_random = tf.greater_equal(do_downscale_random, random_coef) generator_fn = functools.partial( tf.random_uniform, [], minval=min_target_pixels, maxval=max_target_pixels, dtype=tf.int32, seed=seed) target_pixels = _get_or_create_preprocess_rand_vars( generator_fn, preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS, preprocess_vars_cache, key='target_pixels') image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] image_pixels = image_height * image_width scale_factor = tf.sqrt( tf.cast(target_pixels, dtype=tf.float32) / tf.cast(image_pixels, dtype=tf.float32)) target_height = tf.cast( scale_factor * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32) target_width = tf.cast( scale_factor * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32) image_larger_than_target = tf.greater(image_pixels, target_pixels) should_apply_resize = tf.logical_and(do_downscale_random, image_larger_than_target) if masks is not None: resize_fn = functools.partial(_resize_image_and_masks_to_target, target_height, target_width) return tf.cond(should_apply_resize, resize_fn, lambda: (tf.cast(image, dtype=tf.float32), masks)) else: resize_fn = lambda: _resize_image_to_target(target_height, target_width) return tf.cond(should_apply_resize, resize_fn, lambda: (tf.cast(image, dtype=tf.float32),)) def random_patch_gaussian(image, min_patch_size=1, max_patch_size=250, min_gaussian_stddev=0.0, max_gaussian_stddev=1.0, random_coef=0.0, seed=None, preprocess_vars_cache=None): """Randomly applies gaussian noise to a random patch on the image. The gaussian noise is applied to the image with values scaled to the range [0.0, 1.0]. The result of applying gaussian noise to the scaled image is clipped to be within the range [0.0, 1.0], equivalent to the range [0.0, 255.0] after rescaling the image back. See "Improving Robustness Without Sacrificing Accuracy with Patch Gaussian Augmentation " by Lopes et al., 2019, for further details. https://arxiv.org/abs/1906.02611 Args: image: Rank 3 float32 tensor with shape [height, width, channels] and values in the range [0.0, 255.0]. min_patch_size: Integer. An inclusive lower bound for the patch size. max_patch_size: Integer. An exclusive upper bound for the patch size. min_gaussian_stddev: Float. An inclusive lower bound for the standard deviation of the gaussian noise. max_gaussian_stddev: Float. An exclusive upper bound for the standard deviation of the gaussian noise. random_coef: Float. Random coefficient that defines the chance of getting the original image. 
If random_coef is 0.0, we will always apply the patch gaussian noise, and if it is 1.0, we will always get the original image. seed: (optional) Integer. Random seed. preprocess_vars_cache: (optional) PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: Rank 3 float32 tensor with same shape as the input image and with gaussian noise applied within a random patch. Raises: ValueError: If min_patch_size is < 1. """ if min_patch_size < 1: raise ValueError('Minimum patch size must be >= 1.') get_or_create_rand_vars_fn = functools.partial( _get_or_create_preprocess_rand_vars, function_id=preprocessor_cache.PreprocessorCache.PATCH_GAUSSIAN, preprocess_vars_cache=preprocess_vars_cache) def _apply_patch_gaussian(image): """Applies a patch gaussian with random size, location, and stddev.""" patch_size = get_or_create_rand_vars_fn( functools.partial( tf.random_uniform, [], minval=min_patch_size, maxval=max_patch_size, dtype=tf.int32, seed=seed), key='patch_size') gaussian_stddev = get_or_create_rand_vars_fn( functools.partial( tf.random_uniform, [], minval=min_gaussian_stddev, maxval=max_gaussian_stddev, dtype=tf.float32, seed=seed), key='gaussian_stddev') image_shape = tf.shape(image) y = get_or_create_rand_vars_fn( functools.partial( tf.random_uniform, [], minval=0, maxval=image_shape[0], dtype=tf.int32, seed=seed), key='y') x = get_or_create_rand_vars_fn( functools.partial( tf.random_uniform, [], minval=0, maxval=image_shape[1], dtype=tf.int32, seed=seed), key='x') gaussian = get_or_create_rand_vars_fn( functools.partial( tf.random.normal, image_shape, stddev=gaussian_stddev, dtype=tf.float32, seed=seed), key='gaussian') scaled_image = image / 255.0 image_plus_gaussian = tf.clip_by_value(scaled_image + gaussian, 0.0, 1.0) patch_mask = patch_ops.get_patch_mask(y, x, patch_size, image_shape) patch_mask = tf.expand_dims(patch_mask, -1) patch_mask = tf.tile(patch_mask, [1, 1, image_shape[2]]) patched_image = tf.where(patch_mask, image_plus_gaussian, scaled_image) return patched_image * 255.0 with tf.name_scope('RandomPatchGaussian', values=[image]): image = tf.cast(image, tf.float32) patch_gaussian_random = get_or_create_rand_vars_fn( functools.partial(tf.random_uniform, [], seed=seed)) do_patch_gaussian = tf.greater_equal(patch_gaussian_random, random_coef) image = tf.cond(do_patch_gaussian, lambda: _apply_patch_gaussian(image), lambda: image) return image def autoaugment_image(image, boxes, policy_name='v0'): """Apply an autoaugment policy to the image and boxes. See "AutoAugment: Learning Augmentation Policies from Data" by Cubuk et al., 2018, for further details. https://arxiv.org/abs/1805.09501 Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. boxes: rank 2 float32 tensor containing the bounding boxes with shape [num_instances, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. policy_name: The name of the AutoAugment policy to use. The available options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for all of the results in the paper and was found to achieve the best results on the COCO dataset.
`v1`, `v2` and `v3` are additional good policies found on the COCO dataset that have slight variation in what operations were used during the search procedure along with how many operations are applied in parallel to a single image (2 vs 3). Returns: image: the augmented image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. boxes will have been augmented along with image. """ return autoaugment_utils.distort_image_with_autoaugment( image, boxes, policy_name) def image_to_float(image): """Used in Faster R-CNN. Casts image pixel values to float. Args: image: input image which might be in tf.uint8 or sth else format Returns: image: image in tf.float32 format. """ with tf.name_scope('ImageToFloat', values=[image]): image = tf.cast(image, dtype=tf.float32) return image def random_resize_method(image, target_size, preprocess_vars_cache=None): """Uses a random resize method to resize the image to target size. Args: image: a rank 3 tensor. target_size: a list of [target_height, target_width] preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: resized image. """ resized_image = _apply_with_random_selector( image, lambda x, method: tf.image.resize_images(x, target_size, method), num_cases=4, preprocess_vars_cache=preprocess_vars_cache, key=preprocessor_cache.PreprocessorCache.RESIZE_METHOD) return resized_image def resize_to_range(image, masks=None, min_dimension=None, max_dimension=None, method=tf.image.ResizeMethod.BILINEAR, align_corners=False, pad_to_max_dimension=False, per_channel_pad_value=(0, 0, 0)): """Resizes an image so its dimensions are within the provided value. The output size can be described by two cases: 1. If the image can be rescaled so its minimum dimension is equal to the provided value without the other dimension exceeding max_dimension, then do so. 2. Otherwise, resize so the largest dimension is equal to max_dimension. Args: image: A 3D tensor of shape [height, width, channels] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. min_dimension: (optional) (scalar) desired size of the smaller image dimension. max_dimension: (optional) (scalar) maximum allowed size of the larger image dimension. method: (optional) interpolation method used in resizing. Defaults to BILINEAR. align_corners: bool. If true, exactly align all 4 corners of the input and output. Defaults to False. pad_to_max_dimension: Whether to resize the image and pad it with zeros so the resulting image is of the spatial size [max_dimension, max_dimension]. If masks are included they are padded similarly. per_channel_pad_value: A tuple of per-channel scalar value to use for padding. By default pads zeros. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A 3D tensor of shape [new_height, new_width, channels], where the image has been resized (with bilinear interpolation) so that min(new_height, new_width) == min_dimension or max(new_height, new_width) == max_dimension. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width]. resized_image_shape: A 1D tensor of shape [3] containing shape of the resized image. Raises: ValueError: if the image is not a 3D tensor. 
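Example usage (a sketch; the dimensions shown are arbitrary choices):

    image = tf.random_uniform([600, 1200, 3])
    resized_image, resized_shape = resize_to_range(
        image, min_dimension=800, max_dimension=1333)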
""" if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') def _resize_landscape_image(image): # resize a landscape image return tf.image.resize_images( image, tf.stack([min_dimension, max_dimension]), method=method, align_corners=align_corners, preserve_aspect_ratio=True) def _resize_portrait_image(image): # resize a portrait image return tf.image.resize_images( image, tf.stack([max_dimension, min_dimension]), method=method, align_corners=align_corners, preserve_aspect_ratio=True) with tf.name_scope('ResizeToRange', values=[image, min_dimension]): if image.get_shape().is_fully_defined(): if image.get_shape()[0] < image.get_shape()[1]: new_image = _resize_landscape_image(image) else: new_image = _resize_portrait_image(image) new_size = tf.constant(new_image.get_shape().as_list()) else: new_image = tf.cond( tf.less(tf.shape(image)[0], tf.shape(image)[1]), lambda: _resize_landscape_image(image), lambda: _resize_portrait_image(image)) new_size = tf.shape(new_image) if pad_to_max_dimension: channels = tf.unstack(new_image, axis=2) if len(channels) != len(per_channel_pad_value): raise ValueError('Number of channels must be equal to the length of ' 'per-channel pad value.') new_image = tf.stack( [ tf.pad( # pylint: disable=g-complex-comprehension channels[i], [[0, max_dimension - new_size[0]], [0, max_dimension - new_size[1]]], constant_values=per_channel_pad_value[i]) for i in range(len(channels)) ], axis=2) new_image.set_shape([max_dimension, max_dimension, len(channels)]) result = [new_image] if masks is not None: new_masks = tf.expand_dims(masks, 3) new_masks = tf.image.resize_images( new_masks, new_size[:-1], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=align_corners) if pad_to_max_dimension: new_masks = tf.image.pad_to_bounding_box( new_masks, 0, 0, max_dimension, max_dimension) new_masks = tf.squeeze(new_masks, 3) result.append(new_masks) result.append(new_size) return result def _get_image_info(image): """Returns the height, width and number of channels in the image.""" image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] num_channels = tf.shape(image)[2] return (image_height, image_width, num_channels) # TODO(alirezafathi): Make sure the static shapes are preserved. def resize_to_min_dimension(image, masks=None, min_dimension=600, method=tf.image.ResizeMethod.BILINEAR): """Resizes image and masks given the min size maintaining the aspect ratio. If one of the image dimensions is smaller than min_dimension, it will scale the image such that its smallest dimension is equal to min_dimension. Otherwise, will keep the image size as is. Args: image: a tensor of size [height, width, channels]. masks: (optional) a tensors of size [num_instances, height, width]. min_dimension: minimum image dimension. method: (optional) interpolation method used in resizing. Defaults to BILINEAR. Returns: An array containing resized_image, resized_masks, and resized_image_shape. Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A tensor of size [new_height, new_width, channels]. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width] resized_image_shape: A 1D tensor of shape [3] containing the shape of the resized image. Raises: ValueError: if the image is not a 3D tensor. 
""" if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]): (image_height, image_width, num_channels) = _get_image_info(image) min_image_dimension = tf.minimum(image_height, image_width) min_target_dimension = tf.maximum(min_image_dimension, min_dimension) target_ratio = tf.cast(min_target_dimension, dtype=tf.float32) / tf.cast( min_image_dimension, dtype=tf.float32) target_height = tf.cast( tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32) target_width = tf.cast( tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32) image = tf.image.resize_images( tf.expand_dims(image, axis=0), size=[target_height, target_width], method=method, align_corners=True) result = [tf.squeeze(image, axis=0)] if masks is not None: masks = tf.image.resize_nearest_neighbor( tf.expand_dims(masks, axis=3), size=[target_height, target_width], align_corners=True) result.append(tf.squeeze(masks, axis=3)) result.append(tf.stack([target_height, target_width, num_channels])) return result def resize_to_max_dimension(image, masks=None, max_dimension=600, method=tf.image.ResizeMethod.BILINEAR): """Resizes image and masks given the max size maintaining the aspect ratio. If one of the image dimensions is greater than max_dimension, it will scale the image such that its largest dimension is equal to max_dimension. Otherwise, will keep the image size as is. Args: image: a tensor of size [height, width, channels]. masks: (optional) a tensors of size [num_instances, height, width]. max_dimension: maximum image dimension. method: (optional) interpolation method used in resizing. Defaults to BILINEAR. Returns: An array containing resized_image, resized_masks, and resized_image_shape. Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A tensor of size [new_height, new_width, channels]. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width] resized_image_shape: A 1D tensor of shape [3] containing the shape of the resized image. Raises: ValueError: if the image is not a 3D tensor. """ if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('ResizeGivenMaxDimension', values=[image, max_dimension]): (image_height, image_width, num_channels) = _get_image_info(image) max_image_dimension = tf.maximum(image_height, image_width) max_target_dimension = tf.minimum(max_image_dimension, max_dimension) target_ratio = tf.cast(max_target_dimension, dtype=tf.float32) / tf.cast( max_image_dimension, dtype=tf.float32) target_height = tf.cast( tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32) target_width = tf.cast( tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32) image = tf.image.resize_images( tf.expand_dims(image, axis=0), size=[target_height, target_width], method=method, align_corners=True) result = [tf.squeeze(image, axis=0)] if masks is not None: masks = tf.image.resize_nearest_neighbor( tf.expand_dims(masks, axis=3), size=[target_height, target_width], align_corners=True) result.append(tf.squeeze(masks, axis=3)) result.append(tf.stack([target_height, target_width, num_channels])) return result def resize_pad_to_multiple(image, masks=None, multiple=1): """Resize an image by zero padding it to the specified multiple. 
For example, with an image of size (101, 199, 3) and multiple=4, the returned image will have shape (104, 200, 3). Args: image: a tensor of shape [height, width, channels] masks: (optional) a tensor of shape [num_instances, height, width] multiple: int, the multiple to which the height and width of the input will be padded. Returns: resized_image: The image with 0 padding applied, such that output dimensions are divisible by `multiple` resized_masks: If masks are given, they are resized to the same spatial dimensions as the image. resized_image_shape: An integer tensor of shape [3] which holds the shape of the input image. """ if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('ResizePadToMultiple', values=[image, multiple]): image_height, image_width, num_channels = _get_image_info(image) image = image[tf.newaxis, :, :, :] image = ops.pad_to_multiple(image, multiple)[0, :, :, :] result = [image] if masks is not None: masks = tf.transpose(masks, (1, 2, 0)) masks = masks[tf.newaxis, :, :, :] masks = ops.pad_to_multiple(masks, multiple)[0, :, :, :] masks = tf.transpose(masks, (2, 0, 1)) result.append(masks) result.append(tf.stack([image_height, image_width, num_channels])) return result def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None): """Scales boxes from normalized to pixel coordinates. Args: image: A 3D float32 tensor of shape [height, width, channels]. boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding boxes in normalized coordinates. Each row is of the form [ymin, xmin, ymax, xmax]. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. Returns: image: unchanged input image. scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the bounding boxes in pixel coordinates. scaled_keypoints: a 3D float32 tensor with shape [num_instances, num_keypoints, 2] containing the keypoints in pixel coordinates. """ boxlist = box_list.BoxList(boxes) image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get() result = [image, scaled_boxes] if keypoints is not None: scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width) result.append(scaled_keypoints) return tuple(result) # TODO(alirezafathi): Investigate if instead the function should return None if # masks is None. # pylint: disable=g-doc-return-or-yield def resize_image(image, masks=None, new_height=600, new_width=1024, method=tf.image.ResizeMethod.BILINEAR, align_corners=False): """Resizes images to the given height and width. Args: image: A 3D tensor of shape [height, width, channels] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. new_height: (optional) (scalar) desired height of the image. new_width: (optional) (scalar) desired width of the image. method: (optional) interpolation method used in resizing. Defaults to BILINEAR. align_corners: bool. If true, exactly align all 4 corners of the input and output. Defaults to False. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A tensor of size [new_height, new_width, channels]. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width] resized_image_shape: A 1D tensor of shape [3] containing the shape of the resized image. 
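Example usage (a sketch; the input size is arbitrary):

    image = tf.random_uniform([480, 640, 3])
    resized_image, resized_shape = resize_image(
        image, new_height=600, new_width=1024)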
""" with tf.name_scope( 'ResizeImage', values=[image, new_height, new_width, method, align_corners]): new_image = tf.image.resize_images( image, tf.stack([new_height, new_width]), method=method, align_corners=align_corners) image_shape = shape_utils.combined_static_and_dynamic_shape(image) result = [new_image] if masks is not None: num_instances = tf.shape(masks)[0] new_size = tf.stack([new_height, new_width]) def resize_masks_branch(): new_masks = tf.expand_dims(masks, 3) new_masks = tf.image.resize_nearest_neighbor( new_masks, new_size, align_corners=align_corners) new_masks = tf.squeeze(new_masks, axis=3) return new_masks def reshape_masks_branch(): # The shape function will be computed for both branches of the # condition, regardless of which branch is actually taken. Make sure # that we don't trigger an assertion in the shape function when trying # to reshape a non empty tensor into an empty one. new_masks = tf.reshape(masks, [-1, new_size[0], new_size[1]]) return new_masks masks = tf.cond(num_instances > 0, resize_masks_branch, reshape_masks_branch) result.append(masks) result.append(tf.stack([new_height, new_width, image_shape[2]])) return result def subtract_channel_mean(image, means=None): """Normalizes an image by subtracting a mean from each channel. Args: image: A 3D tensor of shape [height, width, channels] means: float list containing a mean for each channel Returns: normalized_images: a tensor of shape [height, width, channels] Raises: ValueError: if images is not a 4D tensor or if the number of means is not equal to the number of channels. """ with tf.name_scope('SubtractChannelMean', values=[image, means]): if len(image.get_shape()) != 3: raise ValueError('Input must be of size [height, width, channels]') if len(means) != image.get_shape()[-1]: raise ValueError('len(means) must match the number of channels') return image - [[means]] def one_hot_encoding(labels, num_classes=None): """One-hot encodes the multiclass labels. Example usage: labels = tf.constant([1, 4], dtype=tf.int32) one_hot = OneHotEncoding(labels, num_classes=5) one_hot.eval() # evaluates to [0, 1, 0, 0, 1] Args: labels: A tensor of shape [None] corresponding to the labels. num_classes: Number of classes in the dataset. Returns: onehot_labels: a tensor of shape [num_classes] corresponding to the one hot encoding of the labels. Raises: ValueError: if num_classes is not specified. """ with tf.name_scope('OneHotEncoding', values=[labels]): if num_classes is None: raise ValueError('num_classes must be specified') labels = tf.one_hot(labels, num_classes, 1, 0) return tf.reduce_max(labels, 0) def rgb_to_gray(image): """Converts a 3 channel RGB image to a 1 channel grayscale image. Args: image: Rank 3 float32 tensor containing 1 image -> [height, width, 3] with pixel values varying between [0, 1]. Returns: image: A single channel grayscale image -> [image, height, 1]. """ return _rgb_to_grayscale(image) def random_self_concat_image( image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, concat_vertical_probability=0.1, concat_horizontal_probability=0.1, seed=None, preprocess_vars_cache=None): """Randomly concatenates the image with itself. This function randomly concatenates the image with itself; the random variables for vertical and horizontal concatenation are independent. Afterwards, we adjust the old bounding boxes, and add new bounding boxes for the new objects. 
Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: rank 1 float32 containing the label weights. label_confidences: (optional) rank 1 float32 containing the label confidences. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. concat_vertical_probability: (optional) a tf.float32 scalar denoting the probability of a vertical concatenation. concat_horizontal_probability: (optional) a tf.float32 scalar denoting the probability of a horizontal concatenation. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: Image shape will be [new_height, new_width, channels]. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. if label_confidences is not None also returns: maybe_concat_label_confidences: cropped label weights. if multiclass_scores is not None also returns: maybe_concat_multiclass_scores: cropped_multiclass_scores. """ concat_vertical = (tf.random_uniform([], seed=seed) < concat_vertical_probability) # Note the seed + 1 so we get some semblance of independence even with # fixed seeds. concat_horizontal = (tf.random_uniform([], seed=seed + 1 if seed else None) < concat_horizontal_probability) gen_func = lambda: (concat_vertical, concat_horizontal) params = _get_or_create_preprocess_rand_vars( gen_func, preprocessor_cache.PreprocessorCache.SELF_CONCAT_IMAGE, preprocess_vars_cache) concat_vertical, concat_horizontal = params def _concat_image(image, boxes, labels, label_weights, axis): """Concats the image to itself on `axis`.""" output_images = tf.concat([image, image], axis=axis) if axis == 0: # Concat vertically, so need to reduce the y coordinates. 
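        # Boxes are [ymin, xmin, ymax, xmax]: scaling the y coordinates by 0.5
        # squeezes the original boxes into the top half of the concatenated
        # image, and adding 0.5 to the y coordinates below places the
        # duplicated boxes in the bottom half.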
old_scaling = tf.constant([0.5, 1.0, 0.5, 1.0]) new_translation = tf.constant([0.5, 0.0, 0.5, 0.0]) elif axis == 1: old_scaling = tf.constant([1.0, 0.5, 1.0, 0.5]) new_translation = tf.constant([0.0, 0.5, 0.0, 0.5]) old_boxes = old_scaling * boxes new_boxes = old_boxes + new_translation all_boxes = tf.concat([old_boxes, new_boxes], axis=0) return [output_images, all_boxes, tf.tile(labels, [2]), tf.tile( label_weights, [2])] image, boxes, labels, label_weights = tf.cond( concat_vertical, lambda: _concat_image(image, boxes, labels, label_weights, axis=0), lambda: [image, boxes, labels, label_weights], strict=True) outputs = tf.cond( concat_horizontal, lambda: _concat_image(image, boxes, labels, label_weights, axis=1), lambda: [image, boxes, labels, label_weights], strict=True) if label_confidences is not None: label_confidences = tf.cond(concat_vertical, lambda: tf.tile(label_confidences, [2]), lambda: label_confidences) outputs.append(tf.cond(concat_horizontal, lambda: tf.tile(label_confidences, [2]), lambda: label_confidences)) if multiclass_scores is not None: multiclass_scores = tf.cond(concat_vertical, lambda: tf.tile(multiclass_scores, [2, 1]), lambda: multiclass_scores) outputs.append(tf.cond(concat_horizontal, lambda: tf.tile(multiclass_scores, [2, 1]), lambda: multiclass_scores)) return outputs def ssd_random_crop(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio_range=((0.5, 2.0),) * 7, area_range=((0.1, 1.0),) * 7, overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 7, random_coef=(0.15,) * 7, seed=None, preprocess_vars_cache=None): """Random crop preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: rank 1 float32 tensor containing the weights. label_confidences: rank 1 float32 tensor containing the confidences. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. seed: random seed. 
preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, multiclass_scores, masks, or keypoints is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ def random_crop_selector(selected_result, index): """Applies random_crop_image to selected result. Args: selected_result: A tuple containing image, boxes, labels, keypoints (if not None), and masks (if not None). index: The index that was randomly selected. Returns: A tuple containing image, boxes, labels, keypoints (if not None), and masks (if not None). """ i = 3 image, boxes, labels = selected_result[:i] selected_label_weights = None selected_label_confidences = None selected_multiclass_scores = None selected_masks = None selected_keypoints = None if label_weights is not None: selected_label_weights = selected_result[i] i += 1 if label_confidences is not None: selected_label_confidences = selected_result[i] i += 1 if multiclass_scores is not None: selected_multiclass_scores = selected_result[i] i += 1 if masks is not None: selected_masks = selected_result[i] i += 1 if keypoints is not None: selected_keypoints = selected_result[i] return random_crop_image( image=image, boxes=boxes, labels=labels, label_weights=selected_label_weights, label_confidences=selected_label_confidences, multiclass_scores=selected_multiclass_scores, masks=selected_masks, keypoints=selected_keypoints, min_object_covered=min_object_covered[index], aspect_ratio_range=aspect_ratio_range[index], area_range=area_range[index], overlap_thresh=overlap_thresh[index], clip_boxes=clip_boxes[index], random_coef=random_coef[index], seed=seed, preprocess_vars_cache=preprocess_vars_cache) result = _apply_with_random_selector_tuples( tuple( t for t in (image, boxes, labels, label_weights, label_confidences, multiclass_scores, masks, keypoints) if t is not None), random_crop_selector, num_cases=len(min_object_covered), preprocess_vars_cache=preprocess_vars_cache, key=preprocessor_cache.PreprocessorCache.SSD_CROP_SELECTOR_ID) return result def ssd_random_crop_pad(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio_range=((0.5, 2.0),) * 6, area_range=((0.1, 1.0),) * 6, overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 6, random_coef=(0.15,) * 6, min_padded_size_ratio=((1.0, 1.0),) * 6, max_padded_size_ratio=((2.0, 2.0),) * 6, pad_color=(None,) * 6, seed=None, preprocess_vars_cache=None): """Random crop preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. 
Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: float32 tensor of shape [num_instances] representing the confidences for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. if set as None, it will be set to average color of the randomly cropped image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: Image shape will be [new_height, new_width, channels]. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. new_labels: new labels. new_label_weights: new label weights. 
""" def random_crop_pad_selector(image_boxes_labels, index): """Random crop preprocessing helper.""" i = 3 image, boxes, labels = image_boxes_labels[:i] selected_label_weights = None selected_label_confidences = None selected_multiclass_scores = None if label_weights is not None: selected_label_weights = image_boxes_labels[i] i += 1 if label_confidences is not None: selected_label_confidences = image_boxes_labels[i] i += 1 if multiclass_scores is not None: selected_multiclass_scores = image_boxes_labels[i] return random_crop_pad_image( image, boxes, labels, label_weights=selected_label_weights, label_confidences=selected_label_confidences, multiclass_scores=selected_multiclass_scores, min_object_covered=min_object_covered[index], aspect_ratio_range=aspect_ratio_range[index], area_range=area_range[index], overlap_thresh=overlap_thresh[index], clip_boxes=clip_boxes[index], random_coef=random_coef[index], min_padded_size_ratio=min_padded_size_ratio[index], max_padded_size_ratio=max_padded_size_ratio[index], pad_color=pad_color[index], seed=seed, preprocess_vars_cache=preprocess_vars_cache) return _apply_with_random_selector_tuples( tuple(t for t in (image, boxes, labels, label_weights, label_confidences, multiclass_scores) if t is not None), random_crop_pad_selector, num_cases=len(min_object_covered), preprocess_vars_cache=preprocess_vars_cache, key=preprocessor_cache.PreprocessorCache.SSD_CROP_PAD_SELECTOR_ID) def ssd_random_crop_fixed_aspect_ratio( image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio=1.0, area_range=((0.1, 1.0),) * 7, overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 7, random_coef=(0.15,) * 7, seed=None, preprocess_vars_cache=None): """Random crop preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. The only difference is that the aspect ratio of the crops are fixed. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidences for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio: aspect ratio of the cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. 
random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If multiclass_scores, masks, or keypoints is not None, the function also returns: multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range) crop_result = ssd_random_crop( image, boxes, labels, label_weights=label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, masks=masks, keypoints=keypoints, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, random_coef=random_coef, seed=seed, preprocess_vars_cache=preprocess_vars_cache) i = 3 new_image, new_boxes, new_labels = crop_result[:i] new_label_weights = None new_label_confidences = None new_multiclass_scores = None new_masks = None new_keypoints = None if label_weights is not None: new_label_weights = crop_result[i] i += 1 if label_confidences is not None: new_label_confidences = crop_result[i] i += 1 if multiclass_scores is not None: new_multiclass_scores = crop_result[i] i += 1 if masks is not None: new_masks = crop_result[i] i += 1 if keypoints is not None: new_keypoints = crop_result[i] result = random_crop_to_aspect_ratio( new_image, new_boxes, new_labels, label_weights=new_label_weights, label_confidences=new_label_confidences, multiclass_scores=new_multiclass_scores, masks=new_masks, keypoints=new_keypoints, aspect_ratio=aspect_ratio, clip_boxes=clip_boxes, seed=seed, preprocess_vars_cache=preprocess_vars_cache) return result def ssd_random_crop_pad_fixed_aspect_ratio( image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio=1.0, aspect_ratio_range=((0.5, 2.0),) * 7, area_range=((0.1, 1.0),) * 7, overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 7, random_coef=(0.15,) * 7, min_padded_size_ratio=(1.0, 1.0), max_padded_size_ratio=(2.0, 2.0), seed=None, preprocess_vars_cache=None): """Random crop and pad preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. The only difference is that after the initial crop, images are zero-padded to a fixed aspect ratio instead of being resized to that aspect ratio. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. 
labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio: the final aspect ratio to pad to. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If multiclass_scores, masks, or keypoints is not None, the function also returns: multiclass_scores: rank 2 with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. 
keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ crop_result = ssd_random_crop( image, boxes, labels, label_weights=label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, masks=masks, keypoints=keypoints, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, random_coef=random_coef, seed=seed, preprocess_vars_cache=preprocess_vars_cache) i = 3 new_image, new_boxes, new_labels = crop_result[:i] new_label_weights = None new_label_confidences = None new_multiclass_scores = None new_masks = None new_keypoints = None if label_weights is not None: new_label_weights = crop_result[i] i += 1 if label_confidences is not None: new_label_confidences = crop_result[i] i += 1 if multiclass_scores is not None: new_multiclass_scores = crop_result[i] i += 1 if masks is not None: new_masks = crop_result[i] i += 1 if keypoints is not None: new_keypoints = crop_result[i] result = random_pad_to_aspect_ratio( new_image, new_boxes, masks=new_masks, keypoints=new_keypoints, aspect_ratio=aspect_ratio, min_padded_size_ratio=min_padded_size_ratio, max_padded_size_ratio=max_padded_size_ratio, seed=seed, preprocess_vars_cache=preprocess_vars_cache) result = list(result) i = 3 result.insert(2, new_labels) if new_label_weights is not None: result.insert(i, new_label_weights) i += 1 if new_label_confidences is not None: result.insert(i, new_label_confidences) i += 1 if multiclass_scores is not None: result.insert(i, new_multiclass_scores) result = tuple(result) return result def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0): """Converts multiclass logits to softmax scores after applying temperature. Args: multiclass_scores: float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. temperature: Scale factor to use prior to applying softmax. Larger temperatures give more uniform distruibutions after softmax. Returns: multiclass_scores: float32 tensor of shape [num_instances, num_classes] with scaling and softmax applied. """ # Multiclass scores must be stored as logits. Apply temp and softmax. multiclass_scores_scaled = tf.multiply( multiclass_scores, 1.0 / temperature, name='scale_logits') multiclass_scores = tf.nn.softmax(multiclass_scores_scaled, name='softmax') return multiclass_scores def _get_crop_border(border, size): border = tf.cast(border, tf.float32) size = tf.cast(size, tf.float32) i = tf.ceil(tf.log(2.0 * border / size) / tf.log(2.0)) divisor = tf.pow(2.0, i) divisor = tf.clip_by_value(divisor, 1, border) divisor = tf.cast(divisor, tf.int32) return tf.cast(border, tf.int32) // divisor def random_square_crop_by_scale(image, boxes, labels, label_weights, label_confidences=None, masks=None, keypoints=None, max_border=128, scale_min=0.6, scale_max=1.3, num_scales=8, seed=None, preprocess_vars_cache=None): """Randomly crop a square in proportion to scale and image size. Extract a square sized crop from an image whose side length is sampled by randomly scaling the maximum spatial dimension of the image. If part of the crop falls outside the image, it is filled with zeros. The augmentation is borrowed from [1] [1]: https://arxiv.org/abs/1904.07850 Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. 
Each row is in the form of [ymin, xmin, ymax, xmax]. Boxes on the crop boundary are clipped to the boundary and boxes falling outside the crop are ignored. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. max_border: The maximum size of the border. The border defines distance in pixels to the image boundaries that will not be considered as a center of a crop. To make sure that the border does not go over the center of the image, we chose the border value by computing the minimum k, such that (max_border / (2**k)) < image_dimension/2. scale_min: float, the minimum value for scale. scale_max: float, the maximum value for scale. num_scales: int, the number of discrete scale values to sample between [scale_min, scale_max] seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. label_weights: rank 1 float32 tensor with shape [num_instances]. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. 
""" img_shape = tf.shape(image) height, width = img_shape[0], img_shape[1] scales = tf.linspace(scale_min, scale_max, num_scales) scale = _get_or_create_preprocess_rand_vars( lambda: scales[_random_integer(0, num_scales, seed)], preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, preprocess_vars_cache, 'scale') image_size = scale * tf.cast(tf.maximum(height, width), tf.float32) image_size = tf.cast(image_size, tf.int32) h_border = _get_crop_border(max_border, height) w_border = _get_crop_border(max_border, width) def y_function(): y = _random_integer(h_border, tf.cast(height, tf.int32) - h_border + 1, seed) return y def x_function(): x = _random_integer(w_border, tf.cast(width, tf.int32) - w_border + 1, seed) return x y_center = _get_or_create_preprocess_rand_vars( y_function, preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, preprocess_vars_cache, 'y_center') x_center = _get_or_create_preprocess_rand_vars( x_function, preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, preprocess_vars_cache, 'x_center') half_size = tf.cast(image_size / 2, tf.int32) crop_ymin, crop_ymax = y_center - half_size, y_center + half_size crop_xmin, crop_xmax = x_center - half_size, x_center + half_size ymin = tf.maximum(crop_ymin, 0) xmin = tf.maximum(crop_xmin, 0) ymax = tf.minimum(crop_ymax, height - 1) xmax = tf.minimum(crop_xmax, width - 1) cropped_image = image[ymin:ymax, xmin:xmax] offset_y = tf.maximum(0, ymin - crop_ymin) offset_x = tf.maximum(0, xmin - crop_xmin) oy_i = offset_y ox_i = offset_x output_image = tf.image.pad_to_bounding_box( cropped_image, offset_height=oy_i, offset_width=ox_i, target_height=image_size, target_width=image_size) if ymin == 0: # We might be padding the image. box_ymin = -offset_y else: box_ymin = crop_ymin if xmin == 0: # We might be padding the image. box_xmin = -offset_x else: box_xmin = crop_xmin box_ymax = box_ymin + image_size box_xmax = box_xmin + image_size image_box = [box_ymin / height, box_xmin / width, box_ymax / height, box_xmax / width] boxlist = box_list.BoxList(boxes) boxlist = box_list_ops.change_coordinate_frame(boxlist, image_box) boxlist, indices = box_list_ops.prune_completely_outside_window( boxlist, [0.0, 0.0, 1.0, 1.0]) boxlist = box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0], filter_nonoverlapping=False) return_values = [output_image, boxlist.get(), tf.gather(labels, indices), tf.gather(label_weights, indices)] if label_confidences is not None: return_values.append(tf.gather(label_confidences, indices)) if masks is not None: new_masks = tf.expand_dims(masks, -1) new_masks = new_masks[:, ymin:ymax, xmin:xmax] new_masks = tf.image.pad_to_bounding_box( new_masks, oy_i, ox_i, image_size, image_size) new_masks = tf.squeeze(new_masks, [-1]) return_values.append(tf.gather(new_masks, indices)) if keypoints is not None: keypoints = tf.gather(keypoints, indices) keypoints = keypoint_ops.change_coordinate_frame(keypoints, image_box) keypoints = keypoint_ops.prune_outside_window(keypoints, [0.0, 0.0, 1.0, 1.0]) return_values.append(keypoints) return return_values def random_scale_crop_and_pad_to_square( image, boxes, labels, label_weights, masks=None, keypoints=None, label_confidences=None, scale_min=0.1, scale_max=2.0, output_size=512, resize_method=tf.image.ResizeMethod.BILINEAR, seed=None): """Randomly scale, crop, and then pad an image to fixed square dimensions. Randomly scale, crop, and then pad an image to the desired square output dimensions. 
Specifically, this method first samples a random_scale factor from a uniform distribution between scale_min and scale_max, and then resizes the image such that it's maximum dimension is (output_size * random_scale). Secondly, a square output_size crop is extracted from the resized image (note, this will only occur when random_scale > 1.0). Lastly, the cropped region is padded to the desired square output_size, by filling with zeros. The augmentation is borrowed from [1] [1]: https://arxiv.org/abs/1911.09070 Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. Boxes on the crop boundary are clipped to the boundary and boxes falling outside the crop are ignored. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. label_confidences: (optional) float32 tensor of shape [num_instance] representing the confidence for each box. scale_min: float, the minimum value for the random scale factor. scale_max: float, the maximum value for the random scale factor. output_size: int, the desired (square) output image size. resize_method: tf.image.ResizeMethod, resize method to use when scaling the input images. seed: random seed. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. label_weights: rank 1 float32 tensor with shape [num_instances]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. label_confidences: confidences for retained boxes. """ img_shape = tf.shape(image) input_height, input_width = img_shape[0], img_shape[1] random_scale = tf.random_uniform([], scale_min, scale_max, seed=seed) # Compute the scaled height and width from the random scale. max_input_dim = tf.cast(tf.maximum(input_height, input_width), tf.float32) input_ar_y = tf.cast(input_height, tf.float32) / max_input_dim input_ar_x = tf.cast(input_width, tf.float32) / max_input_dim scaled_height = tf.cast(random_scale * output_size * input_ar_y, tf.int32) scaled_width = tf.cast(random_scale * output_size * input_ar_x, tf.int32) # Compute the offsets: offset_y = tf.cast(scaled_height - output_size, tf.float32) offset_x = tf.cast(scaled_width - output_size, tf.float32) offset_y = tf.maximum(0.0, offset_y) * tf.random_uniform([], 0, 1, seed=seed) offset_x = tf.maximum(0.0, offset_x) * tf.random_uniform([], 0, 1, seed=seed) offset_y = tf.cast(offset_y, tf.int32) offset_x = tf.cast(offset_x, tf.int32) # Scale, crop, and pad the input image. scaled_image = tf.image.resize_images( image, [scaled_height, scaled_width], method=resize_method) scaled_image = scaled_image[offset_y:offset_y + output_size, offset_x:offset_x + output_size, :] output_image = tf.image.pad_to_bounding_box(scaled_image, 0, 0, output_size, output_size) # Update the boxes. 
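  # The crop window below is first expressed in pixel coordinates of the
  # resized image ([offset_y, offset_x, offset_y + output_size,
  # offset_x + output_size]) and then normalized by the scaled height/width,
  # so that change_coordinate_frame can re-express every box relative to the
  # output crop. Boxes falling completely outside the window are pruned (and
  # the matching labels, weights, masks and keypoints are dropped via
  # `indices`); the surviving boxes are clipped to the [0, 1] window.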
new_window = tf.cast( tf.stack([offset_y, offset_x, offset_y + output_size, offset_x + output_size]), dtype=tf.float32) new_window /= tf.cast( tf.stack([scaled_height, scaled_width, scaled_height, scaled_width]), dtype=tf.float32) boxlist = box_list.BoxList(boxes) boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window) boxlist, indices = box_list_ops.prune_completely_outside_window( boxlist, [0.0, 0.0, 1.0, 1.0]) boxlist = box_list_ops.clip_to_window( boxlist, [0.0, 0.0, 1.0, 1.0], filter_nonoverlapping=False) return_values = [output_image, boxlist.get(), tf.gather(labels, indices), tf.gather(label_weights, indices)] if masks is not None: new_masks = tf.expand_dims(masks, -1) new_masks = tf.image.resize_images( new_masks, [scaled_height, scaled_width], method=resize_method) new_masks = new_masks[:, offset_y:offset_y + output_size, offset_x:offset_x + output_size, :] new_masks = tf.image.pad_to_bounding_box( new_masks, 0, 0, output_size, output_size) new_masks = tf.squeeze(new_masks, [-1]) return_values.append(tf.gather(new_masks, indices)) if keypoints is not None: keypoints = tf.gather(keypoints, indices) keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window) keypoints = keypoint_ops.prune_outside_window( keypoints, [0.0, 0.0, 1.0, 1.0]) return_values.append(keypoints) if label_confidences is not None: return_values.append(tf.gather(label_confidences, indices)) return return_values def get_default_func_arg_map(include_label_weights=True, include_label_confidences=False, include_multiclass_scores=False, include_instance_masks=False, include_instance_mask_weights=False, include_keypoints=False, include_keypoint_visibilities=False, include_dense_pose=False, include_keypoint_depths=False): """Returns the default mapping from a preprocessor function to its args. Args: include_label_weights: If True, preprocessing functions will modify the label weights, too. include_label_confidences: If True, preprocessing functions will modify the label confidences, too. include_multiclass_scores: If True, preprocessing functions will modify the multiclass scores, too. include_instance_masks: If True, preprocessing functions will modify the instance masks, too. include_instance_mask_weights: If True, preprocessing functions will modify the instance mask weights. include_keypoints: If True, preprocessing functions will modify the keypoints, too. include_keypoint_visibilities: If True, preprocessing functions will modify the keypoint visibilities, too. include_dense_pose: If True, preprocessing functions will modify the DensePose labels, too. include_keypoint_depths: If True, preprocessing functions will modify the keypoint depth labels, too. Returns: A map from preprocessing functions to the arguments they receive. 
""" groundtruth_label_weights = None if include_label_weights: groundtruth_label_weights = ( fields.InputDataFields.groundtruth_weights) groundtruth_label_confidences = None if include_label_confidences: groundtruth_label_confidences = ( fields.InputDataFields.groundtruth_confidences) multiclass_scores = None if include_multiclass_scores: multiclass_scores = (fields.InputDataFields.multiclass_scores) groundtruth_instance_masks = None if include_instance_masks: groundtruth_instance_masks = ( fields.InputDataFields.groundtruth_instance_masks) groundtruth_instance_mask_weights = None if include_instance_mask_weights: groundtruth_instance_mask_weights = ( fields.InputDataFields.groundtruth_instance_mask_weights) groundtruth_keypoints = None if include_keypoints: groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints groundtruth_keypoint_visibilities = None if include_keypoint_visibilities: groundtruth_keypoint_visibilities = ( fields.InputDataFields.groundtruth_keypoint_visibilities) groundtruth_dp_num_points = None groundtruth_dp_part_ids = None groundtruth_dp_surface_coords = None if include_dense_pose: groundtruth_dp_num_points = ( fields.InputDataFields.groundtruth_dp_num_points) groundtruth_dp_part_ids = ( fields.InputDataFields.groundtruth_dp_part_ids) groundtruth_dp_surface_coords = ( fields.InputDataFields.groundtruth_dp_surface_coords) groundtruth_keypoint_depths = None groundtruth_keypoint_depth_weights = None if include_keypoint_depths: groundtruth_keypoint_depths = ( fields.InputDataFields.groundtruth_keypoint_depths) groundtruth_keypoint_depth_weights = ( fields.InputDataFields.groundtruth_keypoint_depth_weights) prep_func_arg_map = { normalize_image: (fields.InputDataFields.image,), random_horizontal_flip: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, groundtruth_keypoint_visibilities, groundtruth_dp_part_ids, groundtruth_dp_surface_coords, groundtruth_keypoint_depths, groundtruth_keypoint_depth_weights, ), random_vertical_flip: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_rotation90: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_pixel_value_scale: (fields.InputDataFields.image,), random_image_scale: ( fields.InputDataFields.image, groundtruth_instance_masks, ), random_rgb_to_gray: (fields.InputDataFields.image,), random_adjust_brightness: (fields.InputDataFields.image,), random_adjust_contrast: (fields.InputDataFields.image,), random_adjust_hue: (fields.InputDataFields.image,), random_adjust_saturation: (fields.InputDataFields.image,), random_distort_color: (fields.InputDataFields.image,), random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,), random_crop_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_instance_mask_weights, groundtruth_keypoints, groundtruth_keypoint_visibilities, groundtruth_dp_num_points, groundtruth_dp_part_ids, groundtruth_dp_surface_coords), random_pad_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, groundtruth_dp_surface_coords), random_absolute_pad_image: (fields.InputDataFields.image, 
fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, groundtruth_dp_surface_coords), random_crop_pad_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores), random_crop_to_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), random_pad_to_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_black_patches: (fields.InputDataFields.image,), random_jpeg_quality: (fields.InputDataFields.image,), random_downscale_to_target_pixels: ( fields.InputDataFields.image, groundtruth_instance_masks, ), random_patch_gaussian: (fields.InputDataFields.image,), autoaugment_image: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, ), retain_boxes_above_threshold: ( fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), drop_label_probabilistically: ( fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), remap_labels: (fields.InputDataFields.groundtruth_classes,), image_to_float: (fields.InputDataFields.image,), random_resize_method: (fields.InputDataFields.image,), resize_to_range: ( fields.InputDataFields.image, groundtruth_instance_masks, ), resize_to_min_dimension: ( fields.InputDataFields.image, groundtruth_instance_masks, ), scale_boxes_to_pixel_coordinates: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_keypoints, ), resize_image: ( fields.InputDataFields.image, groundtruth_instance_masks, ), subtract_channel_mean: (fields.InputDataFields.image,), one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,), rgb_to_gray: (fields.InputDataFields.image,), random_self_concat_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores), ssd_random_crop: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints), ssd_random_crop_pad: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores), ssd_random_crop_fixed_aspect_ratio: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints ), ssd_random_crop_pad_fixed_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, 
multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), convert_class_logits_to_softmax: (multiclass_scores,), random_square_crop_by_scale: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, groundtruth_instance_masks, groundtruth_keypoints), random_scale_crop_and_pad_to_square: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_instance_masks, groundtruth_keypoints, groundtruth_label_confidences), adjust_gamma: (fields.InputDataFields.image,), } return prep_func_arg_map def preprocess(tensor_dict, preprocess_options, func_arg_map=None, preprocess_vars_cache=None): """Preprocess images and bounding boxes. Various types of preprocessing (to be implemented) based on the preprocess_options dictionary e.g. "crop image" (affects image and possibly boxes), "white balance image" (affects only image), etc. If self._options is None, no preprocessing is done. Args: tensor_dict: dictionary that contains images, boxes, and can contain other things as well. images-> rank 4 float32 tensor contains 1 image -> [1, height, width, 3]. with pixel values varying between [0, 1] boxes-> rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. preprocess_options: It is a list of tuples, where each tuple contains a function and a dictionary that contains arguments and their values. func_arg_map: mapping from preprocessing functions to arguments that they expect to receive and return. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: tensor_dict: which contains the preprocessed images, bounding boxes, etc. Raises: ValueError: (a) If the functions passed to Preprocess are not in func_arg_map. (b) If the arguments that a function needs do not exist in tensor_dict. 
(c) If image in tensor_dict is not rank 4 """ if func_arg_map is None: func_arg_map = get_default_func_arg_map() # changes the images to image (rank 4 to rank 3) since the functions # receive rank 3 tensor for image if fields.InputDataFields.image in tensor_dict: images = tensor_dict[fields.InputDataFields.image] if len(images.get_shape()) != 4: raise ValueError('images in tensor_dict should be rank 4') image = tf.squeeze(images, axis=0) tensor_dict[fields.InputDataFields.image] = image # Preprocess inputs based on preprocess_options for option in preprocess_options: func, params = option if func not in func_arg_map: raise ValueError('The function %s does not exist in func_arg_map' % (func.__name__)) arg_names = func_arg_map[func] for a in arg_names: if a is not None and a not in tensor_dict: raise ValueError('The function %s requires argument %s' % (func.__name__, a)) def get_arg(key): return tensor_dict[key] if key is not None else None args = [get_arg(a) for a in arg_names] if preprocess_vars_cache is not None: if six.PY2: # pylint: disable=deprecated-method arg_spec = inspect.getargspec(func) # pylint: enable=deprecated-method else: arg_spec = inspect.getfullargspec(func) if 'preprocess_vars_cache' in arg_spec.args: params['preprocess_vars_cache'] = preprocess_vars_cache results = func(*args, **params) if not isinstance(results, (list, tuple)): results = (results,) # Removes None args since the return values will not contain those. arg_names = [arg_name for arg_name in arg_names if arg_name is not None] for res, arg_name in zip(results, arg_names): tensor_dict[arg_name] = res # changes the image to images (rank 3 to rank 4) to be compatible to what # we received in the first place if fields.InputDataFields.image in tensor_dict: image = tensor_dict[fields.InputDataFields.image] images = tf.expand_dims(image, 0) tensor_dict[fields.InputDataFields.image] = images return tensor_dict
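# Usage sketch (illustrative only, not part of the original module): assuming
# this file is imported as `preprocessor` alongside `preprocessor_cache` and
# `standard_fields as fields`, a caller could wire up a small augmentation
# pipeline roughly as follows. The example tensors and their shapes are
# assumptions chosen purely for illustration.
#
#   tensor_dict = {
#       fields.InputDataFields.image:
#           tf.zeros([1, 480, 640, 3], tf.float32),
#       fields.InputDataFields.groundtruth_boxes:
#           tf.constant([[0.1, 0.1, 0.8, 0.9]], tf.float32),
#       fields.InputDataFields.groundtruth_classes:
#           tf.constant([1], tf.int32),
#       fields.InputDataFields.groundtruth_weights:
#           tf.constant([1.0], tf.float32),
#   }
#   preprocess_options = [
#       (preprocessor.random_horizontal_flip, {}),
#       (preprocessor.ssd_random_crop, {}),
#   ]
#   cache = preprocessor_cache.PreprocessorCache()
#   tensor_dict = preprocessor.preprocess(
#       tensor_dict,
#       preprocess_options,
#       func_arg_map=preprocessor.get_default_func_arg_map(),
#       preprocess_vars_cache=cache)
#
# Passing the same non-null cache to repeated calls makes the sampled
# augmentation parameters deterministic across those calls, which is how the
# preprocess_vars_cache argument documented above is intended to be used.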
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/preprocessor.py
preprocessor.py
"""Provides functions to batch a dictionary of input tensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from six.moves import range import tensorflow.compat.v1 as tf from object_detection.core import prefetcher rt_shape_str = '_runtime_shapes' class BatchQueue(object): """BatchQueue class. This class creates a batch queue to asynchronously enqueue tensors_dict. It also adds a FIFO prefetcher so that the batches are readily available for the consumers. Dequeue ops for a BatchQueue object can be created via the Dequeue method which evaluates to a batch of tensor_dict. Example input pipeline with batching: ------------------------------------ key, string_tensor = slim.parallel_reader.parallel_read(...) tensor_dict = decoder.decode(string_tensor) tensor_dict = preprocessor.preprocess(tensor_dict, ...) batch_queue = batcher.BatchQueue(tensor_dict, batch_size=32, batch_queue_capacity=2000, num_batch_queue_threads=8, prefetch_queue_capacity=20) tensor_dict = batch_queue.dequeue() outputs = Model(tensor_dict) ... ----------------------------------- Notes: ----- This class batches tensors of unequal sizes by zero padding and unpadding them after generating a batch. This can be computationally expensive when batching tensors (such as images) that are of vastly different sizes. So it is recommended that the shapes of such tensors be fully defined in tensor_dict while other lightweight tensors such as bounding box corners and class labels can be of varying sizes. Use either crop or resize operations to fully define the shape of an image in tensor_dict. It is also recommended to perform any preprocessing operations on tensors before passing to BatchQueue and subsequently calling the Dequeue method. Another caveat is that this class does not read the last batch if it is not full. The current implementation makes it hard to support that use case. So, for evaluation, when it is critical to run all the examples through your network use the input pipeline example mentioned in core/prefetcher.py. """ def __init__(self, tensor_dict, batch_size, batch_queue_capacity, num_batch_queue_threads, prefetch_queue_capacity): """Constructs a batch queue holding tensor_dict. Args: tensor_dict: dictionary of tensors to batch. batch_size: batch size. batch_queue_capacity: max capacity of the queue from which the tensors are batched. num_batch_queue_threads: number of threads to use for batching. prefetch_queue_capacity: max capacity of the queue used to prefetch assembled batches. """ # Remember static shapes to set shapes of batched tensors. static_shapes = collections.OrderedDict( {key: tensor.get_shape() for key, tensor in tensor_dict.items()}) # Remember runtime shapes to unpad tensors after batching. runtime_shapes = collections.OrderedDict( {(key + rt_shape_str): tf.shape(tensor) for key, tensor in tensor_dict.items()}) all_tensors = tensor_dict all_tensors.update(runtime_shapes) batched_tensors = tf.train.batch( all_tensors, capacity=batch_queue_capacity, batch_size=batch_size, dynamic_pad=True, num_threads=num_batch_queue_threads) self._queue = prefetcher.prefetch(batched_tensors, prefetch_queue_capacity) self._static_shapes = static_shapes self._batch_size = batch_size def dequeue(self): """Dequeues a batch of tensor_dict from the BatchQueue. TODO: use allow_smaller_final_batch to allow running over the whole eval set Returns: A list of tensor_dicts of the requested batch_size. 
""" batched_tensors = self._queue.dequeue() # Separate input tensors from tensors containing their runtime shapes. tensors = {} shapes = {} for key, batched_tensor in batched_tensors.items(): unbatched_tensor_list = tf.unstack(batched_tensor) for i, unbatched_tensor in enumerate(unbatched_tensor_list): if rt_shape_str in key: shapes[(key[:-len(rt_shape_str)], i)] = unbatched_tensor else: tensors[(key, i)] = unbatched_tensor # Undo that padding using shapes and create a list of size `batch_size` that # contains tensor dictionaries. tensor_dict_list = [] batch_size = self._batch_size for batch_id in range(batch_size): tensor_dict = {} for key in self._static_shapes: tensor_dict[key] = tf.slice(tensors[(key, batch_id)], tf.zeros_like(shapes[(key, batch_id)]), shapes[(key, batch_id)]) tensor_dict[key].set_shape(self._static_shapes[key]) tensor_dict_list.append(tensor_dict) return tensor_dict_list
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/batcher.py
batcher.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six import tensorflow.compat.v1 as tf from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.utils import ops from object_detection.utils import shape_utils class Loss(six.with_metaclass(abc.ABCMeta, object)): """Abstract base class for loss functions.""" def __call__(self, prediction_tensor, target_tensor, ignore_nan_targets=False, losses_mask=None, scope=None, **params): """Call the loss function. Args: prediction_tensor: an N-d tensor of shape [batch, anchors, ...] representing predicted quantities. target_tensor: an N-d tensor of shape [batch, anchors, ...] representing regression or classification targets. ignore_nan_targets: whether to ignore nan targets in the loss computation. E.g. can be used if the target tensor is missing groundtruth data that shouldn't be factored into the loss. losses_mask: A [batch] boolean tensor that indicates whether losses should be applied to individual images in the batch. For elements that are False, corresponding prediction, target, and weight tensors will not contribute to loss computation. If None, no filtering will take place prior to loss computation. scope: Op scope name. Defaults to 'Loss' if None. **params: Additional keyword arguments for specific implementations of the Loss. Returns: loss: a tensor representing the value of the loss function. """ with tf.name_scope(scope, 'Loss', [prediction_tensor, target_tensor, params]) as scope: if ignore_nan_targets: target_tensor = tf.where(tf.is_nan(target_tensor), prediction_tensor, target_tensor) if losses_mask is not None: tensor_multiplier = self._get_loss_multiplier_for_tensor( prediction_tensor, losses_mask) prediction_tensor *= tensor_multiplier target_tensor *= tensor_multiplier if 'weights' in params: params['weights'] = tf.convert_to_tensor(params['weights']) weights_multiplier = self._get_loss_multiplier_for_tensor( params['weights'], losses_mask) params['weights'] *= weights_multiplier return self._compute_loss(prediction_tensor, target_tensor, **params) def _get_loss_multiplier_for_tensor(self, tensor, losses_mask): loss_multiplier_shape = tf.stack([-1] + [1] * (len(tensor.shape) - 1)) return tf.cast(tf.reshape(losses_mask, loss_multiplier_shape), tf.float32) @abc.abstractmethod def _compute_loss(self, prediction_tensor, target_tensor, **params): """Method to be overridden by implementations. Args: prediction_tensor: a tensor representing predicted quantities target_tensor: a tensor representing regression or classification targets **params: Additional keyword arguments for specific implementations of the Loss. Returns: loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per anchor """ pass class WeightedL2LocalizationLoss(Loss): """L2 localization loss function with anchorwise output support. Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2 """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. 
""" weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims( weights, 2) square_diff = 0.5 * tf.square(weighted_diff) return tf.reduce_sum(square_diff, 2) class WeightedSmoothL1LocalizationLoss(Loss): """Smooth L1 localization loss function aka Huber Loss.. The smooth L1_loss is defined elementwise as .5 x^2 if |x| <= delta and delta * (|x|- 0.5*delta) otherwise, where x is the difference between predictions and target. See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015) """ def __init__(self, delta=1.0): """Constructor. Args: delta: delta for smooth L1 loss. """ super(WeightedSmoothL1LocalizationLoss, self).__init__() self._delta = delta def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ return tf.reduce_sum(tf.losses.huber_loss( target_tensor, prediction_tensor, delta=self._delta, weights=tf.expand_dims(weights, axis=2), loss_collection=None, reduction=tf.losses.Reduction.NONE ), axis=2) class WeightedIOULocalizationLoss(Loss): """IOU localization loss function. Sums the IOU for corresponding pairs of predicted/groundtruth boxes and for each pair assign a loss of 1 - IOU. We then compute a weighted sum over all pairs which is returned as the total loss. """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded predicted boxes target_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded target boxes weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [-1, 4])) target_boxes = box_list.BoxList(tf.reshape(target_tensor, [-1, 4])) per_anchor_iou_loss = 1.0 - box_list_ops.matched_iou(predicted_boxes, target_boxes) return tf.reshape(weights, [-1]) * per_anchor_iou_loss class WeightedGIOULocalizationLoss(Loss): """GIOU localization loss function. Sums the GIOU loss for corresponding pairs of predicted/groundtruth boxes and for each pair assign a loss of 1 - GIOU. We then compute a weighted sum over all pairs which is returned as the total loss. """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded predicted boxes target_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded target boxes weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. 
""" batch_size, num_anchors, _ = shape_utils.combined_static_and_dynamic_shape( prediction_tensor) predicted_boxes = tf.reshape(prediction_tensor, [-1, 4]) target_boxes = tf.reshape(target_tensor, [-1, 4]) per_anchor_iou_loss = 1 - ops.giou(predicted_boxes, target_boxes) return tf.reshape(tf.reshape(weights, [-1]) * per_anchor_iou_loss, [batch_size, num_anchors]) class WeightedSigmoidClassificationLoss(Loss): """Sigmoid cross entropy classification loss function.""" def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if class_indices is not None: weights *= tf.reshape( ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, -1]) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=target_tensor, logits=prediction_tensor)) return per_entry_cross_ent * weights class WeightedDiceClassificationLoss(Loss): """Dice loss for classification [1][2]. [1]: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient [2]: https://arxiv.org/abs/1606.04797 """ def __init__(self, squared_normalization): """Initializes the loss object. Args: squared_normalization: boolean, if set, we square the probabilities in the denominator term used for normalization. """ self._squared_normalization = squared_normalization super(WeightedDiceClassificationLoss, self).__init__() def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None): """Computes the loss value. Dice loss uses the area of the ground truth and prediction tensors for normalization. We compute area by summing along the anchors (2nd) dimension. Args: prediction_tensor: A float tensor of shape [batch_size, num_pixels, num_classes] representing the predicted logits for each class. num_pixels denotes the total number of pixels in the spatial dimensions of the mask after flattening. target_tensor: A float tensor of shape [batch_size, num_pixels, num_classes] representing one-hot encoded classification targets. num_pixels denotes the total number of pixels in the spatial dimensions of the mask after flattening. weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_classes] representing the value of the loss function. 
""" if class_indices is not None: weights *= tf.reshape( ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, -1]) prob_tensor = tf.nn.sigmoid(prediction_tensor) if self._squared_normalization: prob_tensor = tf.pow(prob_tensor, 2) target_tensor = tf.pow(target_tensor, 2) prob_tensor *= weights target_tensor *= weights prediction_area = tf.reduce_sum(prob_tensor, axis=1) gt_area = tf.reduce_sum(target_tensor, axis=1) intersection = tf.reduce_sum(prob_tensor * target_tensor, axis=1) dice_coeff = 2 * intersection / tf.maximum(gt_area + prediction_area, 1.0) dice_loss = 1 - dice_coeff return dice_loss class SigmoidFocalClassificationLoss(Loss): """Sigmoid focal cross entropy loss. Focal loss down-weights well classified examples and focusses on the hard examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition. """ def __init__(self, gamma=2.0, alpha=0.25): """Constructor. Args: gamma: exponent of the modulating factor (1 - p_t) ^ gamma. alpha: optional alpha weighting factor to balance positives vs negatives. """ super(SigmoidFocalClassificationLoss, self).__init__() self._alpha = alpha self._gamma = gamma def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if class_indices is not None: weights *= tf.reshape( ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, -1]) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=target_tensor, logits=prediction_tensor)) prediction_probabilities = tf.sigmoid(prediction_tensor) p_t = ((target_tensor * prediction_probabilities) + ((1 - target_tensor) * (1 - prediction_probabilities))) modulating_factor = 1.0 if self._gamma: modulating_factor = tf.pow(1.0 - p_t, self._gamma) alpha_weight_factor = 1.0 if self._alpha is not None: alpha_weight_factor = (target_tensor * self._alpha + (1 - target_tensor) * (1 - self._alpha)) focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor * per_entry_cross_ent) return focal_cross_entropy_loss * weights class WeightedSoftmaxClassificationLoss(Loss): """Softmax loss function.""" def __init__(self, logit_scale=1.0): """Constructor. Args: logit_scale: When this value is high, the prediction is "diffused" and when this value is low, the prediction is made peakier. (default 1.0) """ super(WeightedSoftmaxClassificationLoss, self).__init__() self._logit_scale = logit_scale def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. 
Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors] representing the value of the loss function. """ weights = tf.reduce_mean(weights, axis=2) num_classes = prediction_tensor.get_shape().as_list()[-1] prediction_tensor = tf.divide( prediction_tensor, self._logit_scale, name='scale_logit') per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( labels=tf.reshape(target_tensor, [-1, num_classes]), logits=tf.reshape(prediction_tensor, [-1, num_classes]))) return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights class WeightedSoftmaxClassificationAgainstLogitsLoss(Loss): """Softmax loss function against logits. Targets are expected to be provided in logits space instead of "one hot" or "probability distribution" space. """ def __init__(self, logit_scale=1.0): """Constructor. Args: logit_scale: When this value is high, the target is "diffused" and when this value is low, the target is made peakier. (default 1.0) """ super(WeightedSoftmaxClassificationAgainstLogitsLoss, self).__init__() self._logit_scale = logit_scale def _scale_and_softmax_logits(self, logits): """Scale logits then apply softmax.""" scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits') return tf.nn.softmax(scaled_logits, name='convert_scores') def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing logit classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors] representing the value of the loss function. """ weights = tf.reduce_mean(weights, axis=2) num_classes = prediction_tensor.get_shape().as_list()[-1] target_tensor = self._scale_and_softmax_logits(target_tensor) prediction_tensor = tf.divide(prediction_tensor, self._logit_scale, name='scale_logits') per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( labels=tf.reshape(target_tensor, [-1, num_classes]), logits=tf.reshape(prediction_tensor, [-1, num_classes]))) return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights class BootstrappedSigmoidClassificationLoss(Loss): """Bootstrapped sigmoid cross entropy classification loss function. This loss uses a convex combination of training labels and the current model's predictions as training targets in the classification loss. The idea is that as the model improves over time, its predictions can be trusted more and we can use these predictions to mitigate the damage of noisy/incorrect labels, because incorrect labels are likely to be eventually highly inconsistent with other stimuli predicted to have the same label by the model. 
In "soft" bootstrapping, we use all predicted class probabilities, whereas in "hard" bootstrapping, we use the single class favored by the model. See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by Reed et al. (ICLR 2015). """ def __init__(self, alpha, bootstrap_type='soft'): """Constructor. Args: alpha: a float32 scalar tensor between 0 and 1 representing interpolation weight bootstrap_type: set to either 'hard' or 'soft' (default) Raises: ValueError: if bootstrap_type is not either 'hard' or 'soft' """ super(BootstrappedSigmoidClassificationLoss, self).__init__() if bootstrap_type != 'hard' and bootstrap_type != 'soft': raise ValueError('Unrecognized bootstrap_type: must be one of ' '\'hard\' or \'soft.\'') self._alpha = alpha self._bootstrap_type = bootstrap_type def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if self._bootstrap_type == 'soft': bootstrap_target_tensor = self._alpha * target_tensor + ( 1.0 - self._alpha) * tf.sigmoid(prediction_tensor) else: bootstrap_target_tensor = self._alpha * target_tensor + ( 1.0 - self._alpha) * tf.cast( tf.sigmoid(prediction_tensor) > 0.5, tf.float32) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=bootstrap_target_tensor, logits=prediction_tensor)) return per_entry_cross_ent * weights class HardExampleMiner(object): """Hard example mining for regions in a list of images. Implements hard example mining to select a subset of regions to be back-propagated. For each image, selects the regions with highest losses, subject to the condition that a newly selected region cannot have an IOU > iou_threshold with any of the previously selected regions. This can be achieved by re-using a greedy non-maximum suppression algorithm. A constraint on the number of negatives mined per positive region can also be enforced. Reference papers: "Training Region-based Object Detectors with Online Hard Example Mining" (CVPR 2016) by Srivastava et al., and "SSD: Single Shot MultiBox Detector" (ECCV 2016) by Liu et al. """ def __init__(self, num_hard_examples=64, iou_threshold=0.7, loss_type='both', cls_loss_weight=0.05, loc_loss_weight=0.06, max_negatives_per_positive=None, min_negatives_per_image=0): """Constructor. The hard example mining implemented by this class can replicate the behavior in the two aforementioned papers (Srivastava et al., and Liu et al). To replicate the A2 paper (Srivastava et al), num_hard_examples is set to a fixed parameter (64 by default) and iou_threshold is set to .7 for running non-max-suppression the predicted boxes prior to hard mining. In order to replicate the SSD paper (Liu et al), num_hard_examples should be set to None, max_negatives_per_positive should be 3 and iou_threshold should be 1.0 (in order to effectively turn off NMS). 
Args: num_hard_examples: maximum number of hard examples to be selected per image (prior to enforcing max negative to positive ratio constraint). If set to None, all examples obtained after NMS are considered. iou_threshold: minimum intersection over union for an example to be discarded during NMS. loss_type: use only classification losses ('cls', default), localization losses ('loc') or both losses ('both'). In the last case, cls_loss_weight and loc_loss_weight are used to compute weighted sum of the two losses. cls_loss_weight: weight for classification loss. loc_loss_weight: weight for location loss. max_negatives_per_positive: maximum number of negatives to retain for each positive anchor. By default, num_negatives_per_positive is None, which means that we do not enforce a prespecified negative:positive ratio. Note also that num_negatives_per_positives can be a float (and will be converted to be a float even if it is passed in otherwise). min_negatives_per_image: minimum number of negative anchors to sample for a given image. Setting this to a positive number allows sampling negatives in an image without any positive anchors and thus not biased towards at least one detection per image. """ self._num_hard_examples = num_hard_examples self._iou_threshold = iou_threshold self._loss_type = loss_type self._cls_loss_weight = cls_loss_weight self._loc_loss_weight = loc_loss_weight self._max_negatives_per_positive = max_negatives_per_positive self._min_negatives_per_image = min_negatives_per_image if self._max_negatives_per_positive is not None: self._max_negatives_per_positive = float(self._max_negatives_per_positive) self._num_positives_list = None self._num_negatives_list = None def __call__(self, location_losses, cls_losses, decoded_boxlist_list, match_list=None): """Computes localization and classification losses after hard mining. Args: location_losses: a float tensor of shape [num_images, num_anchors] representing anchorwise localization losses. cls_losses: a float tensor of shape [num_images, num_anchors] representing anchorwise classification losses. decoded_boxlist_list: a list of decoded BoxList representing location predictions for each image. match_list: an optional list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. Match objects in match_list are used to reference which anchors are positive, negative or ignored. If self._max_negatives_per_positive exists, these are then used to enforce a prespecified negative to positive ratio. Returns: mined_location_loss: a float scalar with sum of localization losses from selected hard examples. mined_cls_loss: a float scalar with sum of classification losses from selected hard examples. Raises: ValueError: if location_losses, cls_losses and decoded_boxlist_list do not have compatible shapes (i.e., they must correspond to the same number of images). ValueError: if match_list is specified but its length does not match len(decoded_boxlist_list). 
""" mined_location_losses = [] mined_cls_losses = [] location_losses = tf.unstack(location_losses) cls_losses = tf.unstack(cls_losses) num_images = len(decoded_boxlist_list) if not match_list: match_list = num_images * [None] if not len(location_losses) == len(decoded_boxlist_list) == len(cls_losses): raise ValueError('location_losses, cls_losses and decoded_boxlist_list ' 'do not have compatible shapes.') if not isinstance(match_list, list): raise ValueError('match_list must be a list.') if len(match_list) != len(decoded_boxlist_list): raise ValueError('match_list must either be None or have ' 'length=len(decoded_boxlist_list).') num_positives_list = [] num_negatives_list = [] for ind, detection_boxlist in enumerate(decoded_boxlist_list): box_locations = detection_boxlist.get() match = match_list[ind] image_losses = cls_losses[ind] if self._loss_type == 'loc': image_losses = location_losses[ind] elif self._loss_type == 'both': image_losses *= self._cls_loss_weight image_losses += location_losses[ind] * self._loc_loss_weight if self._num_hard_examples is not None: num_hard_examples = self._num_hard_examples else: num_hard_examples = detection_boxlist.num_boxes() selected_indices = tf.image.non_max_suppression( box_locations, image_losses, num_hard_examples, self._iou_threshold) if self._max_negatives_per_positive is not None and match: (selected_indices, num_positives, num_negatives) = self._subsample_selection_to_desired_neg_pos_ratio( selected_indices, match, self._max_negatives_per_positive, self._min_negatives_per_image) num_positives_list.append(num_positives) num_negatives_list.append(num_negatives) mined_location_losses.append( tf.reduce_sum(tf.gather(location_losses[ind], selected_indices))) mined_cls_losses.append( tf.reduce_sum(tf.gather(cls_losses[ind], selected_indices))) location_loss = tf.reduce_sum(tf.stack(mined_location_losses)) cls_loss = tf.reduce_sum(tf.stack(mined_cls_losses)) if match and self._max_negatives_per_positive: self._num_positives_list = num_positives_list self._num_negatives_list = num_negatives_list return (location_loss, cls_loss) def summarize(self): """Summarize the number of positives and negatives after mining.""" if self._num_positives_list and self._num_negatives_list: avg_num_positives = tf.reduce_mean( tf.cast(self._num_positives_list, dtype=tf.float32)) avg_num_negatives = tf.reduce_mean( tf.cast(self._num_negatives_list, dtype=tf.float32)) tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives) tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives) def _subsample_selection_to_desired_neg_pos_ratio(self, indices, match, max_negatives_per_positive, min_negatives_per_image=0): """Subsample a collection of selected indices to a desired neg:pos ratio. This function takes a subset of M indices (indexing into a large anchor collection of N anchors where M<N) which are labeled as positive/negative via a Match object (matched indices are positive, unmatched indices are negative). It returns a subset of the provided indices retaining all positives as well as up to the first K negatives, where: K=floor(num_negative_per_positive * num_positives). For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors), with positives=[2, 5] and negatives=[4, 7, 9, 10] and num_negatives_per_positive=1, then the returned subset of indices is [2, 4, 5, 7]. 
Args: indices: An integer tensor of shape [M] representing a collection of selected anchor indices match: A matcher.Match object encoding the match between anchors and groundtruth boxes for a given image, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. max_negatives_per_positive: (float) maximum number of negatives for each positive anchor. min_negatives_per_image: minimum number of negative anchors for a given image. Allow sampling negatives in image without any positive anchors. Returns: selected_indices: An integer tensor of shape [M'] representing a collection of selected anchor indices with M' <= M. num_positives: An integer tensor representing the number of positive examples in selected set of indices. num_negatives: An integer tensor representing the number of negative examples in selected set of indices. """ positives_indicator = tf.gather(match.matched_column_indicator(), indices) negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices) num_positives = tf.reduce_sum(tf.cast(positives_indicator, dtype=tf.int32)) max_negatives = tf.maximum( min_negatives_per_image, tf.cast(max_negatives_per_positive * tf.cast(num_positives, dtype=tf.float32), dtype=tf.int32)) topk_negatives_indicator = tf.less_equal( tf.cumsum(tf.cast(negatives_indicator, dtype=tf.int32)), max_negatives) subsampled_selection_indices = tf.where( tf.logical_or(positives_indicator, topk_negatives_indicator)) num_negatives = tf.size(subsampled_selection_indices) - num_positives return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]), num_positives, num_negatives) class PenaltyReducedLogisticFocalLoss(Loss): """Penalty-reduced pixelwise logistic regression with focal loss. The loss is defined in Equation (1) of the Objects as Points[1] paper. Although the loss is defined per-pixel in the output space, this class assumes that each pixel is an anchor to be compatible with the base class. [1]: https://arxiv.org/abs/1904.07850 """ def __init__(self, alpha=2.0, beta=4.0, sigmoid_clip_value=1e-4): """Constructor. Args: alpha: Focussing parameter of the focal loss. Increasing this will decrease the loss contribution of the well classified examples. beta: The local penalty reduction factor. Increasing this will decrease the contribution of loss due to negative pixels near the keypoint. sigmoid_clip_value: The sigmoid operation used internally will be clipped between [sigmoid_clip_value, 1 - sigmoid_clip_value) """ self._alpha = alpha self._beta = beta self._sigmoid_clip_value = sigmoid_clip_value super(PenaltyReducedLogisticFocalLoss, self).__init__() def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. In all input tensors, `num_anchors` is the total number of pixels in the the output space. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted unscaled logits for each class. The function will compute sigmoid on this tensor internally. target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing a tensor with the 'splatted' keypoints, possibly using a gaussian kernel. This function assumes that the target is bounded between [0, 1]. weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. 
Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ is_present_tensor = tf.math.equal(target_tensor, 1.0) prediction_tensor = tf.clip_by_value(tf.sigmoid(prediction_tensor), self._sigmoid_clip_value, 1 - self._sigmoid_clip_value) positive_loss = (tf.math.pow((1 - prediction_tensor), self._alpha)* tf.math.log(prediction_tensor)) negative_loss = (tf.math.pow((1 - target_tensor), self._beta)* tf.math.pow(prediction_tensor, self._alpha)* tf.math.log(1 - prediction_tensor)) loss = -tf.where(is_present_tensor, positive_loss, negative_loss) return loss * weights class L1LocalizationLoss(Loss): """L1 loss or absolute difference. When used in a per-pixel manner, each pixel should be given as an anchor. """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ return tf.losses.absolute_difference( target_tensor, prediction_tensor, weights=weights, loss_collection=None, reduction=tf.losses.Reduction.NONE )
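# --- Illustrative usage sketch (not part of the upstream module) ---
# A minimal example of driving one of the classification losses defined above
# with toy tensors. It assumes the base `Loss.__call__` (defined earlier in
# this file) forwards extra keyword arguments such as `weights` on to
# `_compute_loss`, as in the upstream Object Detection API; all tensor values
# below are hypothetical.
if __name__ == '__main__':
  batch_size, num_anchors, num_classes = 2, 8, 4

  # Random logits and one-hot targets with the documented
  # [batch_size, num_anchors, num_classes] layout.
  toy_logits = tf.random.normal([batch_size, num_anchors, num_classes])
  toy_labels = tf.one_hot(
      tf.random.uniform([batch_size, num_anchors],
                        maxval=num_classes, dtype=tf.int32),
      depth=num_classes)
  # One weight per anchor; broadcast over classes per the docstrings.
  toy_weights = tf.ones([batch_size, num_anchors, 1])

  focal_loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
  per_entry_loss = focal_loss_fn(toy_logits, toy_labels, weights=toy_weights)
  # per_entry_loss has shape [batch_size, num_anchors, num_classes]; callers
  # typically reduce it (e.g. with tf.reduce_sum) before optimization.
  total_loss = tf.reduce_sum(per_entry_loss)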
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/losses.py
losses.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six import tensorflow.compat.v1 as tf from object_detection.utils import ops class Match(object): """Class to store results from the matcher. This class is used to store the results from the matcher. It provides convenient methods to query the matching results. """ def __init__(self, match_results, use_matmul_gather=False): """Constructs a Match object. Args: match_results: Integer tensor of shape [N] with (1) match_results[i]>=0, meaning that column i is matched with row match_results[i]. (2) match_results[i]=-1, meaning that column i is not matched. (3) match_results[i]=-2, meaning that column i is ignored. use_matmul_gather: Use matrix multiplication based gather instead of standard tf.gather. (Default: False). Raises: ValueError: if match_results does not have rank 1 or is not an integer int32 scalar tensor """ if match_results.shape.ndims != 1: raise ValueError('match_results should have rank 1') if match_results.dtype != tf.int32: raise ValueError('match_results should be an int32 or int64 scalar ' 'tensor') self._match_results = match_results self._gather_op = tf.gather if use_matmul_gather: self._gather_op = ops.matmul_gather_on_zeroth_axis @property def match_results(self): """The accessor for match results. Returns: the tensor which encodes the match results. """ return self._match_results def matched_column_indices(self): """Returns column indices that match to some row. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1))) def matched_column_indicator(self): """Returns column indices that are matched. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return tf.greater_equal(self._match_results, 0) def num_matched_columns(self): """Returns number (int32 scalar tensor) of matched columns.""" return tf.size(self.matched_column_indices()) def unmatched_column_indices(self): """Returns column indices that do not match any row. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1))) def unmatched_column_indicator(self): """Returns column indices that are unmatched. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return tf.equal(self._match_results, -1) def num_unmatched_columns(self): """Returns number (int32 scalar tensor) of unmatched columns.""" return tf.size(self.unmatched_column_indices()) def ignored_column_indices(self): """Returns column indices that are ignored (neither Matched nor Unmatched). The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(self.ignored_column_indicator())) def ignored_column_indicator(self): """Returns boolean column indicator where True means the colum is ignored. Returns: column_indicator: boolean vector which is True for all ignored column indices. 
""" return tf.equal(self._match_results, -2) def num_ignored_columns(self): """Returns number (int32 scalar tensor) of matched columns.""" return tf.size(self.ignored_column_indices()) def unmatched_or_ignored_column_indices(self): """Returns column indices that are unmatched or ignored. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results))) def matched_row_indices(self): """Returns row indices that match some column. The indices returned by this op are ordered so as to be in correspondence with the output of matched_column_indicator(). For example if self.matched_column_indicator() is [0,2], and self.matched_row_indices() is [7, 3], then we know that column 0 was matched to row 7 and column 2 was matched to row 3. Returns: row_indices: int32 tensor of shape [K] with row indices. """ return self._reshape_and_cast( self._gather_op(tf.cast(self._match_results, dtype=tf.float32), self.matched_column_indices())) def num_matched_rows(self): """Returns number (int32 scalar tensor) of matched rows.""" unique_rows, _ = tf.unique(self.matched_row_indices()) return tf.size(unique_rows) def _reshape_and_cast(self, t): return tf.cast(tf.reshape(t, [-1]), tf.int32) def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value): """Gathers elements from `input_tensor` based on match results. For columns that are matched to a row, gathered_tensor[col] is set to input_tensor[match_results[col]]. For columns that are unmatched, gathered_tensor[col] is set to unmatched_value. Finally, for columns that are ignored gathered_tensor[col] is set to ignored_value. Note that the input_tensor.shape[1:] must match with unmatched_value.shape and ignored_value.shape Args: input_tensor: Tensor to gather values from. unmatched_value: Constant tensor value for unmatched columns. ignored_value: Constant tensor value for ignored columns. Returns: gathered_tensor: A tensor containing values gathered from input_tensor. The shape of the gathered tensor is [match_results.shape[0]] + input_tensor.shape[1:]. """ input_tensor = tf.concat( [tf.stack([ignored_value, unmatched_value]), input_tensor], axis=0) gather_indices = tf.maximum(self.match_results + 2, 0) gathered_tensor = self._gather_op(input_tensor, gather_indices) return gathered_tensor class Matcher(six.with_metaclass(abc.ABCMeta, object)): """Abstract base class for matcher. """ def __init__(self, use_matmul_gather=False): """Constructs a Matcher. Args: use_matmul_gather: Force constructed match objects to use matrix multiplication based gather instead of standard tf.gather. (Default: False). """ self._use_matmul_gather = use_matmul_gather def match(self, similarity_matrix, valid_rows=None, scope=None): """Computes matches among row and column indices and returns the result. Computes matches among the row and column indices based on the similarity matrix and optional arguments. Args: similarity_matrix: Float tensor of shape [N, M] with pairwise similarity where higher value means more similar. valid_rows: A boolean tensor of shape [N] indicating the rows that are valid for matching. scope: Op scope name. Defaults to 'Match' if None. Returns: A Match object with the results of matching. 
""" with tf.name_scope(scope, 'Match') as scope: if valid_rows is None: valid_rows = tf.ones(tf.shape(similarity_matrix)[0], dtype=tf.bool) return Match(self._match(similarity_matrix, valid_rows), self._use_matmul_gather) @abc.abstractmethod def _match(self, similarity_matrix, valid_rows): """Method to be overridden by implementations. Args: similarity_matrix: Float tensor of shape [N, M] with pairwise similarity where higher value means more similar. valid_rows: A boolean tensor of shape [N] indicating the rows that are valid for matching. Returns: match_results: Integer tensor of shape [M]: match_results[i]>=0 means that column i is matched to row match_results[i], match_results[i]=-1 means that the column is not matched. match_results[i]=-2 means that the column is ignored (usually this happens when there is a very weak match which one neither wants as positive nor negative example). """ pass
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/matcher.py
matcher.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf import tensorflow.compat.v2 as tf2 from object_detection.box_coders import faster_rcnn_box_coder from object_detection.box_coders import mean_stddev_box_coder from object_detection.core import box_coder from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import densepose_ops from object_detection.core import keypoint_ops from object_detection.core import matcher as mat from object_detection.core import region_similarity_calculator as sim_calc from object_detection.core import standard_fields as fields from object_detection.matchers import argmax_matcher from object_detection.matchers import hungarian_matcher from object_detection.utils import shape_utils from object_detection.utils import target_assigner_utils as ta_utils from object_detection.utils import tf_version if tf_version.is_tf1(): from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top ResizeMethod = tf2.image.ResizeMethod _DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0 class TargetAssigner(object): """Target assigner to compute classification and regression targets.""" def __init__(self, similarity_calc, matcher, box_coder_instance, negative_class_weight=1.0): """Construct Object Detection Target Assigner. Args: similarity_calc: a RegionSimilarityCalculator matcher: an object_detection.core.Matcher used to match groundtruth to anchors. box_coder_instance: an object_detection.core.BoxCoder used to encode matching groundtruth boxes with respect to anchors. negative_class_weight: classification weight to be associated to negative anchors (default: 1.0). The weight must be in [0., 1.]. Raises: ValueError: if similarity_calc is not a RegionSimilarityCalculator or if matcher is not a Matcher or if box_coder is not a BoxCoder """ if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator): raise ValueError('similarity_calc must be a RegionSimilarityCalculator') if not isinstance(matcher, mat.Matcher): raise ValueError('matcher must be a Matcher') if not isinstance(box_coder_instance, box_coder.BoxCoder): raise ValueError('box_coder must be a BoxCoder') self._similarity_calc = similarity_calc self._matcher = matcher self._box_coder = box_coder_instance self._negative_class_weight = negative_class_weight @property def box_coder(self): return self._box_coder # TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields. def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, unmatched_class_label=None, groundtruth_weights=None): """Assign classification and regression targets to each anchor. For a given set of anchors and groundtruth detections, match anchors to groundtruth_boxes and assign classification and regression targets to each anchor as well as weights based on the resulting match (specifying, e.g., which anchors should not contribute to training loss). Anchors that are not matched to anything are given a classification target of self._unmatched_cls_target which can be specified via the constructor. Args: anchors: a BoxList representing N anchors groundtruth_boxes: a BoxList representing M groundtruth boxes groundtruth_labels: a tensor of shape [M, d_1, ... d_k] with labels for each of the ground_truth boxes. The subshape [d_1, ... d_k] can be empty (corresponding to scalar inputs). 
When set to None, groundtruth_labels assumes a binary problem where all ground_truth boxes get a positive label (of 1). unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). If set to None, unmatched_cls_target is set to be [0] for each anchor. groundtruth_weights: a float tensor of shape [M] indicating the weight to assign to all anchors match to a particular groundtruth box. The weights must be in [0., 1.]. If None, all weights are set to 1. Generally no groundtruth boxes with zero weight match to any anchors as matchers are aware of groundtruth weights. Additionally, `cls_weights` and `reg_weights` are calculated using groundtruth weights as an added safety. Returns: cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has shape [num_gt_boxes, d_1, d_2, ... d_k]. cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], representing weights for each element in cls_targets. reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension] reg_weights: a float32 tensor with shape [num_anchors] match: an int32 tensor of shape [num_anchors] containing result of anchor groundtruth matching. Each position in the tensor indicates an anchor and holds the following meaning: (1) if match[i] >= 0, anchor i is matched with groundtruth match[i]. (2) if match[i]=-1, anchor i is marked to be background . (3) if match[i]=-2, anchor i is ignored since it is not background and does not have sufficient overlap to call it a foreground. 
Raises: ValueError: if anchors or groundtruth_boxes are not of type box_list.BoxList """ if not isinstance(anchors, box_list.BoxList): raise ValueError('anchors must be an BoxList') if not isinstance(groundtruth_boxes, box_list.BoxList): raise ValueError('groundtruth_boxes must be an BoxList') if unmatched_class_label is None: unmatched_class_label = tf.constant([0], tf.float32) if groundtruth_labels is None: groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0)) groundtruth_labels = tf.expand_dims(groundtruth_labels, -1) unmatched_shape_assert = shape_utils.assert_shape_equal( shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:], shape_utils.combined_static_and_dynamic_shape(unmatched_class_label)) labels_and_box_shapes_assert = shape_utils.assert_shape_equal( shape_utils.combined_static_and_dynamic_shape( groundtruth_labels)[:1], shape_utils.combined_static_and_dynamic_shape( groundtruth_boxes.get())[:1]) if groundtruth_weights is None: num_gt_boxes = groundtruth_boxes.num_boxes_static() if not num_gt_boxes: num_gt_boxes = groundtruth_boxes.num_boxes() groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32) # set scores on the gt boxes scores = 1 - groundtruth_labels[:, 0] groundtruth_boxes.add_field(fields.BoxListFields.scores, scores) with tf.control_dependencies( [unmatched_shape_assert, labels_and_box_shapes_assert]): match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors) match = self._matcher.match(match_quality_matrix, valid_rows=tf.greater(groundtruth_weights, 0)) reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match) cls_targets = self._create_classification_targets(groundtruth_labels, unmatched_class_label, match) reg_weights = self._create_regression_weights(match, groundtruth_weights) cls_weights = self._create_classification_weights(match, groundtruth_weights) # convert cls_weights from per-anchor to per-class. class_label_shape = tf.shape(cls_targets)[1:] weights_shape = tf.shape(cls_weights) weights_multiple = tf.concat( [tf.ones_like(weights_shape), class_label_shape], axis=0) for _ in range(len(cls_targets.get_shape()[1:])): cls_weights = tf.expand_dims(cls_weights, -1) cls_weights = tf.tile(cls_weights, weights_multiple) num_anchors = anchors.num_boxes_static() if num_anchors is not None: reg_targets = self._reset_target_shape(reg_targets, num_anchors) cls_targets = self._reset_target_shape(cls_targets, num_anchors) reg_weights = self._reset_target_shape(reg_weights, num_anchors) cls_weights = self._reset_target_shape(cls_weights, num_anchors) return (cls_targets, cls_weights, reg_targets, reg_weights, match.match_results) def _reset_target_shape(self, target, num_anchors): """Sets the static shape of the target. Args: target: the target tensor. Its first dimension will be overwritten. num_anchors: the number of anchors, which is used to override the target's first dimension. Returns: A tensor with the shape info filled in. """ target_shape = target.get_shape().as_list() target_shape[0] = num_anchors target.set_shape(target_shape) return target def _create_regression_targets(self, anchors, groundtruth_boxes, match): """Returns a regression target for each anchor. 
Args: anchors: a BoxList representing N anchors groundtruth_boxes: a BoxList representing M groundtruth_boxes match: a matcher.Match object Returns: reg_targets: a float32 tensor with shape [N, box_code_dimension] """ matched_gt_boxes = match.gather_based_on_match( groundtruth_boxes.get(), unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4)) matched_gt_boxlist = box_list.BoxList(matched_gt_boxes) if groundtruth_boxes.has_field(fields.BoxListFields.keypoints): groundtruth_keypoints = groundtruth_boxes.get_field( fields.BoxListFields.keypoints) matched_keypoints = match.gather_based_on_match( groundtruth_keypoints, unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]), ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:])) matched_gt_boxlist.add_field(fields.BoxListFields.keypoints, matched_keypoints) matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors) match_results_shape = shape_utils.combined_static_and_dynamic_shape( match.match_results) # Zero out the unmatched and ignored regression targets. unmatched_ignored_reg_targets = tf.tile( self._default_regression_target(), [match_results_shape[0], 1]) matched_anchors_mask = match.matched_column_indicator() reg_targets = tf.where(matched_anchors_mask, matched_reg_targets, unmatched_ignored_reg_targets) return reg_targets def _default_regression_target(self): """Returns the default target for anchors to regress to. Default regression targets are set to zero (though in this implementation what these targets are set to should not matter as the regression weight of any box set to regress to the default target is zero). Returns: default_target: a float32 tensor with shape [1, box_code_dimension] """ return tf.constant([self._box_coder.code_size*[0]], tf.float32) def _create_classification_targets(self, groundtruth_labels, unmatched_class_label, match): """Create classification targets for each anchor. Assign a classification target of for each anchor to the matching groundtruth label that is provided by match. Anchors that are not matched to anything are given the target self._unmatched_cls_target Args: groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k] with labels for each of the ground_truth boxes. The subshape [d_1, ... d_k] can be empty (corresponding to scalar labels). unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). match: a matcher.Match object that provides a matching between anchors and groundtruth boxes. Returns: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has shape [num_gt_boxes, d_1, d_2, ... d_k]. """ return match.gather_based_on_match( groundtruth_labels, unmatched_value=unmatched_class_label, ignored_value=unmatched_class_label) def _create_regression_weights(self, match, groundtruth_weights): """Set regression weight for each anchor. Only positive anchors are set to contribute to the regression loss, so this method returns a weight of 1 for every positive anchor and 0 for every negative anchor. Args: match: a matcher.Match object that provides a matching between anchors and groundtruth boxes. 
groundtruth_weights: a float tensor of shape [M] indicating the weight to assign to all anchors match to a particular groundtruth box. Returns: a float32 tensor with shape [num_anchors] representing regression weights. """ return match.gather_based_on_match( groundtruth_weights, ignored_value=0., unmatched_value=0.) def _create_classification_weights(self, match, groundtruth_weights): """Create classification weights for each anchor. Positive (matched) anchors are associated with a weight of positive_class_weight and negative (unmatched) anchors are associated with a weight of negative_class_weight. When anchors are ignored, weights are set to zero. By default, both positive/negative weights are set to 1.0, but they can be adjusted to handle class imbalance (which is almost always the case in object detection). Args: match: a matcher.Match object that provides a matching between anchors and groundtruth boxes. groundtruth_weights: a float tensor of shape [M] indicating the weight to assign to all anchors match to a particular groundtruth box. Returns: a float32 tensor with shape [num_anchors] representing classification weights. """ return match.gather_based_on_match( groundtruth_weights, ignored_value=0., unmatched_value=self._negative_class_weight) def get_box_coder(self): """Get BoxCoder of this TargetAssigner. Returns: BoxCoder object. """ return self._box_coder # TODO(rathodv): This method pulls in all the implementation dependencies into # core. Therefore its best to have this factory method outside of core. def create_target_assigner(reference, stage=None, negative_class_weight=1.0, use_matmul_gather=False): """Factory function for creating standard target assigners. Args: reference: string referencing the type of TargetAssigner. stage: string denoting stage: {proposal, detection}. negative_class_weight: classification weight to be associated to negative anchors (default: 1.0) use_matmul_gather: whether to use matrix multiplication based gather which are better suited for TPUs. Returns: TargetAssigner: desired target assigner. Raises: ValueError: if combination reference+stage is invalid. """ if reference == 'Multibox' and stage == 'proposal': if tf_version.is_tf2(): raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.') similarity_calc = sim_calc.NegSqDistSimilarity() matcher = bipartite_matcher.GreedyBipartiteMatcher() box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder() elif reference == 'FasterRCNN' and stage == 'proposal': similarity_calc = sim_calc.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7, unmatched_threshold=0.3, force_match_for_each_row=True, use_matmul_gather=use_matmul_gather) box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder( scale_factors=[10.0, 10.0, 5.0, 5.0]) elif reference == 'FasterRCNN' and stage == 'detection': similarity_calc = sim_calc.IouSimilarity() # Uses all proposals with IOU < 0.5 as candidate negatives. 
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, negatives_lower_than_unmatched=True, use_matmul_gather=use_matmul_gather) box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder( scale_factors=[10.0, 10.0, 5.0, 5.0]) elif reference == 'FastRCNN': similarity_calc = sim_calc.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.1, force_match_for_each_row=False, negatives_lower_than_unmatched=False, use_matmul_gather=use_matmul_gather) box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder() else: raise ValueError('No valid combination of reference and stage.') return TargetAssigner(similarity_calc, matcher, box_coder_instance, negative_class_weight=negative_class_weight) def batch_assign(target_assigner, anchors_batch, gt_box_batch, gt_class_targets_batch, unmatched_class_label=None, gt_weights_batch=None): """Batched assignment of classification and regression targets. Args: target_assigner: a target assigner. anchors_batch: BoxList representing N box anchors or list of BoxList objects with length batch_size representing anchor sets. gt_box_batch: a list of BoxList objects with length batch_size representing groundtruth boxes for each image in the batch gt_class_targets_batch: a list of tensors with length batch_size, where each tensor has shape [num_gt_boxes_i, classification_target_size] and num_gt_boxes_i is the number of boxes in the ith boxlist of gt_box_batch. unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). gt_weights_batch: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. Returns: batch_cls_targets: a tensor with shape [batch_size, num_anchors, num_classes], batch_cls_weights: a tensor with shape [batch_size, num_anchors, num_classes], batch_reg_targets: a tensor with shape [batch_size, num_anchors, box_code_dimension] batch_reg_weights: a tensor with shape [batch_size, num_anchors], match: an int32 tensor of shape [batch_size, num_anchors] containing result of anchor groundtruth matching. Each position in the tensor indicates an anchor and holds the following meaning: (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. (2) if match[x, i]=-1, anchor i is marked to be background . (3) if match[x, i]=-2, anchor i is ignored since it is not background and does not have sufficient overlap to call it a foreground. Raises: ValueError: if input list lengths are inconsistent, i.e., batch_size == len(gt_box_batch) == len(gt_class_targets_batch) and batch_size == len(anchors_batch) unless anchors_batch is a single BoxList. 
""" if not isinstance(anchors_batch, list): anchors_batch = len(gt_box_batch) * [anchors_batch] if not all( isinstance(anchors, box_list.BoxList) for anchors in anchors_batch): raise ValueError('anchors_batch must be a BoxList or list of BoxLists.') if not (len(anchors_batch) == len(gt_box_batch) == len(gt_class_targets_batch)): raise ValueError('batch size incompatible with lengths of anchors_batch, ' 'gt_box_batch and gt_class_targets_batch.') cls_targets_list = [] cls_weights_list = [] reg_targets_list = [] reg_weights_list = [] match_list = [] if gt_weights_batch is None: gt_weights_batch = [None] * len(gt_class_targets_batch) for anchors, gt_boxes, gt_class_targets, gt_weights in zip( anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch): (cls_targets, cls_weights, reg_targets, reg_weights, match) = target_assigner.assign( anchors, gt_boxes, gt_class_targets, unmatched_class_label, gt_weights) cls_targets_list.append(cls_targets) cls_weights_list.append(cls_weights) reg_targets_list.append(reg_targets) reg_weights_list.append(reg_weights) match_list.append(match) batch_cls_targets = tf.stack(cls_targets_list) batch_cls_weights = tf.stack(cls_weights_list) batch_reg_targets = tf.stack(reg_targets_list) batch_reg_weights = tf.stack(reg_weights_list) batch_match = tf.stack(match_list) return (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, batch_match) # Assign an alias to avoid large refactor of existing users. batch_assign_targets = batch_assign def batch_get_targets(batch_match, groundtruth_tensor_list, groundtruth_weights_list, unmatched_value, unmatched_weight): """Returns targets based on anchor-groundtruth box matching results. Args: batch_match: An int32 tensor of shape [batch, num_anchors] containing the result of target assignment returned by TargetAssigner.assign(..). groundtruth_tensor_list: A list of groundtruth tensors of shape [num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type. groundtruth_weights_list: A list of weights, one per groundtruth tensor, of shape [num_groundtruth]. unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as groundtruth tensor containing target value for anchors that remain unmatched. unmatched_weight: Scalar weight to assign to anchors that remain unmatched. Returns: targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k] containing targets for anchors. weights: A float tensor of shape [batch, num_anchors] containing the weights to assign to each target. """ match_list = tf.unstack(batch_match) targets_list = [] weights_list = [] for match_tensor, groundtruth_tensor, groundtruth_weight in zip( match_list, groundtruth_tensor_list, groundtruth_weights_list): match_object = mat.Match(match_tensor) targets = match_object.gather_based_on_match( groundtruth_tensor, unmatched_value=unmatched_value, ignored_value=unmatched_value) targets_list.append(targets) weights = match_object.gather_based_on_match( groundtruth_weight, unmatched_value=unmatched_weight, ignored_value=tf.zeros_like(unmatched_weight)) weights_list.append(weights) return tf.stack(targets_list), tf.stack(weights_list) def batch_assign_confidences(target_assigner, anchors_batch, gt_box_batch, gt_class_confidences_batch, gt_weights_batch=None, unmatched_class_label=None, include_background_class=True, implicit_class_weight=1.0): """Batched assignment of classification and regression targets. 
This differences between batch_assign_confidences and batch_assign_targets: - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and tensor (high-dimensional) targets. 'batch_assign_confidences' only support scalar (agnostic) and vector (multiclass) targets. - 'batch_assign_targets' assumes the input class tensor using the binary one/K-hot encoding. 'batch_assign_confidences' takes the class confidence scores as the input, where 1 means positive classes, 0 means implicit negative classes, and -1 means explicit negative classes. - 'batch_assign_confidences' assigns the targets in the similar way as 'batch_assign_targets' except that it gives different weights for implicit and explicit classes. This allows user to control the negative gradients pushed differently for implicit and explicit examples during the training. Args: target_assigner: a target assigner. anchors_batch: BoxList representing N box anchors or list of BoxList objects with length batch_size representing anchor sets. gt_box_batch: a list of BoxList objects with length batch_size representing groundtruth boxes for each image in the batch gt_class_confidences_batch: a list of tensors with length batch_size, where each tensor has shape [num_gt_boxes_i, classification_target_size] and num_gt_boxes_i is the number of boxes in the ith boxlist of gt_box_batch. Note that in this tensor, 1 means explicit positive class, -1 means explicit negative class, and 0 means implicit negative class. gt_weights_batch: A list of 1-D tf.float32 tensors of shape [num_gt_boxes_i] containing weights for groundtruth boxes. unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). include_background_class: whether or not gt_class_confidences_batch includes the background class. implicit_class_weight: the weight assigned to implicit examples. Returns: batch_cls_targets: a tensor with shape [batch_size, num_anchors, num_classes], batch_cls_weights: a tensor with shape [batch_size, num_anchors, num_classes], batch_reg_targets: a tensor with shape [batch_size, num_anchors, box_code_dimension] batch_reg_weights: a tensor with shape [batch_size, num_anchors], match: an int32 tensor of shape [batch_size, num_anchors] containing result of anchor groundtruth matching. Each position in the tensor indicates an anchor and holds the following meaning: (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. (2) if match[x, i]=-1, anchor i is marked to be background . (3) if match[x, i]=-2, anchor i is ignored since it is not background and does not have sufficient overlap to call it a foreground. Raises: ValueError: if input list lengths are inconsistent, i.e., batch_size == len(gt_box_batch) == len(gt_class_targets_batch) and batch_size == len(anchors_batch) unless anchors_batch is a single BoxList, or if any element in gt_class_confidences_batch has rank > 2. 
""" if not isinstance(anchors_batch, list): anchors_batch = len(gt_box_batch) * [anchors_batch] if not all( isinstance(anchors, box_list.BoxList) for anchors in anchors_batch): raise ValueError('anchors_batch must be a BoxList or list of BoxLists.') if not (len(anchors_batch) == len(gt_box_batch) == len(gt_class_confidences_batch)): raise ValueError('batch size incompatible with lengths of anchors_batch, ' 'gt_box_batch and gt_class_confidences_batch.') cls_targets_list = [] cls_weights_list = [] reg_targets_list = [] reg_weights_list = [] match_list = [] if gt_weights_batch is None: gt_weights_batch = [None] * len(gt_class_confidences_batch) for anchors, gt_boxes, gt_class_confidences, gt_weights in zip( anchors_batch, gt_box_batch, gt_class_confidences_batch, gt_weights_batch): if (gt_class_confidences is not None and len(gt_class_confidences.get_shape().as_list()) > 2): raise ValueError('The shape of the class target is not supported. ', gt_class_confidences.get_shape()) cls_targets, _, reg_targets, _, match = target_assigner.assign( anchors, gt_boxes, gt_class_confidences, unmatched_class_label, groundtruth_weights=gt_weights) if include_background_class: cls_targets_without_background = tf.slice( cls_targets, [0, 1], [-1, -1]) else: cls_targets_without_background = cls_targets positive_mask = tf.greater(cls_targets_without_background, 0.0) negative_mask = tf.less(cls_targets_without_background, 0.0) explicit_example_mask = tf.logical_or(positive_mask, negative_mask) positive_anchors = tf.reduce_any(positive_mask, axis=-1) regression_weights = tf.cast(positive_anchors, dtype=tf.float32) regression_targets = ( reg_targets * tf.expand_dims(regression_weights, axis=-1)) regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1) cls_targets_without_background = ( cls_targets_without_background * (1 - tf.cast(negative_mask, dtype=tf.float32))) cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast( explicit_example_mask, dtype=tf.float32) + implicit_class_weight) if include_background_class: cls_weights_background = ( (1 - implicit_class_weight) * regression_weights_expanded + implicit_class_weight) classification_weights = tf.concat( [cls_weights_background, cls_weights_without_background], axis=-1) cls_targets_background = 1 - regression_weights_expanded classification_targets = tf.concat( [cls_targets_background, cls_targets_without_background], axis=-1) else: classification_targets = cls_targets_without_background classification_weights = cls_weights_without_background cls_targets_list.append(classification_targets) cls_weights_list.append(classification_weights) reg_targets_list.append(regression_targets) reg_weights_list.append(regression_weights) match_list.append(match) batch_cls_targets = tf.stack(cls_targets_list) batch_cls_weights = tf.stack(cls_weights_list) batch_reg_targets = tf.stack(reg_targets_list) batch_reg_weights = tf.stack(reg_weights_list) batch_match = tf.stack(match_list) return (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, batch_match) def _smallest_positive_root(a, b, c): """Returns the smallest positive root of a quadratic equation.""" discriminant = tf.sqrt(b ** 2 - 4 * a * c) # TODO(vighneshb) We are currently using the slightly incorrect # CenterNet implementation. The commented lines implement the fixed version # in https://github.com/princeton-vl/CornerNet. Change the implementation # after verifying it has no negative impact. 
# root1 = (-b - discriminant) / (2 * a) # root2 = (-b + discriminant) / (2 * a) # return tf.where(tf.less(root1, 0), root2, root1) return (-b + discriminant) / (2.0) def max_distance_for_overlap(height, width, min_iou): """Computes how far apart bbox corners can lie while maintaining the iou. Given a bounding box size, this function returns a lower bound on how far apart the corners of another box can lie while still maintaining the given IoU. The implementation is based on the `gaussian_radius` function in the Objects as Points github repo: https://github.com/xingyizhou/CenterNet Args: height: A 1-D float Tensor representing height of the ground truth boxes. width: A 1-D float Tensor representing width of the ground truth boxes. min_iou: A float representing the minimum IoU desired. Returns: distance: A 1-D Tensor of distances, of the same length as the input height and width tensors. """ # Given that the detected box is displaced at a distance `d`, the exact # IoU value will depend on the angle at which each corner is displaced. # We simplify our computation by assuming that each corner is displaced by # a distance `d` in both x and y direction. This gives us a lower IoU than # what is actually realizable and ensures that any box with corners less # than `d` distance apart will always have an IoU greater than or equal # to `min_iou` # The following 3 cases can be worked on geometrically and come down to # solving a quadratic inequality. In each case, to ensure `min_iou` we use # the smallest positive root of the equation. # Case where detected box is offset from ground truth and no box completely # contains the other. distance_detection_offset = _smallest_positive_root( a=1, b=-(height + width), c=width * height * ((1 - min_iou) / (1 + min_iou)) ) # Case where detection is smaller than ground truth and completely contained # in it. distance_detection_in_gt = _smallest_positive_root( a=4, b=-2 * (height + width), c=(1 - min_iou) * width * height ) # Case where ground truth is smaller than detection and completely contained # in it. distance_gt_in_detection = _smallest_positive_root( a=4 * min_iou, b=(2 * min_iou) * (width + height), c=(min_iou - 1) * width * height ) return tf.reduce_min([distance_detection_offset, distance_gt_in_detection, distance_detection_in_gt], axis=0) def get_batch_predictions_from_indices(batch_predictions, indices): """Gets the values of predictions in a batch at the given indices. The indices are expected to come from the offset targets generation functions in this library. The returned value is intended to be used inside a loss function. Args: batch_predictions: A tensor of shape [batch_size, height, width, channels] or [batch_size, height, width, class, channels] for class-specific features (e.g. keypoint joint offsets). indices: A tensor of shape [num_instances, 3] for single class features or [num_instances, 4] for multiple classes features. Returns: values: A tensor of shape [num_instances, channels] holding the predicted values at the given indices. """ # Note, gather_nd (and its gradient scatter_nd) runs significantly slower (on # TPU) than gather with flattened inputs, so reshape the tensor, flatten the # indices, and run gather. 
shape = shape_utils.combined_static_and_dynamic_shape(batch_predictions) # [B, H, W, C] -> [H*W, W, 1] or [B, H, W, N, C] -> [H*W*N, W*N, N, 1] rev_cum_interior_indices = tf.reverse(tf.math.cumprod(shape[-2:0:-1]), [0]) rev_cum_interior_indices = tf.concat([rev_cum_interior_indices, [1]], axis=0) # Compute flattened indices and gather. flattened_inds = tf.linalg.matmul( indices, rev_cum_interior_indices[:, tf.newaxis])[:, 0] batch_predictions_2d = tf.reshape(batch_predictions, [-1, shape[-1]]) return tf.gather(batch_predictions_2d, flattened_inds, axis=0) def _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap): """Computes the standard deviation of the Gaussian kernel from box size. Args: boxes_height: A 1D tensor with shape [num_instances] representing the height of each box. boxes_width: A 1D tensor with shape [num_instances] representing the width of each box. min_overlap: The minimum IOU overlap that boxes need to have to not be penalized. Returns: A 1D tensor with shape [num_instances] representing the computed Gaussian sigma for each of the box. """ # We are dividing by 3 so that points closer than the computed # distance have a >99% CDF. sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap) sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0 return sigma def _preprocess_keypoints_and_weights(out_height, out_width, keypoints, class_onehot, class_weights, keypoint_weights, class_id, keypoint_indices): """Preprocesses the keypoints and the corresponding keypoint weights. This function performs several common steps to preprocess the keypoints and keypoint weights features, including: 1) Select the subset of keypoints based on the keypoint indices, fill the keypoint NaN values with zeros and convert to absolute coordinates. 2) Generate the weights of the keypoint using the following information: a. The class of the instance. b. The NaN value of the keypoint coordinates. c. The provided keypoint weights. Args: out_height: An integer or an integer tensor indicating the output height of the model. out_width: An integer or an integer tensor indicating the output width of the model. keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2] representing the original keypoint grountruth coordinates. class_onehot: A float tensor of shape [num_instances, num_classes] containing the class targets with the 0th index assumed to map to the first non-background class. class_weights: A float tensor of shape [num_instances] containing weights for groundtruth instances. keypoint_weights: A float tensor of shape [num_instances, num_total_keypoints] representing the weights of each keypoints. class_id: int, the ID of the class (0-indexed) that contains the target keypoints to consider in this task. keypoint_indices: A list of integers representing the indices of the keypoints to be considered in this task. This is used to retrieve the subset of the keypoints that should be considered in this task. Returns: A tuple of two tensors: keypoint_absolute: A float tensor of shape [num_instances, num_keypoints, 2] which is the selected and updated keypoint coordinates. keypoint_weights: A float tensor of shape [num_instances, num_keypoints] representing the updated weight of each keypoint. """ # Select the targets keypoints by their type ids and generate the mask # of valid elements. 
valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class( keypoint_coordinates=keypoints, class_id=class_id, class_onehot=class_onehot, class_weights=class_weights, keypoint_indices=keypoint_indices) # Keypoint coordinates in absolute coordinate system. # The shape of the tensors: [num_instances, num_keypoints, 2]. keypoints_absolute = keypoint_ops.to_absolute_coordinates( keypoints, out_height, out_width) # Assign default weights for the keypoints. if keypoint_weights is None: keypoint_weights = tf.ones_like(keypoints[:, :, 0]) else: keypoint_weights = tf.gather( keypoint_weights, indices=keypoint_indices, axis=1) keypoint_weights = keypoint_weights * valid_mask return keypoints_absolute, keypoint_weights class CenterNetCenterHeatmapTargetAssigner(object): """Wrapper to compute the object center heatmap.""" def __init__(self, stride, min_overlap=0.7, compute_heatmap_sparse=False, keypoint_class_id=None, keypoint_indices=None, keypoint_weights_for_center=None): """Initializes the target assigner. Args: stride: int, the stride of the network in output pixels. min_overlap: The minimum IOU overlap that boxes need to have to not be penalized. compute_heatmap_sparse: bool, indicating whether or not to use the sparse version of the Op that computes the heatmap. The sparse version scales better with number of classes, but in some cases is known to cause OOM error. See (b/170989061). keypoint_class_id: int, the ID of the class (0-indexed) that contains the target keypoints to consider in this task. keypoint_indices: A list of integers representing the indices of the keypoints to be considered in this task. This is used to retrieve the subset of the keypoints from gt_keypoints that should be considered in this task. keypoint_weights_for_center: The keypoint weights used for calculating the location of object center. The number of weights need to be the same as the number of keypoints. The object center is calculated by the weighted mean of the keypoint locations. If not provided, the object center is determined by the center of the bounding box (default behavior). """ self._stride = stride self._min_overlap = min_overlap self._compute_heatmap_sparse = compute_heatmap_sparse self._keypoint_class_id = keypoint_class_id self._keypoint_indices = keypoint_indices self._keypoint_weights_for_center = keypoint_weights_for_center def assign_center_targets_from_boxes(self, height, width, gt_boxes_list, gt_classes_list, gt_weights_list=None): """Computes the object center heatmap target. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_boxes_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The box coordinates are expected in normalized coordinates. gt_classes_list: A list of float tensors with shape [num_boxes, num_classes] representing the one-hot encoded class labels for each box in the gt_boxes_list. gt_weights_list: A list of float tensors with shape [num_boxes] representing the weight of each groundtruth detection box. Returns: heatmap: A Tensor of size [batch_size, output_height, output_width, num_classes] representing the per class center heatmap. output_height and output_width are computed by dividing the input height and width by the stride specified during initialization. 
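        For example, with a stride of 4, a 512x512 input produces a 128x128
        heatmap.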
""" out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32) out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32) # Compute the yx-grid to be used to generate the heatmap. Each returned # tensor has shape of [out_height, out_width] (y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width) heatmaps = [] if gt_weights_list is None: gt_weights_list = [None] * len(gt_boxes_list) # TODO(vighneshb) Replace the for loop with a batch version. for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list, gt_weights_list): boxes = box_list.BoxList(boxes) # Convert the box coordinates to absolute output image dimension space. boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) # Get the box center coordinates. Each returned tensors have the shape of # [num_instances] (y_center, x_center, boxes_height, boxes_width) = boxes.get_center_coordinates_and_sizes() # Compute the sigma from box size. The tensor shape: [num_instances]. sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, self._min_overlap) # Apply the Gaussian kernel to the center coordinates. Returned heatmap # has shape of [out_height, out_width, num_classes] heatmap = ta_utils.coordinates_to_heatmap( y_grid=y_grid, x_grid=x_grid, y_coordinates=y_center, x_coordinates=x_center, sigma=sigma, channel_onehot=class_targets, channel_weights=weights, sparse=self._compute_heatmap_sparse) heatmaps.append(heatmap) # Return the stacked heatmaps over the batch. return tf.stack(heatmaps, axis=0) def assign_center_targets_from_keypoints(self, height, width, gt_classes_list, gt_keypoints_list, gt_weights_list=None, gt_keypoints_weights_list=None): """Computes the object center heatmap target using keypoint locations. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_classes_list: A list of float tensors with shape [num_boxes, num_classes] representing the one-hot encoded class labels for each box in the gt_boxes_list. gt_keypoints_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The box coordinates are expected in normalized coordinates. gt_weights_list: A list of float tensors with shape [num_boxes] representing the weight of each groundtruth detection box. gt_keypoints_weights_list: [Optional] a list of 3D tf.float32 tensors of shape [num_instances, num_total_keypoints] representing the weights of each keypoints. If not provided, then all not NaN keypoints will be equally weighted. Returns: heatmap: A Tensor of size [batch_size, output_height, output_width, num_classes] representing the per class center heatmap. output_height and output_width are computed by dividing the input height and width by the stride specified during initialization. """ assert (self._keypoint_weights_for_center is not None and self._keypoint_class_id is not None and self._keypoint_indices is not None) out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32) out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32) # Compute the yx-grid to be used to generate the heatmap. 
Each returned # tensor has shape of [out_height, out_width] (y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width) heatmaps = [] if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * len(gt_keypoints_list) for keypoints, classes, kp_weights, weights in zip( gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, gt_weights_list): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=out_height, out_width=out_width, keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._keypoint_class_id, keypoint_indices=self._keypoint_indices) # _, num_keypoints, _ = ( # shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # Update the keypoint weights by the specified keypoints weights. kp_loc_weights = tf.constant( self._keypoint_weights_for_center, dtype=tf.float32) updated_kp_weights = kp_weights * kp_loc_weights[tf.newaxis, :] # Obtain the sum of the weights for each instance. # instance_weight_sum has shape: [num_instance]. instance_weight_sum = tf.reduce_sum(updated_kp_weights, axis=1) # Weight the keypoint coordinates by updated_kp_weights. # weighted_keypoints has shape: [num_instance, num_keypoints, 2] weighted_keypoints = keypoints_absolute * tf.expand_dims( updated_kp_weights, axis=2) # Compute the mean of the keypoint coordinates over the weighted # keypoints. # keypoint_mean has shape: [num_instance, 2] keypoint_mean = tf.math.divide( tf.reduce_sum(weighted_keypoints, axis=1), tf.expand_dims(instance_weight_sum, axis=-1)) # Replace the NaN values (due to divided by zeros in the above operation) # by 0.0 where the sum of instance weight is zero. # keypoint_mean has shape: [num_instance, 2] keypoint_mean = tf.where( tf.stack([instance_weight_sum, instance_weight_sum], axis=1) > 0.0, keypoint_mean, tf.zeros_like(keypoint_mean)) # Compute the distance from each keypoint to the mean location using # broadcasting and weighted by updated_kp_weights. # keypoint_dist has shape: [num_instance, num_keypoints] keypoint_mean = tf.expand_dims(keypoint_mean, axis=1) keypoint_dist = tf.math.sqrt( tf.reduce_sum( tf.math.square(keypoints_absolute - keypoint_mean), axis=2)) keypoint_dist = keypoint_dist * updated_kp_weights # Compute the average of the distances from each keypoint to the mean # location and update the average value by zero when the instance weight # is zero. # avg_radius has shape: [num_instance] avg_radius = tf.math.divide( tf.reduce_sum(keypoint_dist, axis=1), instance_weight_sum) avg_radius = tf.where( instance_weight_sum > 0.0, avg_radius, tf.zeros_like(avg_radius)) # Update the class instance weight. If the instance doesn't contain enough # valid keypoint values (i.e. instance_weight_sum == 0.0), then set the # instance weight to zero. # updated_class_weights has shape: [num_instance] updated_class_weights = tf.where( instance_weight_sum > 0.0, weights, tf.zeros_like(weights)) # Compute the sigma from average distance. We use 2 * average distance to # to approximate the width/height of the bounding box. # sigma has shape: [num_instances]. sigma = _compute_std_dev_from_box_size(2 * avg_radius, 2 * avg_radius, self._min_overlap) # Apply the Gaussian kernel to the center coordinates. 
Returned heatmap # has shape of [out_height, out_width, num_classes] heatmap = ta_utils.coordinates_to_heatmap( y_grid=y_grid, x_grid=x_grid, y_coordinates=keypoint_mean[:, 0, 0], x_coordinates=keypoint_mean[:, 0, 1], sigma=sigma, channel_onehot=classes, channel_weights=updated_class_weights, sparse=self._compute_heatmap_sparse) heatmaps.append(heatmap) # Return the stacked heatmaps over the batch. return tf.stack(heatmaps, axis=0) class CenterNetBoxTargetAssigner(object): """Wrapper to compute target tensors for the object detection task. This class has methods that take as input a batch of ground truth tensors (in the form of a list) and return the targets required to train the object detection task. """ def __init__(self, stride): """Initializes the target assigner. Args: stride: int, the stride of the network in output pixels. """ self._stride = stride def assign_size_and_offset_targets(self, height, width, gt_boxes_list, gt_weights_list=None): """Returns the box height/width and center offset targets and their indices. The returned values are expected to be used with predicted tensors of size (batch_size, height//self._stride, width//self._stride, 2). The predicted values at the relevant indices can be retrieved with the get_batch_predictions_from_indices function. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_boxes_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The coordinates are expected in normalized coordinates. gt_weights_list: A list of tensors with shape [num_boxes] corresponding to the weight of each groundtruth detection box. Returns: batch_indices: an integer tensor of shape [num_boxes, 3] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. batch_box_height_width: a float tensor of shape [num_boxes, 2] holding expected height and width of each box in the output space. batch_offsets: a float tensor of shape [num_boxes, 2] holding the expected y and x offset of each box in the output space. batch_weights: a float tensor of shape [num_boxes] indicating the weight of each prediction. """ if gt_weights_list is None: gt_weights_list = [None] * len(gt_boxes_list) batch_indices = [] batch_box_height_width = [] batch_weights = [] batch_offsets = [] for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)): boxes = box_list.BoxList(boxes) boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) # Get the box center coordinates. Each returned tensors have the shape of # [num_boxes] (y_center, x_center, boxes_height, boxes_width) = boxes.get_center_coordinates_and_sizes() num_boxes = tf.shape(x_center) # Compute the offsets and indices of the box centers. Shape: # offsets: [num_boxes, 2] # indices: [num_boxes, 2] (offsets, indices) = ta_utils.compute_floor_offsets_with_indices( y_source=y_center, x_source=x_center) # Assign ones if weights are not provided. if weights is None: weights = tf.ones(num_boxes, dtype=tf.float32) # Shape of [num_boxes, 1] integer tensor filled with current batch index. 
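      # For example, if this is batch element i=1 and a box center falls at
      # output-space location (y, x) = (12.3, 40.7), the appended rows are
      # indices [1, 12, 40], offsets of approximately [0.3, 0.7], and the
      # box height/width measured in output pixels.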
      batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
      batch_indices.append(tf.concat([batch_index, indices], axis=1))
      batch_box_height_width.append(
          tf.stack([boxes_height, boxes_width], axis=1))
      batch_weights.append(weights)
      batch_offsets.append(offsets)
    batch_indices = tf.concat(batch_indices, axis=0)
    batch_box_height_width = tf.concat(batch_box_height_width, axis=0)
    batch_weights = tf.concat(batch_weights, axis=0)
    batch_offsets = tf.concat(batch_offsets, axis=0)
    return (batch_indices, batch_box_height_width, batch_offsets,
            batch_weights)


# TODO(yuhuic): Update this class to handle the instance/keypoint weights.
# Currently those weights are used as "mask" to indicate whether an
# instance/keypoint should be considered or not (expecting only either 0 or 1
# value). In reality, the weights can be any value and this class should handle
# those values properly.
class CenterNetKeypointTargetAssigner(object):
  """Wrapper to compute target tensors for the CenterNet keypoint estimation.

  This class has methods that take as input a batch of groundtruth tensors
  (in the form of a list) and return the targets required to train the
  CenterNet model for keypoint estimation. Specifically, the class methods
  expect the groundtruth in the following formats (consistent with the
  standard Object Detection API). Note that usually the groundtruth tensors
  are packed in a list which represents the batch dimension:

  gt_classes_list: [Required] a list of 2D tf.float32 one-hot
    (or k-hot) tensors of shape [num_instances, num_classes] containing the
    class targets with the 0th index assumed to map to the first
    non-background class.
  gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of shape
    [num_instances, num_total_keypoints, 2] containing keypoint coordinates.
    Note that "num_total_keypoints" should be the sum of num_keypoints over
    all possible keypoint types, e.g. human pose, face. For example, if a
    dataset contains both 17 human pose keypoints and 5 face keypoints, then
    num_total_keypoints = 17 + 5 = 22. If an instance contains only a subset
    of keypoints (e.g. human pose keypoints but not face keypoints), the face
    keypoints will be filled with zeros. Also note that keypoints are assumed
    to be provided in normalized coordinates and missing keypoints should be
    encoded as NaN.
  gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
    shape [num_instances, num_total_keypoints] representing the weight of
    each keypoint. If not provided, then all non-NaN keypoints will be
    equally weighted.
  gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
    [num_instances, 4] containing coordinates of the groundtruth boxes.
    Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
    assumed to be normalized and clipped relative to the image window with
    y_min <= y_max and x_min <= x_max.
    Note that the boxes are only used to compute the center targets but are
    not considered as required output of the keypoint task. If the boxes were
    not provided, the center targets will be inferred from the keypoints
    [not implemented yet].
  gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
    [num_instances] containing weights for groundtruth boxes. Only useful when
    gt_boxes_list is also provided.
  """

  def __init__(self,
               stride,
               class_id,
               keypoint_indices,
               keypoint_std_dev=None,
               per_keypoint_offset=False,
               peak_radius=0,
               compute_heatmap_sparse=False,
               per_keypoint_depth=False):
    """Initializes a CenterNet keypoints target assigner.
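
    For example (illustrative values only, depending on how the dataset packs
    its keypoints), a human pose task might pass the class_id of the "person"
    class and keypoint_indices=list(range(17)) if the 17 pose keypoints occupy
    the first 17 slots of gt_keypoints_list.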
Args: stride: int, the stride of the network in output pixels. class_id: int, the ID of the class (0-indexed) that contains the target keypoints to consider in this task. For example, if the task is human pose estimation, the class id should correspond to the "human" class. keypoint_indices: A list of integers representing the indices of the keypoints to be considered in this task. This is used to retrieve the subset of the keypoints from gt_keypoints that should be considered in this task. keypoint_std_dev: A list of floats represent the standard deviation of the Gaussian kernel used to generate the keypoint heatmap (in the unit of output pixels). It is to provide the flexibility of using different sizes of Gaussian kernel for each keypoint type. If not provided, then all standard deviation will be the same as the default value (10.0 in the output pixel space). If provided, the length of keypoint_std_dev needs to be the same as the length of keypoint_indices, indicating the standard deviation of each keypoint type. per_keypoint_offset: boolean, indicating whether to assign offset for each keypoint channel. If set False, the output offset target will have the shape [batch_size, out_height, out_width, 2]. If set True, the output offset target will have the shape [batch_size, out_height, out_width, 2 * num_keypoints]. peak_radius: int, the radius (in the unit of output pixel) around heatmap peak to assign the offset targets. compute_heatmap_sparse: bool, indicating whether or not to use the sparse version of the Op that computes the heatmap. The sparse version scales better with number of keypoint types, but in some cases is known to cause an OOM error. See (b/170989061). per_keypoint_depth: A bool indicates whether the model predicts the depth of each keypoints in independent channels. Similar to per_keypoint_offset but for the keypoint depth. """ self._stride = stride self._class_id = class_id self._keypoint_indices = keypoint_indices self._per_keypoint_offset = per_keypoint_offset self._per_keypoint_depth = per_keypoint_depth self._peak_radius = peak_radius self._compute_heatmap_sparse = compute_heatmap_sparse if keypoint_std_dev is None: self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] * len(keypoint_indices)) else: assert len(keypoint_indices) == len(keypoint_std_dev) self._keypoint_std_dev = keypoint_std_dev def assign_keypoint_heatmap_targets(self, height, width, gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list=None, gt_weights_list=None, gt_boxes_list=None): """Returns the keypoint heatmap targets for the CenterNet model. Args: height: int, height of input to the CenterNet model. This is used to determine the height of the output. width: int, width of the input to the CenterNet model. This is used to determine the width of the output. gt_keypoints_list: A list of float tensors with shape [num_instances, num_total_keypoints, 2]. See class-level description for more detail. gt_classes_list: A list of float tensors with shape [num_instances, num_classes]. See class-level description for more detail. gt_keypoints_weights_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the weight of each keypoint. gt_weights_list: A list of float tensors with shape [num_instances]. See class-level description for more detail. gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See class-level description for more detail. If provided, the keypoint standard deviations will be scaled based on the box sizes. 
Returns: heatmap: A float tensor of shape [batch_size, output_height, output_width, num_keypoints] representing the per keypoint type center heatmap. output_height and output_width are computed by dividing the input height and width by the stride specified during initialization. Note that the "num_keypoints" is defined by the length of keypoint_indices, which is not necessarily equal to "num_total_keypoints". num_instances_batch: A 2D int tensor of shape [batch_size, num_keypoints] representing number of instances for each keypoint type. valid_mask: A float tensor with shape [batch_size, output_height, output_width, num_keypoints] where all values within the regions of the blackout boxes are 0.0 and 1.0 else where. Note that the blackout boxes are per keypoint type and are blacked out if the keypoint visibility/weight (of the corresponding keypoint type) is zero. """ out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32) out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32) # Compute the yx-grid to be used to generate the heatmap. Each returned # tensor has shape of [out_height, out_width] y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width) if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * len(gt_keypoints_list) if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) if gt_boxes_list is None: gt_boxes_list = [None] * len(gt_keypoints_list) heatmaps = [] num_instances_list = [] valid_mask_list = [] for keypoints, classes, kp_weights, weights, boxes in zip( gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, gt_weights_list, gt_boxes_list): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=out_height, out_width=out_width, keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._class_id, keypoint_indices=self._keypoint_indices) num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # A tensor of shape [num_instances, num_keypoints] with # each element representing the type dimension for each corresponding # keypoint: # [[0, 1, ..., k-1], # [0, 1, ..., k-1], # : # [0, 1, ..., k-1]] keypoint_types = tf.tile( input=tf.expand_dims(tf.range(num_keypoints), axis=0), multiples=[num_instances, 1]) # A tensor of shape [num_instances, num_keypoints] with # each element representing the sigma of the Gaussian kernel for each # keypoint. keypoint_std_dev = tf.tile( input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0), multiples=[num_instances, 1]) # If boxes is not None, then scale the standard deviation based on the # size of the object bounding boxes similar to object center heatmap. if boxes is not None: boxes = box_list.BoxList(boxes) # Convert the box coordinates to absolute output image dimension space. boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) # Get the box height and width. Each returned tensors have the shape # of [num_instances] (_, _, boxes_height, boxes_width) = boxes.get_center_coordinates_and_sizes() # Compute the sigma from box size. The tensor shape: [num_instances]. sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7) keypoint_std_dev = keypoint_std_dev * tf.stack( [sigma] * num_keypoints, axis=1) # Generate the per-keypoint type valid region mask to ignore regions # with keypoint weights equal to zeros (e.g. visibility is 0). 
# shape of valid_mask: [out_height, out_width, num_keypoints] kp_weight_list = tf.unstack(kp_weights, axis=1) valid_mask_channel_list = [] for kp_weight in kp_weight_list: blackout = kp_weight < 1e-3 valid_mask_channel_list.append( ta_utils.blackout_pixel_weights_by_box_regions( out_height, out_width, boxes.get(), blackout)) valid_mask = tf.stack(valid_mask_channel_list, axis=2) valid_mask_list.append(valid_mask) # Apply the Gaussian kernel to the keypoint coordinates. Returned heatmap # has shape of [out_height, out_width, num_keypoints]. heatmap = ta_utils.coordinates_to_heatmap( y_grid=y_grid, x_grid=x_grid, y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]), x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]), sigma=tf.keras.backend.flatten(keypoint_std_dev), channel_onehot=tf.one_hot( tf.keras.backend.flatten(keypoint_types), depth=num_keypoints), channel_weights=tf.keras.backend.flatten(kp_weights)) num_instances_list.append( tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32)) heatmaps.append(heatmap) return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0), tf.stack(valid_mask_list, axis=0)) def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors): """Gets keypoint type index tensor. The function prepares the tensor of keypoint indices with shape [num_instances, num_keypoints, num_neighbors]. Each element represents the keypoint type index for each corresponding keypoint and tiled along the 3rd axis: [[0, 1, ..., num_keypoints - 1], [0, 1, ..., num_keypoints - 1], : [0, 1, ..., num_keypoints - 1]] Args: num_instances: int, the number of instances, used to define the 1st dimension. num_keypoints: int, the number of keypoint types, used to define the 2nd dimension. num_neighbors: int, the number of neighborhood pixels to consider for each keypoint, used to define the 3rd dimension. Returns: A integer tensor of shape [num_instances, num_keypoints, num_neighbors]. """ keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis] tiled_keypoint_types = tf.tile(keypoint_types, multiples=[num_instances, 1, num_neighbors]) return tiled_keypoint_types def assign_keypoints_offset_targets(self, height, width, gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list=None, gt_weights_list=None): """Returns the offsets and indices of the keypoints for location refinement. The returned values are used to refine the location of each keypoints in the heatmap. The predicted values at the relevant indices can be retrieved with the get_batch_predictions_from_indices function. Args: height: int, height of input to the CenterNet model. This is used to determine the height of the output. width: int, width of the input to the CenterNet model. This is used to determine the width of the output. gt_keypoints_list: A list of tensors with shape [num_instances, num_total_keypoints]. See class-level description for more detail. gt_classes_list: A list of tensors with shape [num_instances, num_classes]. See class-level description for more detail. gt_keypoints_weights_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the weight of each keypoint. gt_weights_list: A list of float tensors with shape [num_instances]. See class-level description for more detail. Returns: batch_indices: an integer tensor of shape [num_total_instances, 3] (or [num_total_instances, 4] if 'per_keypoint_offset' is set True) holding the indices inside the predicted tensor which should be penalized. 
The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. The fourth column corresponds to the channel dimension (if 'per_keypoint_offset' is set True). batch_offsets: a float tensor of shape [num_total_instances, 2] holding the expected y and x offset of each box in the output space. batch_weights: a float tensor of shape [num_total_instances] indicating the weight of each prediction. Note that num_total_instances = batch_size * num_instances * num_keypoints * num_neighbors """ batch_indices = [] batch_offsets = [] batch_weights = [] if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * len(gt_keypoints_list) if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) for i, (keypoints, classes, kp_weights, weights) in enumerate( zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, gt_weights_list)): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=tf.maximum(height // self._stride, 1), out_width=tf.maximum(width // self._stride, 1), keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._class_id, keypoint_indices=self._keypoint_indices) num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # [num_instances * num_keypoints] y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0]) x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1]) # All keypoint coordinates and their neighbors: # [num_instance * num_keypoints, num_neighbors] (y_source_neighbors, x_source_neighbors, valid_sources) = ta_utils.get_surrounding_grids( tf.cast(tf.maximum(height // self._stride, 1), tf.float32), tf.cast(tf.maximum(width // self._stride, 1), tf.float32), y_source, x_source, self._peak_radius) _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( y_source_neighbors) # Update the valid keypoint weights. # [num_instance * num_keypoints, num_neighbors] valid_keypoints = tf.cast( valid_sources, dtype=tf.float32) * tf.stack( [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) # Compute the offsets and indices of the box centers. Shape: # offsets: [num_instances * num_keypoints, num_neighbors, 2] # indices: [num_instances * num_keypoints, num_neighbors, 2] offsets, indices = ta_utils.compute_floor_offsets_with_indices( y_source=y_source_neighbors, x_source=x_source_neighbors, y_target=y_source, x_target=x_source) # Reshape to: # offsets: [num_instances * num_keypoints * num_neighbors, 2] # indices: [num_instances * num_keypoints * num_neighbors, 2] offsets = tf.reshape(offsets, [-1, 2]) indices = tf.reshape(indices, [-1, 2]) # Prepare the batch indices to be prepended. batch_index = tf.fill( [num_instances * num_keypoints * num_neighbors, 1], i) if self._per_keypoint_offset: tiled_keypoint_types = self._get_keypoint_types( num_instances, num_keypoints, num_neighbors) batch_indices.append( tf.concat([batch_index, indices, tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) else: batch_indices.append(tf.concat([batch_index, indices], axis=1)) batch_offsets.append(offsets) batch_weights.append(tf.keras.backend.flatten(valid_keypoints)) # Concatenate the tensors in the batch in the first dimension: # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if # 'per_keypoint_offset' is set to True. 
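    # These outputs are typically consumed with
    # get_batch_predictions_from_indices; an illustrative (not prescriptive)
    # sketch:
    #   preds = get_batch_predictions_from_indices(offset_pred, batch_indices)
    #   loss = loss_fn(preds, batch_offsets) * batch_weights[:, tf.newaxis]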
batch_indices = tf.concat(batch_indices, axis=0) # shape: [batch_size * num_instances * num_keypoints * num_neighbors] batch_weights = tf.concat(batch_weights, axis=0) # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2] batch_offsets = tf.concat(batch_offsets, axis=0) return (batch_indices, batch_offsets, batch_weights) def assign_keypoints_depth_targets(self, height, width, gt_keypoints_list, gt_classes_list, gt_keypoint_depths_list, gt_keypoint_depth_weights_list, gt_keypoints_weights_list=None, gt_weights_list=None): """Returns the target depths of the keypoints. The returned values are the relative depth information of each keypoints. Args: height: int, height of input to the CenterNet model. This is used to determine the height of the output. width: int, width of the input to the CenterNet model. This is used to determine the width of the output. gt_keypoints_list: A list of tensors with shape [num_instances, num_total_keypoints, 2]. See class-level description for more detail. gt_classes_list: A list of tensors with shape [num_instances, num_classes]. See class-level description for more detail. gt_keypoint_depths_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the relative depth of the keypoints. gt_keypoint_depth_weights_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the weights of the relative depth. gt_keypoints_weights_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the weight of each keypoint. gt_weights_list: A list of float tensors with shape [num_instances]. See class-level description for more detail. Returns: batch_indices: an integer tensor of shape [num_total_instances, 3] (or [num_total_instances, 4] if 'per_keypoint_depth' is set True) holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. The fourth column corresponds to the channel dimension (if 'per_keypoint_offset' is set True). batch_depths: a float tensor of shape [num_total_instances, 1] (or [num_total_instances, num_keypoints] if per_keypoint_depth is set True) indicating the target depth of each keypoint. batch_weights: a float tensor of shape [num_total_instances] indicating the weight of each prediction. 
Note that num_total_instances = batch_size * num_instances * num_keypoints * num_neighbors """ batch_indices = [] batch_weights = [] batch_depths = [] if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * len(gt_keypoints_list) if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) if gt_keypoint_depths_list is None: gt_keypoint_depths_list = [None] * len(gt_classes_list) for i, (keypoints, classes, kp_weights, weights, keypoint_depths, keypoint_depth_weights) in enumerate( zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, gt_weights_list, gt_keypoint_depths_list, gt_keypoint_depth_weights_list)): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=tf.maximum(height // self._stride, 1), out_width=tf.maximum(width // self._stride, 1), keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._class_id, keypoint_indices=self._keypoint_indices) num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # [num_instances * num_keypoints] y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0]) x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1]) # All keypoint coordinates and their neighbors: # [num_instance * num_keypoints, num_neighbors] (y_source_neighbors, x_source_neighbors, valid_sources) = ta_utils.get_surrounding_grids( tf.cast(tf.maximum(height // self._stride, 1), tf.float32), tf.cast(tf.maximum(width // self._stride, 1), tf.float32), y_source, x_source, self._peak_radius) _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( y_source_neighbors) # Update the valid keypoint weights. # [num_instance * num_keypoints, num_neighbors] valid_keypoints = tf.cast( valid_sources, dtype=tf.float32) * tf.stack( [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) # Compute the offsets and indices of the box centers. Shape: # indices: [num_instances * num_keypoints, num_neighbors, 2] _, indices = ta_utils.compute_floor_offsets_with_indices( y_source=y_source_neighbors, x_source=x_source_neighbors, y_target=y_source, x_target=x_source) # Reshape to: # indices: [num_instances * num_keypoints * num_neighbors, 2] indices = tf.reshape(indices, [-1, 2]) # Gather the keypoint depth from corresponding keypoint indices: # [num_instances, num_keypoints] keypoint_depths = tf.gather( keypoint_depths, self._keypoint_indices, axis=1) # Tile the depth target to surrounding pixels. # [num_instances, num_keypoints, num_neighbors] tiled_keypoint_depths = tf.tile( tf.expand_dims(keypoint_depths, axis=-1), multiples=[1, 1, num_neighbors]) # [num_instances, num_keypoints] keypoint_depth_weights = tf.gather( keypoint_depth_weights, self._keypoint_indices, axis=1) # [num_instances, num_keypoints, num_neighbors] keypoint_depth_weights = tf.tile( tf.expand_dims(keypoint_depth_weights, axis=-1), multiples=[1, 1, num_neighbors]) # Update the weights of keypoint depth by the weights of the keypoints. # A keypoint depth target is valid only if its corresponding keypoint # target is also valid. # [num_instances, num_keypoints, num_neighbors] tiled_depth_weights = ( tf.reshape(valid_keypoints, [num_instances, num_keypoints, num_neighbors]) * keypoint_depth_weights) invalid_depths = tf.logical_or( tf.math.is_nan(tiled_depth_weights), tf.math.is_nan(tiled_keypoint_depths)) # Assign zero values and weights to NaN values. 
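      # NaNs indicate missing depth annotations; zeroing both the value and
      # its weight below drops those entries from the loss while keeping the
      # tensor shapes fixed.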
final_keypoint_depths = tf.where(invalid_depths, tf.zeros_like(tiled_keypoint_depths), tiled_keypoint_depths) final_keypoint_depth_weights = tf.where( invalid_depths, tf.zeros_like(tiled_depth_weights), tiled_depth_weights) # [num_instances * num_keypoints * num_neighbors, 1] batch_depths.append(tf.reshape(final_keypoint_depths, [-1, 1])) # Prepare the batch indices to be prepended. batch_index = tf.fill( [num_instances * num_keypoints * num_neighbors, 1], i) if self._per_keypoint_depth: tiled_keypoint_types = self._get_keypoint_types( num_instances, num_keypoints, num_neighbors) batch_indices.append( tf.concat([batch_index, indices, tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) else: batch_indices.append(tf.concat([batch_index, indices], axis=1)) batch_weights.append( tf.keras.backend.flatten(final_keypoint_depth_weights)) # Concatenate the tensors in the batch in the first dimension: # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if # 'per_keypoint_offset' is set to True. batch_indices = tf.concat(batch_indices, axis=0) # shape: [batch_size * num_instances * num_keypoints * num_neighbors] batch_weights = tf.concat(batch_weights, axis=0) # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 1] batch_depths = tf.concat(batch_depths, axis=0) return (batch_indices, batch_depths, batch_weights) def assign_joint_regression_targets(self, height, width, gt_keypoints_list, gt_classes_list, gt_boxes_list=None, gt_keypoints_weights_list=None, gt_weights_list=None): """Returns the joint regression from center grid to keypoints. The joint regression is used as the grouping cue from the estimated keypoints to instance center. The offsets are the vectors from the floored object center coordinates to the keypoint coordinates. Args: height: int, height of input to the CenterNet model. This is used to determine the height of the output. width: int, width of the input to the CenterNet model. This is used to determine the width of the output. gt_keypoints_list: A list of float tensors with shape [num_instances, num_total_keypoints]. See class-level description for more detail. gt_classes_list: A list of float tensors with shape [num_instances, num_classes]. See class-level description for more detail. gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See class-level description for more detail. If provided, then the center targets will be computed based on the center of the boxes. gt_keypoints_weights_list: A list of float tensors with shape [num_instances, num_total_keypoints] representing to the weight of each keypoint. gt_weights_list: A list of float tensors with shape [num_instances]. See class-level description for more detail. Returns: batch_indices: an integer tensor of shape [num_instances, 4] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively, the last dimension refers to the keypoint type dimension. batch_offsets: a float tensor of shape [num_instances, 2] holding the expected y and x offset of each box in the output space. batch_weights: a float tensor of shape [num_instances] indicating the weight of each prediction. 
Note that num_total_instances = batch_size * num_instances * num_keypoints Raises: NotImplementedError: currently the object center coordinates need to be computed from groundtruth bounding boxes. The functionality of generating the object center coordinates from keypoints is not implemented yet. """ batch_indices = [] batch_offsets = [] batch_weights = [] batch_size = len(gt_keypoints_list) if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * batch_size if gt_boxes_list is None: gt_boxes_list = [None] * batch_size if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate( zip(gt_keypoints_list, gt_classes_list, gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=tf.maximum(height // self._stride, 1), out_width=tf.maximum(width // self._stride, 1), keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._class_id, keypoint_indices=self._keypoint_indices) num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # If boxes are provided, compute the joint center from it. if boxes is not None: # Compute joint center from boxes. boxes = box_list.BoxList(boxes) boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes() else: # TODO(yuhuic): Add the logic to generate object centers from keypoints. raise NotImplementedError(( 'The functionality of generating object centers from keypoints is' ' not implemented yet. Please provide groundtruth bounding boxes.' )) # Tile the yx center coordinates to be the same shape as keypoints. y_center_tiled = tf.tile( tf.reshape(y_center, shape=[num_instances, 1]), multiples=[1, num_keypoints]) x_center_tiled = tf.tile( tf.reshape(x_center, shape=[num_instances, 1]), multiples=[1, num_keypoints]) # [num_instance * num_keypoints, num_neighbors] (y_source_neighbors, x_source_neighbors, valid_sources) = ta_utils.get_surrounding_grids( tf.cast(tf.maximum(height // self._stride, 1), tf.float32), tf.cast(tf.maximum(width // self._stride, 1), tf.float32), tf.keras.backend.flatten(y_center_tiled), tf.keras.backend.flatten(x_center_tiled), self._peak_radius) _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( y_source_neighbors) valid_keypoints = tf.cast( valid_sources, dtype=tf.float32) * tf.stack( [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) # Compute the offsets and indices of the box centers. Shape: # offsets: [num_instances * num_keypoints, 2] # indices: [num_instances * num_keypoints, 2] (offsets, indices) = ta_utils.compute_floor_offsets_with_indices( y_source=y_source_neighbors, x_source=x_source_neighbors, y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]), x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1])) # Reshape to: # offsets: [num_instances * num_keypoints * num_neighbors, 2] # indices: [num_instances * num_keypoints * num_neighbors, 2] offsets = tf.reshape(offsets, [-1, 2]) indices = tf.reshape(indices, [-1, 2]) # keypoint type tensor: [num_instances, num_keypoints, num_neighbors]. 
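      # The keypoint type id (0..num_keypoints-1) is tiled over instances and
      # neighbors and appended as the fourth column of batch_indices, so each
      # regressed offset is matched to its keypoint channel.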
tiled_keypoint_types = self._get_keypoint_types( num_instances, num_keypoints, num_neighbors) batch_index = tf.fill( [num_instances * num_keypoints * num_neighbors, 1], i) batch_indices.append( tf.concat([batch_index, indices, tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) batch_offsets.append(offsets) batch_weights.append(tf.keras.backend.flatten(valid_keypoints)) # Concatenate the tensors in the batch in the first dimension: # shape: [batch_size * num_instances * num_keypoints, 4] batch_indices = tf.concat(batch_indices, axis=0) # shape: [batch_size * num_instances * num_keypoints] batch_weights = tf.concat(batch_weights, axis=0) # shape: [batch_size * num_instances * num_keypoints, 2] batch_offsets = tf.concat(batch_offsets, axis=0) return (batch_indices, batch_offsets, batch_weights) def _resize_masks(masks, height, width, method): # Resize segmentation masks to conform to output dimensions. Use TF2 # image resize because TF1's version is buggy: # https://yaqs.corp.google.com/eng/q/4970450458378240 masks = tf2.image.resize( masks[:, :, :, tf.newaxis], size=(height, width), method=method) return masks[:, :, :, 0] class CenterNetMaskTargetAssigner(object): """Wrapper to compute targets for segmentation masks.""" def __init__(self, stride): self._stride = stride def assign_segmentation_targets( self, gt_masks_list, gt_classes_list, gt_boxes_list=None, gt_mask_weights_list=None, mask_resize_method=ResizeMethod.BILINEAR): """Computes the segmentation targets. This utility produces a semantic segmentation mask for each class, starting with whole image instance segmentation masks. Effectively, each per-class segmentation target is the union of all masks from that class. Args: gt_masks_list: A list of float tensors with shape [num_boxes, input_height, input_width] with values in {0, 1} representing instance masks for each object. gt_classes_list: A list of float tensors with shape [num_boxes, num_classes] representing the one-hot encoded class labels for each box in the gt_boxes_list. gt_boxes_list: An optional list of float tensors with shape [num_boxes, 4] with normalized boxes corresponding to each mask. The boxes are used to spatially allocate mask weights. gt_mask_weights_list: An optional list of float tensors with shape [num_boxes] with weights for each mask. If a mask has a zero weight, it indicates that the box region associated with the mask should not contribute to the loss. If not provided, will use a per-pixel weight of 1. mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use when resizing masks from input resolution to output resolution. Returns: segmentation_targets: An int32 tensor of size [batch_size, output_height, output_width, num_classes] representing the class of each location in the output space. segmentation_weight: A float32 tensor of size [batch_size, output_height, output_width] indicating the loss weight to apply at each location. 
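
      Note that overlapping instances of the same class are merged: with
      one-hot labels, each class channel of segmentation_targets is the
      elementwise maximum (i.e. the union) of all resized masks of that class.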
""" _, num_classes = shape_utils.combined_static_and_dynamic_shape( gt_classes_list[0]) _, input_height, input_width = ( shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0])) output_height = tf.maximum(input_height // self._stride, 1) output_width = tf.maximum(input_width // self._stride, 1) if gt_boxes_list is None: gt_boxes_list = [None] * len(gt_masks_list) if gt_mask_weights_list is None: gt_mask_weights_list = [None] * len(gt_masks_list) segmentation_targets_list = [] segmentation_weights_list = [] for gt_boxes, gt_masks, gt_mask_weights, gt_classes in zip( gt_boxes_list, gt_masks_list, gt_mask_weights_list, gt_classes_list): if gt_boxes is not None and gt_mask_weights is not None: boxes = box_list.BoxList(gt_boxes) # Convert the box coordinates to absolute output image dimension space. boxes_absolute = box_list_ops.to_absolute_coordinates( boxes, output_height, output_width) # Generate a segmentation weight that applies mask weights in object # regions. blackout = gt_mask_weights <= 0 segmentation_weight_for_image = ( ta_utils.blackout_pixel_weights_by_box_regions( output_height, output_width, boxes_absolute.get(), blackout, weights=gt_mask_weights)) segmentation_weights_list.append(segmentation_weight_for_image) else: segmentation_weights_list.append(tf.ones((output_height, output_width), dtype=tf.float32)) gt_masks = _resize_masks(gt_masks, output_height, output_width, mask_resize_method) gt_masks = gt_masks[:, :, :, tf.newaxis] gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes]) # Shape: [h, w, num_classes]. segmentations_for_image = tf.reduce_max( gt_masks * gt_classes_reshaped, axis=0) # Avoid the case where max of an empty array is -inf. segmentations_for_image = tf.maximum(segmentations_for_image, 0.0) segmentation_targets_list.append(segmentations_for_image) segmentation_target = tf.stack(segmentation_targets_list, axis=0) segmentation_weight = tf.stack(segmentation_weights_list, axis=0) return segmentation_target, segmentation_weight class CenterNetDensePoseTargetAssigner(object): """Wrapper to compute targets for DensePose task.""" def __init__(self, stride, num_parts=24): self._stride = stride self._num_parts = num_parts def assign_part_and_coordinate_targets(self, height, width, gt_dp_num_points_list, gt_dp_part_ids_list, gt_dp_surface_coords_list, gt_weights_list=None): """Returns the DensePose part_id and coordinate targets and their indices. The returned values are expected to be used with predicted tensors of size (batch_size, height//self._stride, width//self._stride, 2). The predicted values at the relevant indices can be retrieved with the get_batch_predictions_from_indices function. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes] containing the number of DensePose sampled points per box. gt_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape [num_boxes, max_sampled_points] containing the DensePose part ids (0-indexed) for each sampled point. Note that there may be padding, as boxes may contain a different number of sampled points. gt_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape [num_boxes, max_sampled_points, 4] containing the DensePose surface coordinates (normalized) for each sampled point. Note that there may be padding. 
gt_weights_list: A list of 1-D tensors with shape [num_boxes] corresponding to the weight of each groundtruth detection box. Returns: batch_indices: an integer tensor of shape [num_total_points, 4] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. The fourth column is the part index. batch_part_ids: an int tensor of shape [num_total_points, num_parts] holding 1-hot encodings of parts for each sampled point. batch_surface_coords: a float tensor of shape [num_total_points, 2] holding the expected (v, u) coordinates for each sampled point. batch_weights: a float tensor of shape [num_total_points] indicating the weight of each prediction. Note that num_total_points = batch_size * num_boxes * max_sampled_points. """ if gt_weights_list is None: gt_weights_list = [None] * len(gt_dp_num_points_list) batch_indices = [] batch_part_ids = [] batch_surface_coords = [] batch_weights = [] for i, (num_points, part_ids, surface_coords, weights) in enumerate( zip(gt_dp_num_points_list, gt_dp_part_ids_list, gt_dp_surface_coords_list, gt_weights_list)): num_boxes, max_sampled_points = ( shape_utils.combined_static_and_dynamic_shape(part_ids)) part_ids_flattened = tf.reshape(part_ids, [-1]) part_ids_one_hot = tf.one_hot(part_ids_flattened, depth=self._num_parts) # Get DensePose coordinates in the output space. surface_coords_abs = densepose_ops.to_absolute_coordinates( surface_coords, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) surface_coords_abs = tf.reshape(surface_coords_abs, [-1, 4]) # Each tensor has shape [num_boxes * max_sampled_points]. yabs, xabs, v, u = tf.unstack(surface_coords_abs, axis=-1) # Get the indices (in output space) for the DensePose coordinates. Note # that if self._stride is larger than 1, this will have the effect of # reducing spatial resolution of the groundtruth points. indices_y = tf.cast(yabs, tf.int32) indices_x = tf.cast(xabs, tf.int32) # Assign ones if weights are not provided. if weights is None: weights = tf.ones(num_boxes, dtype=tf.float32) # Create per-point weights. weights_per_point = tf.reshape( tf.tile(weights[:, tf.newaxis], multiples=[1, max_sampled_points]), shape=[-1]) # Mask out invalid (i.e. padded) DensePose points. num_points_tiled = tf.tile(num_points[:, tf.newaxis], multiples=[1, max_sampled_points]) range_tiled = tf.tile(tf.range(max_sampled_points)[tf.newaxis, :], multiples=[num_boxes, 1]) valid_points = tf.math.less(range_tiled, num_points_tiled) valid_points = tf.cast(tf.reshape(valid_points, [-1]), dtype=tf.float32) weights_per_point = weights_per_point * valid_points # Shape of [num_boxes * max_sampled_points] integer tensor filled with # current batch index. batch_index = i * tf.ones_like(indices_y, dtype=tf.int32) batch_indices.append( tf.stack([batch_index, indices_y, indices_x, part_ids_flattened], axis=1)) batch_part_ids.append(part_ids_one_hot) batch_surface_coords.append(tf.stack([v, u], axis=1)) batch_weights.append(weights_per_point) batch_indices = tf.concat(batch_indices, axis=0) batch_part_ids = tf.concat(batch_part_ids, axis=0) batch_surface_coords = tf.concat(batch_surface_coords, axis=0) batch_weights = tf.concat(batch_weights, axis=0) return batch_indices, batch_part_ids, batch_surface_coords, batch_weights class CenterNetTrackTargetAssigner(object): """Wrapper to compute targets for tracking task. 
Reference paper: A Simple Baseline for Multi-Object Tracking [1] [1]: https://arxiv.org/abs/2004.01888 """ def __init__(self, stride, num_track_ids): self._stride = stride self._num_track_ids = num_track_ids def assign_track_targets(self, height, width, gt_track_ids_list, gt_boxes_list, gt_weights_list=None): """Computes the track ID targets. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_track_ids_list: A list of 1-D tensors with shape [num_boxes] corresponding to the track ID of each groundtruth detection box. gt_boxes_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The coordinates are expected in normalized coordinates. gt_weights_list: A list of 1-D tensors with shape [num_boxes] corresponding to the weight of each groundtruth detection box. Returns: batch_indices: an integer tensor of shape [batch_size, num_boxes, 3] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. batch_weights: a float tensor of shape [batch_size, num_boxes] indicating the weight of each prediction. track_id_targets: An int32 tensor of size [batch_size, num_boxes, num_track_ids] containing the one-hot track ID vector of each groundtruth detection box. """ track_id_targets = tf.one_hot( gt_track_ids_list, depth=self._num_track_ids, axis=-1) if gt_weights_list is None: gt_weights_list = [None] * len(gt_boxes_list) batch_indices = [] batch_weights = [] for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)): boxes = box_list.BoxList(boxes) boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) # Get the box center coordinates. Each returned tensors have the shape of # [num_boxes] (y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes() num_boxes = tf.shape(x_center) # Compute the indices of the box centers. Shape: # indices: [num_boxes, 2] (_, indices) = ta_utils.compute_floor_offsets_with_indices( y_source=y_center, x_source=x_center) # Assign ones if weights are not provided. if weights is None: weights = tf.ones(num_boxes, dtype=tf.float32) # Shape of [num_boxes, 1] integer tensor filled with current batch index. batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32) batch_indices.append(tf.concat([batch_index, indices], axis=1)) batch_weights.append(weights) batch_indices = tf.stack(batch_indices, axis=0) batch_weights = tf.stack(batch_weights, axis=0) return batch_indices, batch_weights, track_id_targets def filter_mask_overlap_min_area(masks): """If a pixel belongs to 2 instances, remove it from the larger instance.""" num_instances = tf.shape(masks)[0] def _filter_min_area(): """Helper function to filter non empty masks.""" areas = tf.reduce_sum(masks, axis=[1, 2], keepdims=True) per_pixel_area = masks * areas # Make sure background is ignored in argmin. 
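    # Pixels not covered by an instance (mask == 0) are set to dtype.max so
    # that the argmin below can only select an instance that actually
    # contains the pixel.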
    per_pixel_area = (masks * per_pixel_area +
                      (1 - masks) * per_pixel_area.dtype.max)
    min_index = tf.cast(tf.argmin(per_pixel_area, axis=0), tf.int32)

    filtered_masks = (
        tf.range(num_instances)[:, tf.newaxis, tf.newaxis] ==
        min_index[tf.newaxis, :, :])

    return tf.cast(filtered_masks, tf.float32) * masks

  return tf.cond(num_instances > 0, _filter_min_area, lambda: masks)


def filter_mask_overlap(masks, method='min_area'):

  if method == 'min_area':
    return filter_mask_overlap_min_area(masks)
  else:
    raise ValueError('Unknown mask overlap filter type - {}'.format(method))


class CenterNetCornerOffsetTargetAssigner(object):
  """Wrapper to compute corner offsets for boxes using masks."""

  def __init__(self, stride, overlap_resolution='min_area'):
    """Initializes the corner offset target assigner.

    Args:
      stride: int, the stride of the network in output pixels.
      overlap_resolution: string, specifies how we handle overlapping instance
        masks. Currently only 'min_area' is supported which assigns
        overlapping pixels to the instance with the minimum area.
    """

    self._stride = stride
    self._overlap_resolution = overlap_resolution

  def assign_corner_offset_targets(
      self, gt_boxes_list, gt_masks_list):
    """Computes the corner offset targets and foreground map.

    For each pixel that is part of any object's foreground, this function
    computes the relative offsets to the top-left and bottom-right corners of
    that instance's bounding box. It also returns a foreground map to indicate
    which pixels contain valid corner offsets.

    Args:
      gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
        representing the groundtruth detection bounding boxes for each sample
        in the batch. The coordinates are expected in normalized coordinates.
      gt_masks_list: A list of float tensors with shape [num_boxes,
        input_height, input_width] with values in {0, 1} representing instance
        masks for each object.

    Returns:
      corner_offsets: A float tensor of shape [batch_size, height, width, 4]
        containing, in order, the (y, x) offsets to the top left corner and
        the (y, x) offsets to the bottom right corner for each foreground
        pixel.
      foreground: A float tensor of shape [batch_size, height, width] in which
        each pixel is set to 1 if it is a part of any instance's foreground
        (and thus contains valid corner offsets) and 0 otherwise.
""" _, input_height, input_width = ( shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0])) output_height = tf.maximum(input_height // self._stride, 1) output_width = tf.maximum(input_width // self._stride, 1) y_grid, x_grid = tf.meshgrid( tf.range(output_height), tf.range(output_width), indexing='ij') y_grid, x_grid = tf.cast(y_grid, tf.float32), tf.cast(x_grid, tf.float32) corner_targets = [] foreground_targets = [] for gt_masks, gt_boxes in zip(gt_masks_list, gt_boxes_list): gt_masks = _resize_masks(gt_masks, output_height, output_width, method=ResizeMethod.NEAREST_NEIGHBOR) gt_masks = filter_mask_overlap(gt_masks, self._overlap_resolution) output_height = tf.cast(output_height, tf.float32) output_width = tf.cast(output_width, tf.float32) ymin, xmin, ymax, xmax = tf.unstack(gt_boxes, axis=1) ymin, ymax = ymin * output_height, ymax * output_height xmin, xmax = xmin * output_width, xmax * output_width top_y = ymin[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis] left_x = xmin[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis] bottom_y = ymax[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis] right_x = xmax[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis] foreground_target = tf.cast(tf.reduce_sum(gt_masks, axis=0) > 0.5, tf.float32) foreground_targets.append(foreground_target) corner_target = tf.stack([ tf.reduce_sum(top_y * gt_masks, axis=0), tf.reduce_sum(left_x * gt_masks, axis=0), tf.reduce_sum(bottom_y * gt_masks, axis=0), tf.reduce_sum(right_x * gt_masks, axis=0), ], axis=2) corner_targets.append(corner_target) return (tf.stack(corner_targets, axis=0), tf.stack(foreground_targets, axis=0)) class CenterNetTemporalOffsetTargetAssigner(object): """Wrapper to compute target tensors for the temporal offset task. This class has methods that take as input a batch of ground truth tensors (in the form of a list) and returns the targets required to train the temporal offset task. """ def __init__(self, stride): """Initializes the target assigner. Args: stride: int, the stride of the network in output pixels. """ self._stride = stride def assign_temporal_offset_targets(self, height, width, gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list=None): """Returns the temporal offset targets and their indices. For each ground truth box, this function assigns it the corresponding temporal offset to train the model. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_boxes_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The coordinates are expected in normalized coordinates. gt_offsets_list: A list of 2-D tf.float32 tensors of shape [num_boxes, 2] containing the spatial offsets of objects' centers compared with the previous frame. gt_match_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing flags that indicate if an object has existed in the previous frame. gt_weights_list: A list of tensors with shape [num_boxes] corresponding to the weight of each groundtruth detection box. Returns: batch_indices: an integer tensor of shape [num_boxes, 3] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. 
      batch_temporal_offsets: a float tensor of shape [num_boxes, 2] of the
        expected y and x temporal offset of each object center in the output
        space.
      batch_weights: a float tensor of shape [num_boxes] indicating the
        weight of each prediction.
    """

    if gt_weights_list is None:
      gt_weights_list = [None] * len(gt_boxes_list)

    batch_indices = []
    batch_weights = []
    batch_temporal_offsets = []

    for i, (boxes, offsets, match_flags, weights) in enumerate(zip(
        gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list)):
      boxes = box_list.BoxList(boxes)
      boxes = box_list_ops.to_absolute_coordinates(
          boxes,
          tf.maximum(height // self._stride, 1),
          tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has shape
      # [num_boxes].
      (y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
      num_boxes = tf.shape(x_center)

      # Compute the offsets and indices of the box centers. Shape:
      #   offsets: [num_boxes, 2]
      #   indices: [num_boxes, 2]
      (_, indices) = ta_utils.compute_floor_offsets_with_indices(
          y_source=y_center, x_source=x_center)

      # Assign ones if weights are not provided.
      # If an object is not matched, its weight becomes zero.
      if weights is None:
        weights = tf.ones(num_boxes, dtype=tf.float32)
      weights *= match_flags

      # Shape of [num_boxes, 1] integer tensor filled with current batch index.
      batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
      batch_indices.append(tf.concat([batch_index, indices], axis=1))
      batch_weights.append(weights)
      batch_temporal_offsets.append(offsets)

    batch_indices = tf.concat(batch_indices, axis=0)
    batch_weights = tf.concat(batch_weights, axis=0)
    batch_temporal_offsets = tf.concat(batch_temporal_offsets, axis=0)
    return (batch_indices, batch_temporal_offsets, batch_weights)


class DETRTargetAssigner(object):
  """Target assigner for DETR (https://arxiv.org/abs/2005.12872).

  Detection Transformer (DETR) matches predicted boxes to groundtruth directly
  to determine targets instead of matching anchors to groundtruth. Hence, the
  new target assigner.
  """

  def __init__(self):
    """Construct Object Detection Target Assigner."""
    self._similarity_calc = sim_calc.DETRSimilarity()
    self._matcher = hungarian_matcher.HungarianBipartiteMatcher()

  def batch_assign(self,
                   pred_box_batch,
                   gt_box_batch,
                   pred_class_batch,
                   gt_class_targets_batch,
                   gt_weights_batch=None,
                   unmatched_class_label_batch=None):
    """Batched assignment of classification and regression targets.

    Args:
      pred_box_batch: a tensor of shape [batch_size, num_queries, 4]
        representing predicted bounding boxes.
      gt_box_batch: a tensor of shape [batch_size, num_queries, 4]
        representing groundtruth bounding boxes.
      pred_class_batch: A list of tensors with length batch_size, where each
        tensor has shape [num_queries, num_classes] to be used by certain
        similarity calculators.
      gt_class_targets_batch: a list of tensors with length batch_size, where
        each tensor has shape [num_gt_boxes_i, num_classes] and num_gt_boxes_i
        is the number of boxes in the ith boxlist of gt_box_batch.
      gt_weights_batch: A list of 1-D tf.float32 tensors of shape [num_boxes]
        containing weights for groundtruth boxes.
      unmatched_class_label_batch: a float32 tensor with shape
        [d_1, d_2, ..., d_k] which is consistent with the classification target
        for each anchor (and can be empty for scalar targets). This shape must
        thus be compatible with the `gt_class_targets_batch`.
Returns: batch_cls_targets: a tensor with shape [batch_size, num_pred_boxes, num_classes], batch_cls_weights: a tensor with shape [batch_size, num_pred_boxes, num_classes], batch_reg_targets: a tensor with shape [batch_size, num_pred_boxes, box_code_dimension] batch_reg_weights: a tensor with shape [batch_size, num_pred_boxes]. """ pred_box_batch = [ box_list.BoxList(pred_box) for pred_box in tf.unstack(pred_box_batch)] gt_box_batch = [ box_list.BoxList(gt_box) for gt_box in tf.unstack(gt_box_batch)] cls_targets_list = [] cls_weights_list = [] reg_targets_list = [] reg_weights_list = [] if gt_weights_batch is None: gt_weights_batch = [None] * len(gt_class_targets_batch) if unmatched_class_label_batch is None: unmatched_class_label_batch = [None] * len(gt_class_targets_batch) pred_class_batch = tf.unstack(pred_class_batch) for (pred_boxes, gt_boxes, pred_class_batch, gt_class_targets, gt_weights, unmatched_class_label) in zip(pred_box_batch, gt_box_batch, pred_class_batch, gt_class_targets_batch, gt_weights_batch, unmatched_class_label_batch): (cls_targets, cls_weights, reg_targets, reg_weights) = self.assign(pred_boxes, gt_boxes, pred_class_batch, gt_class_targets, gt_weights, unmatched_class_label) cls_targets_list.append(cls_targets) cls_weights_list.append(cls_weights) reg_targets_list.append(reg_targets) reg_weights_list.append(reg_weights) batch_cls_targets = tf.stack(cls_targets_list) batch_cls_weights = tf.stack(cls_weights_list) batch_reg_targets = tf.stack(reg_targets_list) batch_reg_weights = tf.stack(reg_weights_list) return (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights) def assign(self, pred_boxes, gt_boxes, pred_classes, gt_labels, gt_weights=None, unmatched_class_label=None): """Assign classification and regression targets to each box_pred. For a given set of pred_boxes and groundtruth detections, match pred_boxes to gt_boxes and assign classification and regression targets to each box_pred as well as weights based on the resulting match (specifying, e.g., which pred_boxes should not contribute to training loss). pred_boxes that are not matched to anything are given a classification target of `unmatched_cls_target`. Args: pred_boxes: a BoxList representing N pred_boxes gt_boxes: a BoxList representing M groundtruth boxes pred_classes: A tensor with shape [max_num_boxes, num_classes] to be used by certain similarity calculators. gt_labels: a tensor of shape [M, num_classes] with labels for each of the ground_truth boxes. The subshape [num_classes] can be empty (corresponding to scalar inputs). When set to None, gt_labels assumes a binary problem where all ground_truth boxes get a positive label (of 1). gt_weights: a float tensor of shape [M] indicating the weight to assign to all pred_boxes match to a particular groundtruth box. The weights must be in [0., 1.]. If None, all weights are set to 1. Generally no groundtruth boxes with zero weight match to any pred_boxes as matchers are aware of groundtruth weights. Additionally, `cls_weights` and `reg_weights` are calculated using groundtruth weights as an added safety. unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). 
    Returns:
      cls_targets: a float32 tensor with shape [num_pred_boxes, num_classes],
        where the subshape [num_classes] is compatible with gt_labels which
        has shape [num_gt_boxes, num_classes].
      cls_weights: a float32 tensor with shape [num_pred_boxes, num_classes],
        representing weights for each element in cls_targets.
      reg_targets: a float32 tensor with shape [num_pred_boxes,
        box_code_dimension].
      reg_weights: a float32 tensor with shape [num_pred_boxes].
    """
    if unmatched_class_label is None:
      unmatched_class_label = tf.constant(
          [1] + [0] * (gt_labels.shape[1] - 1), tf.float32)

    if gt_weights is None:
      num_gt_boxes = gt_boxes.num_boxes_static()
      if not num_gt_boxes:
        num_gt_boxes = gt_boxes.num_boxes()
      gt_weights = tf.ones([num_gt_boxes], dtype=tf.float32)

    gt_boxes.add_field(fields.BoxListFields.classes, gt_labels)
    pred_boxes.add_field(fields.BoxListFields.classes, pred_classes)

    match_quality_matrix = self._similarity_calc.compare(
        gt_boxes, pred_boxes)
    match = self._matcher.match(match_quality_matrix,
                                valid_rows=tf.greater(gt_weights, 0))

    matched_gt_boxes = match.gather_based_on_match(
        gt_boxes.get(),
        unmatched_value=tf.zeros(4),
        ignored_value=tf.zeros(4))
    matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
    ty, tx, th, tw = matched_gt_boxlist.get_center_coordinates_and_sizes()
    reg_targets = tf.transpose(tf.stack([ty, tx, th, tw]))
    cls_targets = match.gather_based_on_match(
        gt_labels,
        unmatched_value=unmatched_class_label,
        ignored_value=unmatched_class_label)
    reg_weights = match.gather_based_on_match(
        gt_weights,
        ignored_value=0.,
        unmatched_value=0.)
    cls_weights = match.gather_based_on_match(
        gt_weights,
        ignored_value=0.,
        unmatched_value=1.)

    # Convert cls_weights from per-box_pred to per-class.
    class_label_shape = tf.shape(cls_targets)[1:]
    weights_multiple = tf.concat(
        [tf.constant([1]), class_label_shape],
        axis=0)
    cls_weights = tf.expand_dims(cls_weights, -1)
    cls_weights = tf.tile(cls_weights, weights_multiple)

    return (cls_targets, cls_weights, reg_targets, reg_weights)
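

# The sketch below is an illustrative usage example for DETRTargetAssigner and
# is not part of the original training pipeline: it shows how predicted and
# groundtruth boxes are wrapped in BoxLists before calling `assign`. The box
# coordinates, class scores, and one-hot labels are made-up values; only
# `box_list`, `tf`, and the classes defined in this module are assumed.
def _example_detr_assignment():
  """Minimal sketch of a single-image DETR target assignment."""
  assigner = DETRTargetAssigner()
  # Two predicted boxes (e.g. transformer queries) and one groundtruth box,
  # all in normalized [ymin, xmin, ymax, xmax] coordinates.
  pred_boxes = box_list.BoxList(
      tf.constant([[0.1, 0.1, 0.5, 0.5],
                   [0.4, 0.4, 0.9, 0.9]], tf.float32))
  gt_boxes = box_list.BoxList(
      tf.constant([[0.12, 0.11, 0.48, 0.52]], tf.float32))
  # Per-query class scores and one-hot groundtruth labels (2 classes).
  pred_classes = tf.constant([[0.1, 0.9],
                              [0.8, 0.2]], tf.float32)
  gt_labels = tf.constant([[0., 1.]], tf.float32)
  cls_targets, cls_weights, reg_targets, reg_weights = assigner.assign(
      pred_boxes, gt_boxes, pred_classes, gt_labels)
  return cls_targets, cls_weights, reg_targets, reg_weights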
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/target_assigner.py
target_assigner.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import ABCMeta from abc import abstractmethod import six import tensorflow.compat.v1 as tf from object_detection.core import box_list_ops from object_detection.core import standard_fields as fields class RegionSimilarityCalculator(six.with_metaclass(ABCMeta, object)): """Abstract base class for region similarity calculator.""" def compare(self, boxlist1, boxlist2, scope=None): """Computes matrix of pairwise similarity between BoxLists. This op (to be overridden) computes a measure of pairwise similarity between the boxes in the given BoxLists. Higher values indicate more similarity. Note that this method simply measures similarity and does not explicitly perform a matching. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. scope: Op scope name. Defaults to 'Compare' if None. Returns: a (float32) tensor of shape [N, M] with pairwise similarity score. """ with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope: return self._compare(boxlist1, boxlist2) @abstractmethod def _compare(self, boxlist1, boxlist2): pass class IouSimilarity(RegionSimilarityCalculator): """Class to compute similarity based on Intersection over Union (IOU) metric. This class computes pairwise similarity between two BoxLists based on IOU. """ def _compare(self, boxlist1, boxlist2): """Compute pairwise IOU similarity between the two BoxLists. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. Returns: A tensor with shape [N, M] representing pairwise iou scores. """ return box_list_ops.iou(boxlist1, boxlist2) class DETRSimilarity(RegionSimilarityCalculator): """Class to compute similarity for the Detection Transformer model. This class computes pairwise DETR similarity between two BoxLists using a weighted combination of GIOU, classification scores, and the L1 loss. """ def __init__(self, l1_weight=5, giou_weight=2): super().__init__() self.l1_weight = l1_weight self.giou_weight = giou_weight def _compare(self, boxlist1, boxlist2): """Compute pairwise DETR similarity between the two BoxLists. Args: boxlist1: BoxList holding N groundtruth boxes. boxlist2: BoxList holding M predicted boxes. Returns: A tensor with shape [N, M] representing pairwise DETR similarity scores. """ groundtruth_labels = boxlist1.get_field(fields.BoxListFields.classes) predicted_labels = boxlist2.get_field(fields.BoxListFields.classes) classification_scores = tf.matmul(groundtruth_labels, predicted_labels, transpose_b=True) loss = self.l1_weight * box_list_ops.l1( boxlist1, boxlist2) + self.giou_weight * (1 - box_list_ops.giou( boxlist1, boxlist2)) - classification_scores return -loss class NegSqDistSimilarity(RegionSimilarityCalculator): """Class to compute similarity based on the squared distance metric. This class computes pairwise similarity between two BoxLists based on the negative squared distance metric. """ def _compare(self, boxlist1, boxlist2): """Compute matrix of (negated) sq distances. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. Returns: A tensor with shape [N, M] representing negated pairwise squared distance. """ return -1 * box_list_ops.sq_dist(boxlist1, boxlist2) class IoaSimilarity(RegionSimilarityCalculator): """Class to compute similarity based on Intersection over Area (IOA) metric. This class computes pairwise similarity between two BoxLists based on their pairwise intersections divided by the areas of second BoxLists. 
""" def _compare(self, boxlist1, boxlist2): """Compute pairwise IOA similarity between the two BoxLists. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. Returns: A tensor with shape [N, M] representing pairwise IOA scores. """ return box_list_ops.ioa(boxlist1, boxlist2) class ThresholdedIouSimilarity(RegionSimilarityCalculator): """Class to compute similarity based on thresholded IOU and score. This class computes pairwise similarity between two BoxLists based on IOU and a 'score' present in boxlist1. If IOU > threshold, then the entry in the output pairwise tensor will contain `score`, otherwise 0. """ def __init__(self, iou_threshold=0): """Initialize the ThresholdedIouSimilarity. Args: iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold, then the comparison result will be the foreground probability of the first box, otherwise it will be zero. """ super(ThresholdedIouSimilarity, self).__init__() self._iou_threshold = iou_threshold def _compare(self, boxlist1, boxlist2): """Compute pairwise IOU similarity between the two BoxLists and score. Args: boxlist1: BoxList holding N boxes. Must have a score field. boxlist2: BoxList holding M boxes. Returns: A tensor with shape [N, M] representing scores threholded by pairwise iou scores. """ ious = box_list_ops.iou(boxlist1, boxlist2) scores = boxlist1.get_field(fields.BoxListFields.scores) scores = tf.expand_dims(scores, axis=1) row_replicated_scores = tf.tile(scores, [1, tf.shape(ious)[-1]]) thresholded_ious = tf.where(ious > self._iou_threshold, row_replicated_scores, tf.zeros_like(ious)) return thresholded_ious
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/region_similarity_calculator.py
region_similarity_calculator.py
import numpy as np import tensorflow.compat.v1 as tf def scale(keypoints, y_scale, x_scale, scope=None): """Scales keypoint coordinates in x and y dimensions. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) new_keypoints = keypoints * [[[y_scale, x_scale]]] return new_keypoints def clip_to_window(keypoints, window, scope=None): """Clips keypoints to a window. This op clips any input keypoints to a window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window to which the op should clip the keypoints. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ keypoints.get_shape().assert_has_rank(3) with tf.name_scope(scope, 'ClipToWindow'): y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) y = tf.maximum(tf.minimum(y, win_y_max), win_y_min) x = tf.maximum(tf.minimum(x, win_x_max), win_x_min) new_keypoints = tf.concat([y, x], 2) return new_keypoints def prune_outside_window(keypoints, window, scope=None): """Prunes keypoints that fall outside a given window. This function replaces keypoints that fall outside the given window with nan. See also clip_to_window which clips any keypoints that fall outside the given window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window outside of which the op should prune the keypoints. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ keypoints.get_shape().assert_has_rank(3) with tf.name_scope(scope, 'PruneOutsideWindow'): y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) valid_indices = tf.logical_and( tf.logical_and(y >= win_y_min, y <= win_y_max), tf.logical_and(x >= win_x_min, x <= win_x_max)) new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y)) new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x)) new_keypoints = tf.concat([new_y, new_x], 2) return new_keypoints def change_coordinate_frame(keypoints, window, scope=None): """Changes coordinate frame of the keypoints to be relative to window's frame. Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint coordinates from keypoints of shape [num_instances, num_keypoints, 2] to be relative to this window. An example use case is data augmentation: where we are given groundtruth keypoints and would like to randomly crop the image to some window. In this case we need to change the coordinate frame of each groundtruth keypoint to be relative to this new window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window we should change the coordinate frame to. scope: name scope. 
Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'ChangeCoordinateFrame'): win_height = window[2] - window[0] win_width = window[3] - window[1] new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height, 1.0 / win_width) return new_keypoints def keypoints_to_enclosing_bounding_boxes(keypoints, keypoints_axis=1): """Creates enclosing bounding boxes from keypoints. Args: keypoints: a [num_instances, num_keypoints, 2] float32 tensor with keypoints in [y, x] format. keypoints_axis: An integer indicating the axis that correspond to the keypoint dimension. Returns: A [num_instances, 4] float32 tensor that tightly covers all the keypoints for each instance. """ ymin = tf.math.reduce_min(keypoints[..., 0], axis=keypoints_axis) xmin = tf.math.reduce_min(keypoints[..., 1], axis=keypoints_axis) ymax = tf.math.reduce_max(keypoints[..., 0], axis=keypoints_axis) xmax = tf.math.reduce_max(keypoints[..., 1], axis=keypoints_axis) return tf.stack([ymin, xmin, ymax, xmax], axis=keypoints_axis) def to_normalized_coordinates(keypoints, height, width, check_range=True, scope=None): """Converts absolute keypoint coordinates to normalized coordinates in [0, 1]. Usually one uses the dynamic shape of the image or conv-layer tensor: keypoints = keypoint_ops.to_normalized_coordinates(keypoints, tf.shape(images)[1], tf.shape(images)[2]), This function raises an assertion failed error at graph execution time when the maximum coordinate is smaller than 1.01 (which means that coordinates are already normalized). The value 1.01 is to deal with small rounding errors. Args: keypoints: A tensor of shape [num_instances, num_keypoints, 2]. height: Maximum value for y coordinate of absolute keypoint coordinates. width: Maximum value for x coordinate of absolute keypoint coordinates. check_range: If True, checks if the coordinates are normalized. scope: name scope. Returns: tensor of shape [num_instances, num_keypoints, 2] with normalized coordinates in [0, 1]. """ with tf.name_scope(scope, 'ToNormalizedCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) if check_range: max_val = tf.reduce_max(keypoints) max_assert = tf.Assert(tf.greater(max_val, 1.01), ['max value is lower than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(keypoints, 1.0 / height, 1.0 / width) def to_absolute_coordinates(keypoints, height, width, check_range=True, scope=None): """Converts normalized keypoint coordinates to absolute pixel coordinates. This function raises an assertion failed error when the maximum keypoint coordinate value is larger than 1.01 (in which case coordinates are already absolute). Args: keypoints: A tensor of shape [num_instances, num_keypoints, 2] height: Maximum value for y coordinate of absolute keypoint coordinates. width: Maximum value for x coordinate of absolute keypoint coordinates. check_range: If True, checks if the coordinates are normalized or not. scope: name scope. Returns: tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates in terms of the image size. """ with tf.name_scope(scope, 'ToAbsoluteCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) # Ensure range of input keypoints is correct. 
    if check_range:
      max_val = tf.reduce_max(keypoints)
      max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
                             ['maximum keypoint coordinate value is larger '
                              'than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(keypoints, height, width)


def flip_horizontal(keypoints, flip_point, flip_permutation=None, scope=None):
  """Flips the keypoints horizontally around the flip_point.

  This operation flips the x coordinate for each keypoint around the flip_point
  and also permutes the keypoints in a manner specified by flip_permutation.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    flip_point: (float) scalar tensor representing the x coordinate to flip the
      keypoints around.
    flip_permutation: integer list or rank 1 int32 tensor containing the
      keypoint flip permutation. This specifies the mapping from original
      keypoint indices to the flipped keypoint indices. This is used primarily
      for keypoints that are not reflection invariant. E.g. Suppose there are 3
      keypoints representing ['head', 'right_eye', 'left_eye'], then a logical
      choice for flip_permutation might be [0, 2, 1] since we want to swap the
      'left_eye' and 'right_eye' after a horizontal flip. Default to None or
      empty list to keep the original order after flip.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  keypoints.get_shape().assert_has_rank(3)
  with tf.name_scope(scope, 'FlipHorizontal'):
    keypoints = tf.transpose(keypoints, [1, 0, 2])
    if flip_permutation:
      keypoints = tf.gather(keypoints, flip_permutation)
    v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    u = flip_point * 2.0 - u
    new_keypoints = tf.concat([v, u], 2)
    new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
    return new_keypoints


def flip_vertical(keypoints, flip_point, flip_permutation=None, scope=None):
  """Flips the keypoints vertically around the flip_point.

  This operation flips the y coordinate for each keypoint around the flip_point
  and also permutes the keypoints in a manner specified by flip_permutation.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    flip_point: (float) scalar tensor representing the y coordinate to flip the
      keypoints around.
    flip_permutation: integer list or rank 1 int32 tensor containing the
      keypoint flip permutation. This specifies the mapping from original
      keypoint indices to the flipped keypoint indices. This is used primarily
      for keypoints that are not reflection invariant. E.g. Suppose there are 3
      keypoints representing ['head_top', 'chin', 'nose'], then a logical
      choice for flip_permutation might be [1, 0, 2] since we want to swap
      'head_top' and 'chin' after a vertical flip. Default to None or empty
      list to keep the original order after flip.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  keypoints.get_shape().assert_has_rank(3)
  with tf.name_scope(scope, 'FlipVertical'):
    keypoints = tf.transpose(keypoints, [1, 0, 2])
    if flip_permutation:
      keypoints = tf.gather(keypoints, flip_permutation)
    v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    v = flip_point * 2.0 - v
    new_keypoints = tf.concat([v, u], 2)
    new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
    return new_keypoints


def rot90(keypoints, rotation_permutation=None, scope=None):
  """Rotates the keypoints counter-clockwise by 90 degrees.
Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] rotation_permutation: integer list or rank 1 int32 tensor containing the keypoint flip permutation. This specifies the mapping from original keypoint indices to the rotated keypoint indices. This is used primarily for keypoints that are not rotation invariant. Default to None or empty list to keep the original order after rotation. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ keypoints.get_shape().assert_has_rank(3) with tf.name_scope(scope, 'Rot90'): keypoints = tf.transpose(keypoints, [1, 0, 2]) if rotation_permutation: keypoints = tf.gather(keypoints, rotation_permutation) v, u = tf.split(value=keypoints[:, :, ::-1], num_or_size_splits=2, axis=2) v = 1.0 - v new_keypoints = tf.concat([v, u], 2) new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) return new_keypoints def keypoint_weights_from_visibilities(keypoint_visibilities, per_keypoint_weights=None): """Returns a keypoint weights tensor. During training, it is often beneficial to consider only those keypoints that are labeled. This function returns a weights tensor that combines default per-keypoint weights, as well as the visibilities of individual keypoints. The returned tensor satisfies: keypoint_weights[i, k] = per_keypoint_weights[k] * keypoint_visibilities[i, k] where per_keypoint_weights[k] is set to 1 if not provided. Args: keypoint_visibilities: A [num_instances, num_keypoints] boolean tensor indicating whether a keypoint is labeled (and perhaps even visible). per_keypoint_weights: A list or 1-d tensor of length `num_keypoints` with per-keypoint weights. If None, will use 1 for each visible keypoint weight. Returns: A [num_instances, num_keypoints] float32 tensor with keypoint weights. Those keypoints deemed visible will have the provided per-keypoint weight, and all others will be set to zero. """ keypoint_visibilities.get_shape().assert_has_rank(2) if per_keypoint_weights is None: num_keypoints = keypoint_visibilities.shape.as_list()[1] per_keypoint_weight_mult = tf.ones((1, num_keypoints,), dtype=tf.float32) else: per_keypoint_weight_mult = tf.expand_dims(per_keypoint_weights, axis=0) return per_keypoint_weight_mult * tf.cast(keypoint_visibilities, tf.float32) def set_keypoint_visibilities(keypoints, initial_keypoint_visibilities=None): """Sets keypoint visibilities based on valid/invalid keypoints. Some keypoint operations set invisible keypoints (e.g. cropped keypoints) to NaN, without affecting any keypoint "visibility" variables. This function is used to update (or create) keypoint visibilities to agree with visible / invisible keypoint coordinates. Args: keypoints: a float32 tensor of shape [num_instances, num_keypoints, 2]. initial_keypoint_visibilities: a boolean tensor of shape [num_instances, num_keypoints]. If provided, will maintain the visibility designation of a keypoint, so long as the corresponding coordinates are not NaN. If not provided, will create keypoint visibilities directly from the values in `keypoints` (i.e. NaN coordinates map to False, otherwise they map to True). Returns: keypoint_visibilities: a bool tensor of shape [num_instances, num_keypoints] indicating whether a keypoint is visible or not. 
""" keypoints.get_shape().assert_has_rank(3) if initial_keypoint_visibilities is not None: keypoint_visibilities = tf.cast(initial_keypoint_visibilities, tf.bool) else: keypoint_visibilities = tf.ones_like(keypoints[:, :, 0], dtype=tf.bool) keypoints_with_nan = tf.math.reduce_any(tf.math.is_nan(keypoints), axis=2) keypoint_visibilities = tf.where( keypoints_with_nan, tf.zeros_like(keypoint_visibilities, dtype=tf.bool), keypoint_visibilities) return keypoint_visibilities
123-object-detection
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/core/keypoint_ops.py
keypoint_ops.py