repo | file | code | file_length | avg_line_length | max_line_length | extension_type
stringlengths 2–99 | stringlengths 13–225 | stringlengths 0–18.3M | int64 0–18.3M | float64 0–1.36M | int64 0–4.26M | stringclasses 1 value
---|---|---|---|---|---|---
models | models-master/official/vision/train_spatial_partitioning.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver with spatial partitioning."""
from typing import Sequence
from absl import app
from absl import flags
import gin
import numpy as np
import tensorflow as tf
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.vision import registry_imports # pylint: disable=unused-import
FLAGS = flags.FLAGS
def get_computation_shape_for_model_parallelism(
input_partition_dims: Sequence[int]) -> Sequence[int]:
"""Returns computation shape to be used for TPUStrategy spatial partition.
Args:
input_partition_dims: The number of partitions along each dimension.
Returns:
A list of integers specifying the computation shape.
Raises:
ValueError: If the number of logical devices is not supported.
"""
num_logical_devices = np.prod(input_partition_dims)
if num_logical_devices == 1:
return [1, 1, 1, 1]
elif num_logical_devices == 2:
return [1, 1, 1, 2]
elif num_logical_devices == 4:
return [1, 2, 1, 2]
elif num_logical_devices == 8:
return [2, 2, 1, 2]
elif num_logical_devices == 16:
return [4, 2, 1, 2]
else:
raise ValueError(
'The number of logical devices %d is not supported. Supported numbers '
'are 1, 2, 4, 8, 16' % num_logical_devices)
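# Illustrative mapping (a minimal sketch, not part of the original module): the
# computation shape above is determined only by the product of the partition
# dims. For example, assuming 2x2 spatial partitioning of the input:
#
#   get_computation_shape_for_model_parallelism([1, 2, 2])  # -> [1, 2, 1, 2]
#   get_computation_shape_for_model_parallelism([2, 2, 2])  # -> [2, 2, 1, 2]
#
# np.prod([1, 2, 2]) == 4 and np.prod([2, 2, 2]) == 8, so the two calls hit the
# 4- and 8-logical-device branches respectively.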
def create_distribution_strategy(distribution_strategy,
tpu_address,
input_partition_dims=None,
num_gpus=None):
"""Creates distribution strategy to use for computation."""
if input_partition_dims is not None:
if distribution_strategy != 'tpu':
raise ValueError('Spatial partitioning is only supported '
'for TPUStrategy.')
# When `input_partition_dims` is specified, create a custom TPUStrategy
# instance with a computation shape for model parallelism.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=tpu_address)
if tpu_address not in ('', 'local'):
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
num_replicas = resolver.get_tpu_system_metadata().num_cores // np.prod(
input_partition_dims)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
num_replicas=num_replicas,
computation_shape=input_partition_dims)
return tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
return distribute_utils.get_distribution_strategy(
distribution_strategy=distribution_strategy,
tpu_address=tpu_address,
num_gpus=num_gpus)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets the mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have a significant impact on model speed by utilizing float16 on GPUs and
# bfloat16 on TPUs. loss_scale takes effect only when the dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
input_partition_dims = None
if FLAGS.mode == 'train_and_eval':
if np.prod(params.task.train_input_partition_dims) != np.prod(
params.task.eval_input_partition_dims):
raise ValueError('Train and eval input partition dims must produce the '
'same number of partitions.')
else:
input_partition_dims = get_computation_shape_for_model_parallelism(
params.task.train_input_partition_dims)
elif FLAGS.mode == 'train':
if params.task.train_input_partition_dims:
input_partition_dims = get_computation_shape_for_model_parallelism(
params.task.train_input_partition_dims)
elif FLAGS.mode == 'eval' or FLAGS.mode == 'continuous_eval':
if params.task.eval_input_partition_dims:
input_partition_dims = get_computation_shape_for_model_parallelism(
params.task.eval_input_partition_dims)
distribution_strategy = create_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
num_gpus=params.runtime.num_gpus,
input_partition_dims=input_partition_dims,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
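# Example invocation (a hedged sketch, not from the original file): assuming the
# standard Model Garden flags defined by `tfm_flags.define_flags()` (such as
# `--experiment`, `--mode`, `--model_dir` and `--params_override`), spatial
# partitioning is driven by the task config consumed in `main()` above, e.g.:
#
#   python3 train_spatial_partitioning.py \
#     --experiment=<experiment_name> \
#     --mode=train \
#     --model_dir=/tmp/model_dir \
#     --params_override="task.train_input_partition_dims=[1,2,2],runtime.distribution_strategy=tpu"
#
# Flag names other than the ones referenced in this file come from
# `official.common.flags` and should be treated as assumptions.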
| 5,725 | 36.671053 | 80 | py |
models | models-master/official/vision/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration."""
# pylint: disable=unused-import
from official import vision
from official.utils.testing import mock_task
| 760 | 39.052632 | 74 | py |
models | models-master/official/vision/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vision package definition."""
# pylint: disable=unused-import
from official.vision import configs
from official.vision import tasks
| 744 | 38.210526 | 74 | py |
models | models-master/official/vision/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.utils import summary_manager
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'enable_async_checkpointing',
default=True,
help='A boolean indicating whether to enable async checkpoint saving')
def _run_experiment_with_preemption_recovery(params, model_dir):
"""Runs experiment and tries to reconnect when encounting a preemption."""
keep_training = True
while keep_training:
preemption_watcher = None
try:
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
# pylint: disable=line-too-long
preemption_watcher = None # copybara-replace
# pylint: enable=line-too-long
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir,
summary_manager=None,
eval_summary_manager=summary_manager.maybe_build_eval_summary_manager(
params=params, model_dir=model_dir
),
enable_async_checkpointing=FLAGS.enable_async_checkpointing,
)
keep_training = False
except tf.errors.OpError as e:
if preemption_watcher and preemption_watcher.preemption_message:
preemption_watcher.block_until_worker_exit()
logging.info(
'Some TPU workers had been preempted (message: %s), '
'restarting training from the last checkpoint...',
preemption_watcher.preemption_message)
keep_training = True
else:
raise e from None
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets the mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have a significant impact on model speed by utilizing float16 on GPUs and
# bfloat16 on TPUs. loss_scale takes effect only when the dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
_run_experiment_with_preemption_recovery(params, model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
app.run(main)
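# Example invocation (a hedged sketch, not from the original file): the three
# flags marked as required above must be set, and `--enable_async_checkpointing`
# is defined in this file; the remaining flags come from
# `tfm_flags.define_flags()`.
#
#   python3 train.py \
#     --experiment=<experiment_name> \
#     --mode=train_and_eval \
#     --model_dir=/tmp/model_dir \
#     --enable_async_checkpointing=false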
| 3,995 | 36.345794 | 80 | py |
models | models-master/official/vision/evaluation/instance_metrics_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.vision.evaluation import instance_metrics
class InstanceMetricsTest(tf.test.TestCase, parameterized.TestCase):
def test_compute_coco_ap(self):
precisions = [1.0, 1.0, 0.5, 0.8, 0.4, 0.5, 0.2, 0.3]
recalls = [0.0, 0.1, 0.1, 0.5, 0.5, 0.7, 0.7, 1.0]
self.assertAllClose(
instance_metrics.COCOAveragePrecision(recalls_desc=False)(
precisions, recalls
),
0.613861,
atol=1e-4,
)
precisions.reverse()
recalls.reverse()
self.assertAllClose(
instance_metrics.COCOAveragePrecision(recalls_desc=True)(
precisions, recalls
),
0.613861,
atol=1e-4,
)
def test_compute_voc10_ap(self):
precisions = [1.0, 1.0, 0.5, 0.8, 0.4, 0.5, 0.2, 0.3]
recalls = [0.0, 0.1, 0.1, 0.5, 0.5, 0.7, 0.7, 1.0]
self.assertAllClose(
instance_metrics.VOC2010AveragePrecision(recalls_desc=False)(
precisions, recalls
),
0.61,
atol=1e-4,
)
precisions.reverse()
recalls.reverse()
self.assertAllClose(
instance_metrics.VOC2010AveragePrecision(recalls_desc=True)(
precisions, recalls
),
0.61,
atol=1e-4,
)
def test_match_detections_to_gts(self):
coco_matching_algorithm = instance_metrics.COCOMatchingAlgorithm(
iou_thresholds=(0.5, 0.85)
)
detection_is_tp, gt_is_tp = coco_matching_algorithm(
detection_to_gt_ious=tf.constant([[[0.8, 0.7, 0.95], [0.9, 0.6, 0.3]]]),
detection_classes=tf.constant([[1, 1]]),
detection_scores=tf.constant([[0.6, 0.8]]),
gt_classes=tf.constant([[1, 1, 2]]),
)
self.assertAllEqual(detection_is_tp, [[[True, False], [True, True]]])
self.assertAllEqual(
gt_is_tp, [[[True, True], [True, False], [False, False]]]
)
def test_shift_and_rescale_boxes(self):
self.assertAllClose(
instance_metrics._shift_and_rescale_boxes(
boxes=[[[2, 3, 4, 9], [15, 17, 18, 23]]], output_boundary=(20, 20)
),
[[[0.0, 0.0, 2.0, 6.0], [13.0, 14.0, 16.0, 20.0]]],
atol=1e-4,
)
self.assertAllClose(
instance_metrics._shift_and_rescale_boxes(
boxes=[[[-2, -1, 0, 5], [11, 13, 14, 19]]], output_boundary=(20, 20)
),
[[[0.0, 0.0, 2.0, 6.0], [13.0, 14.0, 16.0, 20.0]]],
atol=1e-4,
)
self.assertAllClose(
instance_metrics._shift_and_rescale_boxes(
boxes=[[[2, 3, 4, 9], [15, 17, 18, 23]]], output_boundary=(10, 10)
),
[[[0.0, 0.0, 1.0, 3.0], [6.5, 7.0, 8.0, 10.0]]],
atol=1e-4,
)
self.assertAllClose(
instance_metrics._shift_and_rescale_boxes(
boxes=[[[-2, -1, 0, 5], [11, 13, 14, 19]]], output_boundary=(10, 10)
),
[[[0.0, 0.0, 1.0, 3.0], [6.5, 7.0, 8.0, 10.0]]],
atol=1e-4,
)
self.assertAllClose(
instance_metrics._shift_and_rescale_boxes(
boxes=[[[2, 3, 4, 9], [-1, -1, -1, -1]]], output_boundary=(10, 10)
),
[[[0.0, 0.0, 2.0, 6.0], [0.0, 0.0, 0.0, 0.0]]],
atol=1e-4,
)
def test_count_detection_type(self):
result = instance_metrics._count_detection_type(
detection_type_mask=tf.constant(
[[[True], [True], [False]], [[True], [True], [False]]]
),
detection_classes=tf.constant([[1, 2, 3], [2, 3, 4]]),
flattened_binned_confidence_one_hot=tf.constant([
[False, True, False],
[True, False, False],
[False, True, False],
[True, False, False],
[False, False, True],
[False, False, True],
]),
num_classes=5,
)
self.assertAllClose(
result,
[[
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[2.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
]],
atol=1e-4,
)
@parameterized.parameters(True, False)
def test_instance_metrics(self, use_mask):
metrics = instance_metrics.InstanceMetrics(
name='per_class_ap',
num_classes=3,
use_masks=use_mask,
iou_thresholds=(0.1, 0.5),
confidence_thresholds=(0.2, 0.7),
mask_output_boundary=(32, 32),
average_precision_algorithms={
'ap_coco': instance_metrics.COCOAveragePrecision(),
'ap_voc10': instance_metrics.VOC2010AveragePrecision(),
},
)
y_true = {
'boxes': [[
[12, 12, 15, 15],
[16, 16, 20, 20],
[0, 0, 5, 5],
[6, 6, 10, 10],
]],
# 1x1 mask
'masks': [[[[1.0]], [[0.9]], [[0.8]], [[0.7]]]],
'classes': [[2, 1, 1, 1]],
'image_info': tf.constant(
[[[32, 32], [32, 32], [1, 1], [0, 0]]], dtype=tf.float32
),
}
y_pred = {
'detection_boxes': [[
[12, 12, 15, 15],
# The duplicate detection with lower score won't be counted as TP.
[12, 12, 15, 15],
[16, 19, 20, 20],
[1, 1, 6, 6],
[6, 6, 11, 11],
]],
# 1x1 mask
'detection_masks': [[[[1.0]], [[0.9]], [[0.8]], [[0.7]], [[0.6]]]],
'detection_classes': [[1, 1, 1, 2, 1]],
'detection_scores': [[0.3, 0.25, 0.4, 0.6, 0.8]],
}
metrics.update_state(y_true, y_pred)
result = metrics.result()
self.assertAllClose(
result['ap_coco'],
[[0.0, 0.663366, 0.0], [0.0, 0.336634, 0.0]],
atol=1e-4,
)
self.assertAllClose(
result['ap_voc10'],
[[0.0, 2.0 / 3.0, 0.0], [0.0, 1.0 / 3.0, 0.0]],
atol=1e-4,
)
self.assertAllClose(
result['precisions'],
[
[[0.0, 0.5, 0.0], [0.0, 0.25, 0.0]],
[[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]],
],
atol=1e-4,
)
self.assertAllClose(
result['recalls'],
[
[[0.0, 2.0 / 3.0, 0.0], [0.0, 1.0 / 3.0, 0.0]],
[[0.0, 1.0 / 3, 0.0], [0.0, 1.0 / 3, 0.0]],
],
atol=1e-4,
)
self.assertAllEqual(result['valid_classes'], [False, True, True])
def test_mask_metrics_with_instance_rescaled(self):
metrics = instance_metrics.InstanceMetrics(
name='per_class_ap',
use_masks=True,
num_classes=3,
iou_thresholds=(0.5,),
confidence_thresholds=(0.5,),
mask_output_boundary=(10, 10),
average_precision_algorithms={
'ap_coco': instance_metrics.COCOAveragePrecision(),
'ap_voc10': instance_metrics.VOC2010AveragePrecision(),
},
)
y_true = {
# Instances are rescaled to (10, 10) boundary.
'boxes': [[[0, 0, 8, 8], [10, 10, 20, 20]]],
'masks': [[
[
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 0],
[1, 1, 1, 0],
],
[
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0],
],
]],
'classes': [[1, 2]],
'image_info': tf.constant(
[[[20, 20], [20, 20], [1, 1], [0, 0]]], dtype=tf.float32
),
}
y_pred = {
# Instances are rescaled to (10, 10) boundary.
'detection_boxes': [[[0, 1, 8, 9], [10, 10, 20, 20]]],
'detection_masks': [[
[
[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 1, 1, 0],
],
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
],
]],
'detection_classes': [[1, 2]],
'detection_scores': [[0.9, 0.8]],
}
metrics.update_state(y_true, y_pred)
result = metrics.result()
self.assertAllClose(
result['precisions'],
[[[0.0, 1.0, 0.0]]],
atol=1e-4,
)
self.assertAllClose(
result['recalls'],
[[[0.0, 1.0, 0.0]]],
atol=1e-4,
)
self.assertAllClose(
result['ap_coco'],
[[0.0, 1.0, 0.0]],
atol=1e-4,
)
self.assertAllClose(
result['ap_voc10'],
[[0.0, 1.0, 0.0]],
atol=1e-4,
)
self.assertAllEqual(result['valid_classes'], [False, True, True])
@parameterized.parameters(True, False)
def test_instance_metrics_with_crowd(self, use_mask):
metrics = instance_metrics.InstanceMetrics(
name='per_class_ap',
use_masks=use_mask,
num_classes=2,
iou_thresholds=(0.5,),
confidence_thresholds=(0.5,),
mask_output_boundary=(20, 20),
average_precision_algorithms={
'ap_coco': instance_metrics.COCOAveragePrecision(),
'ap_voc10': instance_metrics.VOC2010AveragePrecision(),
},
)
y_true = {
'boxes': [[[0, 1, 4, 10], [0, 5, 4, 11]]],
'masks': [[[[1]], [[1]]]],
'classes': [[1, 1]],
'image_info': tf.constant(
[[[20, 20], [20, 20], [1, 1], [0, 0]]], dtype=tf.float32
),
'is_crowds': [[True, False]],
}
y_pred = {
# Over 50% of first box [0, 0, 4, 4] matches the crowd instance
# [0, 1, 4, 10], so it's excluded from the false positives.
'detection_boxes': [[[0, 0, 4, 4], [1, 5, 5, 11]]],
'detection_masks': [[[[1]], [[1]]]],
'detection_classes': [[1, 1]],
'detection_scores': [[0.9, 0.8]],
}
metrics.update_state(y_true, y_pred)
result = metrics.result()
self.assertAllClose(
result['precisions'],
[[[0.0, 1.0]]],
atol=1e-4,
)
self.assertAllClose(
result['recalls'],
[[[0.0, 1.0]]],
atol=1e-4,
)
self.assertAllClose(
result['ap_coco'],
[[0.0, 1.0]],
atol=1e-4,
)
self.assertAllClose(
result['ap_voc10'],
[[0.0, 1.0]],
atol=1e-4,
)
self.assertAllEqual(result['valid_classes'], [False, True])
if __name__ == '__main__':
tf.test.main()
| 10,813 | 29.291317 | 80 | py |
models | models-master/official/vision/evaluation/iou.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IOU Metrics used for semantic segmentation models."""
from typing import Any, Dict, Optional, Sequence, Union
import numpy as np
import tensorflow as tf
class PerClassIoU(tf.keras.metrics.MeanIoU):
"""Computes the per-class Intersection-Over-Union metric.
This metric computes the IOU for each semantic class.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by
`sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Example:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
>>> # iou = true_positives / (sum_row + sum_col - true_positives)
>>> # result = [1 / (2 + 2 - 1), 1 / (2 + 2 - 1)] = [0.33, 0.33]
>>> m = PerClassIoU(num_classes=2)
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result().numpy()
[0.33333334, 0.33333334]
"""
def result(self):
"""Compute IoU for each class via the confusion matrix."""
sum_over_row = tf.cast(
tf.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
sum_over_col = tf.cast(
tf.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
true_positives = tf.cast(
tf.linalg.tensor_diag_part(self.total_cm), dtype=self._dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
return tf.math.divide_no_nan(true_positives, denominator)
class PerClassIoUV2(tf.keras.metrics.Metric):
"""Computes the per-class Intersection-Over-Union metric.
This implementation converts predictions and ground-truth to binary masks,
and uses logical AND and OR to compute intersection and union, which is much
faster than the PerClassIoU (using confusion matrix) above on TPU, but slower
on CPU and GPU.
"""
def __init__(self,
num_classes: int,
name: Optional[str] = None,
dtype: Optional[Union[str, tf.dtypes.DType]] = tf.float32,
shape: Optional[Sequence[int]] = None,
sparse_y_true: bool = False,
sparse_y_pred: bool = False,
axis: int = -1):
"""Initialization for PerClassIoU.
Args:
num_classes: `int`, number of classes.
name: `str`, name of the metric instance.
dtype: data type of the metric result.
shape: shape of the metrics result.
sparse_y_true: whether ground truth labels are encoded using integers or
dense one-hot vectors.
sparse_y_pred: whether predictions are encoded using integers or dense
one-hot vectors.
axis: (Optional) Defaults to -1. The dimension containing the one-hot
values.
"""
super().__init__(name=name, dtype=dtype)
self.num_classes = num_classes
self.sparse_y_true = sparse_y_true
self.sparse_y_pred = sparse_y_pred
self.axis = axis
# Variable to accumulate the intersection & union.
# intersection = true_positives
if not shape:
shape = [num_classes]
self.intersection_per_class = self.add_weight(
'intersection_per_class', shape, initializer='zeros', dtype=tf.float32)
# union = true_positives + false_positive + false_negative
self.union_per_class = self.add_weight(
'union_per_class', shape, initializer='zeros', dtype=tf.float32)
def reset_state(self):
"""Resets all of the metric state variables."""
self.intersection_per_class.assign(
tf.zeros_like(self.intersection_per_class)
)
self.union_per_class.assign(tf.zeros_like(self.union_per_class))
def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor):
"""Updates metric state by accumulating the variables.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
"""
if self.sparse_y_true:
# Shape: (..., num_classes, ...)
y_true = tf.one_hot(
tf.cast(y_true, dtype=tf.int32),
self.num_classes,
axis=self.axis,
on_value=True,
off_value=False,
)
if self.sparse_y_pred:
# Shape: (..., num_classes, ...)
y_pred = tf.one_hot(
tf.cast(y_pred, dtype=tf.int32),
self.num_classes,
axis=self.axis,
on_value=True,
off_value=False,
)
one_hot_axis = self.axis if self.axis >= 0 else (
len(y_true.get_shape().as_list()) + self.axis)
# Reduce sum the leading dimensions.
# Shape: (num_classes, ...)
current_intersection = tf.math.count_nonzero(
y_pred & y_true, axis=np.arange(one_hot_axis), dtype=tf.float32
)
# Shape: (num_classes, ...)
current_union = tf.math.count_nonzero(
y_pred | y_true, axis=np.arange(one_hot_axis), dtype=tf.float32
)
self.intersection_per_class.assign_add(
tf.cast(current_intersection, self.intersection_per_class.dtype))
self.union_per_class.assign_add(
tf.cast(current_union, self.union_per_class.dtype))
def result(self) -> tf.Tensor:
"""Computes IoU for each class."""
return tf.cast(
tf.math.divide_no_nan(self.intersection_per_class,
self.union_per_class), self.dtype)
def get_config(self) -> Dict[str, Any]:
"""Returns the serializable config of the metric."""
return {
'num_classes': self.num_classes,
'name': self.name,
'dtype': self.dtype,
'sparse_y_true': self.sparse_y_true,
'sparse_y_pred': self.sparse_y_pred,
'axis': self.axis,
}
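# Example (a minimal sketch, not part of the original module): PerClassIoUV2
# with sparse integer inputs on the same 4-pixel example used in the
# PerClassIoU docstring above.
#
# >>> m = PerClassIoUV2(num_classes=2, sparse_y_true=True, sparse_y_pred=True)
# >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
# >>> m.result().numpy()
# array([0.33333334, 0.33333334], dtype=float32)
#
# The per-class intersection is [1, 1] and the per-class union is [3, 3], so
# the per-class IoU is [1/3, 1/3], matching the confusion-matrix implementation.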
| 6,354 | 34.702247 | 79 | py |
models | models-master/official/vision/evaluation/segmentation_metrics.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for segmentation."""
from typing import Optional, Sequence, Tuple, Union
import tensorflow as tf
from official.vision.evaluation import iou
from official.vision.ops import box_ops
from official.vision.ops import spatial_transform_ops
class MeanIoU(tf.keras.metrics.MeanIoU):
"""Mean IoU metric for semantic segmentation.
This class utilizes tf.keras.metrics.MeanIoU to perform batched mean iou when
both input images and ground-truth masks are resized to the same size
(rescale_predictions=False). It also computes mean IoU on ground-truth
original sizes, in which case, each prediction is rescaled back to the
original image size.
"""
def __init__(self,
num_classes,
rescale_predictions=False,
name=None,
dtype=None):
"""Constructs Segmentation evaluator class.
Args:
num_classes: `int`, number of classes.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale
predictions.
name: `str`, name of the metric instance.
dtype: data type of the metric result.
"""
self._rescale_predictions = rescale_predictions
super().__init__(num_classes=num_classes, name=name, dtype=dtype)
def update_state(self, y_true, y_pred):
"""Updates metric state.
Args:
y_true: `dict`, dictionary with the following name, and key values.
- masks: [batch, height, width, 1], ground-truth masks.
- valid_masks: [batch, height, width, 1], valid elements in the mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
y_pred: Tensor [batch, height_p, width_p, num_classes], predicated masks.
"""
predictions, masks, valid_masks = preprocess_inputs(
y_true, y_pred, self._rescale_predictions)
# Ignored mask elements are set to zero for fitting the confusion matrix.
masks = tf.where(valid_masks, masks, tf.zeros_like(masks))
predictions = tf.argmax(predictions, axis=3)
flatten_predictions = tf.reshape(predictions, shape=[-1])
flatten_masks = tf.reshape(masks, shape=[-1])
flatten_valid_masks = tf.reshape(valid_masks, shape=[-1])
super().update_state(
y_true=flatten_masks,
y_pred=flatten_predictions,
sample_weight=tf.cast(flatten_valid_masks, tf.float32))
class PerClassIoU(MeanIoU):
"""Per class IoU metric for semantic segmentation."""
def result(self):
"""Compute IoU for each class via the confusion matrix."""
sum_over_row = tf.cast(
tf.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
sum_over_col = tf.cast(
tf.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
true_positives = tf.cast(
tf.linalg.tensor_diag_part(self.total_cm), dtype=self._dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
return tf.math.divide_no_nan(true_positives, denominator)
class PerClassIoUV2(iou.PerClassIoUV2):
"""Computes the per-class IoU metric for semantic segmentation.
This implementation converts predictions and ground truth to binary masks,
and uses logical AND and OR to compute intersection and union, which is much
faster than the MeanIoU and PerClassIoU (using confusion matrix) above on TPU,
but slower on CPU and GPU.
"""
def __init__(self,
num_classes: int,
rescale_predictions: bool = False,
name: Optional[str] = None,
dtype: Optional[Union[str, tf.dtypes.DType]] = tf.float32,
shape: Optional[Sequence[int]] = None,
axis: int = -1):
"""Constructs Segmentation evaluator class.
Args:
num_classes: `int`, number of classes.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale
predictions.
name: `str`, name of the metric instance.
dtype: data type of the metric result.
shape: shape of the metrics result.
axis: (Optional) Defaults to -1. The dimension containing the one-hot
values.
"""
super().__init__(
num_classes=num_classes, name=name, dtype=dtype, shape=shape, axis=axis)
self._rescale_predictions = rescale_predictions
def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor):
"""Updates metric state.
Args:
y_true: `dict`, dictionary with the following name, and key values.
- masks: [batch, height, width, num_layers], ground-truth masks. The
num_layers is 1 by default, while all the operations in this function
support num_layers > 1.
- valid_masks: [batch, height, width, num_layers], valid elements in the
mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
y_pred: Tensor [batch, height_p, width_p, num_classes], predicated masks.
"""
logits, gt_masks, valid_masks = preprocess_inputs(y_true, y_pred,
self._rescale_predictions)
valid_masks = tf.cast(valid_masks, tf.bool)
gt_binary_masks = tf.one_hot(
tf.cast(gt_masks[..., 0], dtype=tf.int32),
depth=self.num_classes,
on_value=True,
off_value=False,
)
gt_binary_masks &= valid_masks
predictions_binary_masks = tf.one_hot(
tf.argmax(logits, axis=-1, output_type=tf.int32),
depth=self.num_classes,
on_value=True,
off_value=False,
)
predictions_binary_masks &= valid_masks
super().update_state(
y_true=gt_binary_masks, y_pred=predictions_binary_masks
)
class MeanIoUV2(PerClassIoUV2):
"""Computes the mean IoU metric for semantic segmentation."""
def __init__(self,
target_class_ids: Optional[Tuple[int, ...]] = None,
**kwargs):
"""Initializes the class.
Args:
target_class_ids: computes mean IoU for the target classes. Selects all
the classes if empty.
**kwargs: the other arguments for initializing the base class.
"""
super().__init__(**kwargs)
self._target_class_ids = target_class_ids
def result(self) -> tf.Tensor:
"""Average the IoUs of all the classes."""
# (num_classes, )
per_class_ious = super().result()
if self._target_class_ids:
# (num_classes, )
target_class_indicators = tf.reduce_max(
tf.one_hot(
self._target_class_ids,
depth=self.num_classes,
dtype=per_class_ious.dtype),
axis=0)
return tf.math.divide_no_nan(
tf.reduce_sum(per_class_ious * target_class_indicators),
tf.reduce_sum(target_class_indicators))
else:
return tf.reduce_mean(per_class_ious)
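# Example of the averaging above (a minimal sketch, not part of the original
# module): if the per-class IoUs are [0.2, 0.6, 0.8] for classes [0, 1, 2] and
# `target_class_ids=(1, 2)`, the one-hot indicator reduces to [0, 1, 1], so the
# result is (0.6 + 0.8) / 2 = 0.7. With `target_class_ids` unset, the result is
# the plain mean over all classes, (0.2 + 0.6 + 0.8) / 3 ~= 0.533.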
def preprocess_inputs(
y_true: tf.Tensor, y_pred: tf.Tensor,
rescale_predictions: bool) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Pre-processes the inputs (predictions and ground-truth) of the metrics.
Args:
y_true: `dict`, dictionary with the following name, and key values.
- masks: [batch, height, width, num_layers], ground-truth masks. The
num_layers is 1 by default, while all the operations in this function
support num_layers > 1.
- valid_masks: [batch, height, width, num_layers], valid elements in the
mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
y_pred: tensor [batch, height_p, width_p, num_classes], predicated masks.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale predictions.
Returns:
logits: a float tensor in shape [batch, height, width, num_classes], which
stores the raw output of the model.
gt_masks: an int tensor in shape [batch, height, width, 1], which stores the
ground-truth masks.
valid_masks: a bool tensor in shape [batch, height, width, 1], which
indicates the valid elements of the masks.
"""
logits = y_pred
gt_masks = y_true['masks']
valid_masks = y_true['valid_masks']
images_info = y_true['image_info']
if isinstance(logits, tuple) or isinstance(logits, list):
logits = tf.concat(logits, axis=0)
gt_masks = tf.concat(gt_masks, axis=0)
valid_masks = tf.concat(valid_masks, axis=0)
images_info = tf.concat(images_info, axis=0)
# The pixel is valid if any layer of the masks is valid at that pixel.
# (batch_size, height, width)
valid_masks = tf.reduce_any(tf.cast(valid_masks, tf.bool), axis=-1)
gt_masks_size = tf.shape(gt_masks)[1:3]
if rescale_predictions:
# Scale back predictions to original image shapes and pad to mask size.
# Note: instead of cropping the masks to image shape (dynamic), here we
# pad the rescaled predictions to mask size (fixed). And update the
# valid_masks to mask out the pixels outside the original image shape.
logits, image_shape_masks = (
_rescale_and_pad_predictions(
logits, images_info, output_size=gt_masks_size))
# Only the area within the original image shape is valid.
# (batch_size, height, width)
valid_masks &= image_shape_masks
else:
logits = tf.image.resize(
logits, gt_masks_size, method=tf.image.ResizeMethod.BILINEAR)
# (batch_size, height, width, 1)
valid_masks = valid_masks[..., tf.newaxis]
return logits, gt_masks, valid_masks
def _rescale_and_pad_predictions(
predictions: tf.Tensor, images_info: tf.Tensor,
output_size: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Scales back predictions to original image shapes and pads to output size.
Args:
predictions: A tensor in shape [batch, height, width, num_classes] which
stores the model predictions.
images_info: A tensor in shape [batch, 4, 2] that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width], [y_scale,
x_scale], [y_offset, x_offset]], where [desired_height, desired_width] is
the actual scaled image size, and [y_scale, x_scale] is the scaling
factor, which is the ratio of scaled dimension / original dimension.
output_size: A list/tuple/tensor stores the size of the padded output in
[output_height, output_width].
Returns:
predictions: A tensor in shape [batch, output_height, output_width,
num_classes] which stores the rescaled and padded predictions.
image_shape_masks: A bool tensor in shape [batch, output_height,
output_width] where the pixels inside the original image shape are true,
otherwise false.
"""
# (batch_size, 2)
image_shape = tf.cast(images_info[:, 0, :], tf.int32)
desired_size = tf.cast(images_info[:, 1, :], tf.float32)
image_scale = tf.cast(images_info[:, 2, :], tf.float32)
offset = tf.cast(images_info[:, 3, :], tf.int32)
rescale_size = tf.cast(tf.math.ceil(desired_size / image_scale), tf.int32)
# Rescale the predictions, then crop to the original image shape and
# finally pad zeros to match the mask size.
predictions = (
spatial_transform_ops.bilinear_resize_with_crop_and_pad(
predictions,
rescale_size,
crop_offset=offset,
crop_size=image_shape,
output_size=output_size))
# (batch_size, 2)
y0_x0 = tf.broadcast_to(
tf.constant([[0, 0]], dtype=image_shape.dtype), tf.shape(image_shape))
# (batch_size, 4)
image_shape_bbox = tf.concat([y0_x0, image_shape], axis=1)
# (batch_size, height, width)
image_shape_masks = box_ops.bbox2mask(
bbox=image_shape_bbox,
image_height=output_size[0],
image_width=output_size[1],
dtype=tf.bool)
return predictions, image_shape_masks
| 13,770 | 39.502941 | 80 | py |
models | models-master/official/vision/evaluation/instance_metrics.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for instance detection & segmentation."""
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from official.vision.ops import box_ops
from official.vision.ops import mask_ops
class AveragePrecision(tf.keras.layers.Layer):
"""The algorithm which computes average precision from P-R curve."""
def __init__(self, *args, **kwargs):
# Enforce the `AveragePrecision` to operate in `float32` given the
# implementation requirements.
super().__init__(*args, dtype=tf.float32, **kwargs)
def call(self, precisions, recalls):
"""Computes average precision."""
raise NotImplementedError
class COCOAveragePrecision(AveragePrecision):
"""Average precision in COCO style.
In COCO, AP is defined as the mean of interpolated precisions at a set of 101
equally spaced recall points [0, 0.01, ..., 1]. For each recall point r,
the precision is interpolated to the maximum precision with corresponding
recall r' >= r.
The VOC challenges before 2010 used a similar method, but with only 11 recall
points [0, 0.1, ..., 1].
"""
def __init__(
self, num_recall_eval_points: int = 101, recalls_desc: bool = False
):
"""Initialization for COCOAveragePrecision.
Args:
num_recall_eval_points: the number of equally spaced recall points used
for interpolating the precisions.
recalls_desc: If true, the recalls are in descending order.
"""
super().__init__()
self._num_recall_eval_points = num_recall_eval_points
self._recalls_desc = recalls_desc
def get_config(self) -> Dict[str, Any]:
return {
'num_recall_eval_points': self._num_recall_eval_points,
'recalls_desc': self._recalls_desc,
}
def call(self, precisions: tf.Tensor, recalls: tf.Tensor) -> tf.Tensor:
"""Computes average precision.
Args:
precisions: a tensor in shape (dim_0, ..., num_confidences) which stores a
list of precision values at different confidence thresholds with
arbitrary numbers of leading dimensions.
recalls: a tensor in shape (dim_0, ..., num_confidences) which stores a
list of recall values at different confidence threshold with arbitrary
numbers of leading dimensions.
Returns:
A tensor in shape (dim_0, ...), which stores the area under P-R curve.
"""
p = precisions
r = recalls
if not isinstance(p, tf.Tensor):
p = tf.convert_to_tensor(p)
if not isinstance(r, tf.Tensor):
r = tf.convert_to_tensor(r)
if self._recalls_desc:
p = tf.reverse(p, axis=[-1])
r = tf.reverse(r, axis=[-1])
r_eval_points = tf.linspace(0.0, 1.0, self._num_recall_eval_points)
# (dim_0, ..., num_recall_eval_points)
# For each recall eval point, the precision is interpolated to the maximum
# precision with corresponding recall >= the recall eval point.
p_max = tf.reduce_max(
p[..., tf.newaxis, :]
* tf.cast(
r[..., tf.newaxis, :] >= r_eval_points[:, tf.newaxis], dtype=p.dtype
),
axis=-1,
)
# (dim_0, ...)
return tf.reduce_mean(p_max, axis=-1)
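# Worked example (a sketch consistent with instance_metrics_test.py in this
# repository): with precisions [1.0, 1.0, 0.5, 0.8, 0.4, 0.5, 0.2, 0.3] and
# recalls [0.0, 0.1, 0.1, 0.5, 0.5, 0.7, 0.7, 1.0], the interpolated precision
# is 1.0 for the 11 recall points in [0, 0.1], 0.8 for the 40 points in
# (0.1, 0.5], 0.5 for the 20 points in (0.5, 0.7] and 0.3 for the 30 points in
# (0.7, 1.0]. The mean over the 101 points is
# (11 * 1.0 + 40 * 0.8 + 20 * 0.5 + 30 * 0.3) / 101 ~= 0.6139.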
class VOC2010AveragePrecision(AveragePrecision):
"""Average precision in VOC 2010 style.
Since VOC 2010, first compute an approximation of the measured P-R curve
with precision monotonically decreasing, by setting the precision for recall
r to the maximum precision obtained for any recall r' >= r. Then compute the
AP as the area under this curve by numerical integration.
"""
def __init__(self, recalls_desc: bool = False):
"""Initialization for VOC10AveragePrecision.
Args:
recalls_desc: If true, the recalls are in descending order.
"""
super().__init__()
self._recalls_desc = recalls_desc
def get_config(self) -> Dict[str, Any]:
return {
'recalls_desc': self._recalls_desc,
}
def call(self, precisions: tf.Tensor, recalls: tf.Tensor) -> tf.Tensor:
"""Computes average precision.
Args:
precisions: a tensor in shape (dim_0, ..., num_confidences) which stores a
list of precision values at different confidence thresholds with
arbitrary numbers of leading dimensions.
recalls: a tensor in shape (dim_0, ..., num_confidences) which stores a
list of recall values at different confidence threshold with arbitrary
numbers of leading dimensions.
Returns:
A tensor in shape (dim_0, ...), which stores the area under P-R curve.
"""
p = precisions
r = recalls
if not isinstance(p, tf.Tensor):
p = tf.convert_to_tensor(p)
if not isinstance(r, tf.Tensor):
r = tf.convert_to_tensor(r)
if self._recalls_desc:
p = tf.reverse(p, axis=[-1])
r = tf.reverse(r, axis=[-1])
axis_indices = list(range(len(p.get_shape())))
# Transpose to (num_confidences, ...), because tf.scan only applies to the
# first dimension.
p = tf.transpose(p, np.roll(axis_indices, 1))
# Compute cumulative maximum in reverse order.
# For example, the reverse cumulative maximum of [5,6,3,4,2,1] is
# [6,6,4,4,2,1].
p = tf.scan(
tf.maximum, elems=p, initializer=tf.reduce_min(p, axis=0), reverse=True
)
# Transpose back to (..., num_confidences)
p = tf.transpose(p, np.roll(axis_indices, -1))
# Prepend 0 to r and compute the delta.
r = tf.concat([tf.zeros_like(r[..., 0:1]), r], axis=-1)
delta_r = tf.roll(r, shift=-1, axis=-1) - r
return tf.reduce_sum(p * delta_r[..., :-1], axis=-1)
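# Worked example (a sketch consistent with instance_metrics_test.py in this
# repository): for precisions [1.0, 1.0, 0.5, 0.8, 0.4, 0.5, 0.2, 0.3] and
# recalls [0.0, 0.1, 0.1, 0.5, 0.5, 0.7, 0.7, 1.0], the reverse cumulative
# maximum of the precisions is [1.0, 1.0, 0.8, 0.8, 0.5, 0.5, 0.3, 0.3], and
# the recall deltas are [0.0, 0.1, 0.0, 0.4, 0.0, 0.2, 0.0, 0.3]. The area
# under the curve is 1.0 * 0.1 + 0.8 * 0.4 + 0.5 * 0.2 + 0.3 * 0.3 = 0.61.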
class MatchingAlgorithm(tf.keras.layers.Layer):
"""The algorithm which matches detections to ground truths."""
def __init__(self, *args, **kwargs):
# Enforce the `MatchingAlgorithm` to operate in `float32` given the
# implementation requirements.
super().__init__(*args, dtype=tf.float32, **kwargs)
def call(
self,
detection_to_gt_ious: tf.Tensor,
detection_classes: tf.Tensor,
detection_scores: tf.Tensor,
gt_classes: tf.Tensor,
):
"""Matches detections to ground truths."""
raise NotImplementedError
class COCOMatchingAlgorithm(MatchingAlgorithm):
"""The detection matching algorithm used in COCO."""
def __init__(self, iou_thresholds: Tuple[float, ...]):
"""Initialization for COCOMatchingAlgorithm.
Args:
iou_thresholds: a list of IoU thresholds.
"""
super().__init__()
self._iou_thresholds = iou_thresholds
def get_config(self) -> Dict[str, Any]:
return {
'iou_thresholds': self._iou_thresholds,
}
def call(
self,
detection_to_gt_ious: tf.Tensor,
detection_classes: tf.Tensor,
detection_scores: tf.Tensor,
gt_classes: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Matches detections to ground truths.
This is the matching algorithm used in COCO. First, sort all the detections
based on the scores from high to low. Then for each detection, iterates
through all ground truth. The unmatched ground truth with the highest IoU
greater than the threshold is matched to the detection.
Args:
detection_to_gt_ious: a tensor in shape of (batch_size, num_detections,
num_gts) which stores the IoUs for each pair of detection and ground
truth.
detection_classes: a tensor in shape of (batch_size, num_detections) which
stores the classes of the detections.
detection_scores: a tensor in shape of (batch_size, num_detections) which
stores the scores of the detections.
gt_classes: a tensor in shape of (batch_size, num_gts) which stores the
classes of the ground truth boxes.
Returns:
Two bool tensors in shape of (batch_size, num_detections,
num_iou_thresholds) and (batch_size, num_gts, num_iou_thresholds) which
indicates whether the detections and ground truths are true positives at
different IoU thresholds.
"""
batch_size = tf.shape(detection_classes)[0]
num_detections = detection_classes.get_shape()[1]
num_gts = gt_classes.get_shape()[1]
num_iou_thresholds = len(self._iou_thresholds)
# (batch_size, num_detections)
sorted_detection_indices = tf.argsort(
detection_scores, axis=1, direction='DESCENDING'
)
# (batch_size, num_detections)
sorted_detection_classes = tf.gather(
detection_classes, sorted_detection_indices, batch_dims=1
)
# (batch_size, num_detections, num_gts)
sorted_detection_to_gt_ious = tf.gather(
detection_to_gt_ious, sorted_detection_indices, batch_dims=1
)
init_loop_vars = (
0, # i: the loop counter
tf.zeros(
[batch_size, num_detections, num_iou_thresholds], dtype=tf.bool
), # detection_is_tp
tf.zeros(
[batch_size, num_gts, num_iou_thresholds], dtype=tf.bool
), # gt_is_tp
)
def _match_detection_to_gt_loop_body(
i: int, detection_is_tp: tf.Tensor, gt_is_tp: tf.Tensor
) -> Tuple[int, tf.Tensor, tf.Tensor]:
"""Iterates the sorted detections and matches to the ground truths."""
# (batch_size, num_gts)
gt_ious = sorted_detection_to_gt_ious[:, i, :]
# (batch_size, num_gts, num_iou_thresholds)
gt_matches_detection = (
# Ground truth is not matched yet.
~gt_is_tp
# IoU is greater than the threshold.
& (gt_ious[:, :, tf.newaxis] > self._iou_thresholds)
# Classes are matched.
& (
(sorted_detection_classes[:, i][:, tf.newaxis] == gt_classes)
& (gt_classes > 0)
)[:, :, tf.newaxis]
)
# Finds the matched ground truth with max IoU.
# If there is no matched ground truth, the argmax op will return index 0
# in this step. It's fine because it will be masked out in the next step.
# (batch_size, num_iou_thresholds)
matched_gt_with_max_iou = tf.argmax(
tf.cast(gt_matches_detection, gt_ious.dtype)
* gt_ious[:, :, tf.newaxis],
axis=1,
output_type=tf.int32,
)
# (batch_size, num_gts, num_iou_thresholds)
gt_matches_detection &= tf.one_hot(
matched_gt_with_max_iou,
depth=num_gts,
on_value=True,
off_value=False,
axis=1,
)
# Updates detection_is_tp
# Map index back to the unsorted detections.
# (batch_size, num_detections, num_iou_thresholds)
detection_is_tp |= (
tf.reduce_any(gt_matches_detection, axis=1, keepdims=True)
& tf.one_hot(
sorted_detection_indices[:, i],
depth=num_detections,
on_value=True,
off_value=False,
axis=-1,
)[:, :, tf.newaxis]
)
detection_is_tp.set_shape([None, num_detections, num_iou_thresholds])
# Updates gt_is_tp
# (batch_size, num_gts, num_iou_thresholds)
gt_is_tp |= gt_matches_detection
gt_is_tp.set_shape([None, num_gts, num_iou_thresholds])
# Returns the updated loop vars.
return (i + 1, detection_is_tp, gt_is_tp)
_, detection_is_tp_result, gt_is_tp_result = tf.while_loop(
cond=lambda i, *_: i < num_detections,
body=_match_detection_to_gt_loop_body,
loop_vars=init_loop_vars,
parallel_iterations=32,
maximum_iterations=num_detections,
)
return detection_is_tp_result, gt_is_tp_result
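# Example (a sketch consistent with instance_metrics_test.py in this
# repository): with IoU thresholds (0.5, 0.85), two detections of class 1 with
# scores (0.6, 0.8), three ground truths of classes (1, 1, 2), and
# detection-to-gt IoUs [[0.8, 0.7, 0.95], [0.9, 0.6, 0.3]], the higher-scoring
# detection is matched first, to gt 0 (IoU 0.9), at both thresholds; the
# lower-scoring detection then matches gt 1 (IoU 0.7) at threshold 0.5 only,
# and gt 2 is never matched because its class differs. The result is
# detection_is_tp = [[True, False], [True, True]] and
# gt_is_tp = [[True, True], [True, False], [False, False]].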
def _shift_and_rescale_boxes(
boxes: tf.Tensor,
output_boundary: Tuple[int, int],
) -> tf.Tensor:
"""Shift and rescale the boxes to fit in the output boundary.
The output boundary of the boxes can be smaller than the original image size
for accelerating the downstream calculations (dynamic mask resizing, mask IoU,
etc.).
For each image of the batch:
(1) find the upper boundary (min_ymin) and the left boundary (min_xmin) of all
the boxes.
(2) shift all the boxes up min_ymin pixels and left min_xmin pixels.
(3) find the new lower boundary (max_ymax) and the right boundary (max_xmax)
of all the boxes.
(4) if max_ymax > output_height or max_xmax > output_width (some boxes don't
fit in the output boundary), downsample all the boxes by ratio:
min(output_height / max_ymax, output_width / max_xmax). The aspect ratio
is not changed.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. The last dimension is
the pixel coordinates in [ymin, xmin, ymax, xmax] form.
output_boundary: two integers that represent the height and width of the
output.
Returns:
The tensor [batch_size, N, 4] of the output boxes.
"""
boxes = tf.cast(boxes, dtype=tf.float32)
# (batch_size, num_boxes, 1)
is_valid_box = tf.reduce_any(
(boxes[:, :, 2:4] - boxes[:, :, 0:2]) > 0, axis=-1, keepdims=True
)
# (batch_size, 2)
min_ymin_xmin = tf.reduce_min(
tf.where(is_valid_box, boxes, np.inf)[:, :, 0:2],
axis=1,
)
# (batch_size, num_boxes, 4)
boxes = tf.where(
is_valid_box,
boxes - tf.tile(min_ymin_xmin, [1, 2])[:, tf.newaxis, :],
0.0,
)
# (batch_size,)
max_ymax = tf.reduce_max(boxes[:, :, 2], axis=1)
max_xmax = tf.reduce_max(boxes[:, :, 3], axis=1)
# (batch_size,)
y_resize_ratio = output_boundary[0] / max_ymax
x_resize_ratio = output_boundary[1] / max_xmax
# (batch_size,)
downsampling_ratio = tf.math.minimum(
tf.math.minimum(y_resize_ratio, x_resize_ratio), 1.0
)
# (batch_size, num_boxes, 4)
return boxes * downsampling_ratio[:, tf.newaxis, tf.newaxis]
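# Example (a sketch consistent with instance_metrics_test.py in this
# repository): with boxes [[2, 3, 4, 9], [15, 17, 18, 23]] and output_boundary
# (20, 20), the boxes are shifted up by 2 and left by 3 to
# [[0, 0, 2, 6], [13, 14, 16, 20]]; since max_ymax = 16 <= 20 and
# max_xmax = 20 <= 20, no downsampling is applied. With output_boundary
# (10, 10), the downsampling ratio is min(10 / 16, 10 / 20) = 0.5, giving
# [[0, 0, 1, 3], [6.5, 7, 8, 10]].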
def _count_detection_type(
detection_type_mask: tf.Tensor,
detection_classes: tf.Tensor,
flattened_binned_confidence_one_hot: tf.Tensor,
num_classes: int,
) -> tf.Tensor:
"""Counts detection type grouped by IoU thresholds, classes and confidence bins.
Args:
detection_type_mask: a bool tensor in shape of (batch_size, num_detections,
num_iou_thresholds), which indicates a certain type of detections (e.g.
true positives).
detection_classes: a tensor in shape of (batch_size, num_detections) which
stores the classes of the detections.
flattened_binned_confidence_one_hot: a one-hot bool tensor in shape of
(batch_size * num_detections, num_confidence_bins + 1) which indicates the
binned confidence score of each detection.
num_classes: the number of classes.
Returns:
A tensor in shape of (num_iou_thresholds, num_classes,
num_confidence_bins + 1) which stores the count grouped by IoU thresholds,
classes and confidence bins.
"""
num_iou_thresholds = detection_type_mask.get_shape()[-1]
# (batch_size, num_detections, num_iou_thresholds)
masked_classes = tf.where(
detection_type_mask, detection_classes[..., tf.newaxis], -1
)
# (num_iou_thresholds, batch_size * num_detections)
flattened_masked_classes = tf.transpose(
tf.reshape(masked_classes, [-1, num_iou_thresholds])
)
# (num_iou_thresholds, num_classes, batch_size * num_detections)
flattened_masked_classes_one_hot = tf.one_hot(
flattened_masked_classes, depth=num_classes, axis=1
)
# (num_iou_thresholds * num_classes, batch_size * num_detections)
flattened_masked_classes_one_hot = tf.reshape(
flattened_masked_classes_one_hot,
[num_iou_thresholds * num_classes, -1],
)
# (num_iou_thresholds * num_classes, num_confidence_bins + 1)
count = tf.matmul(
flattened_masked_classes_one_hot,
tf.cast(flattened_binned_confidence_one_hot, tf.float32),
a_is_sparse=True,
b_is_sparse=True,
)
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
count = tf.reshape(count, [num_iou_thresholds, num_classes, -1])
# Clears the count of class 0 (background)
count *= 1.0 - tf.eye(num_classes, 1, dtype=count.dtype)
return count
class InstanceMetrics(tf.keras.metrics.Metric):
"""Reports the metrics of instance detection & segmentation."""
def __init__(
self,
num_classes: int,
use_masks: bool = False,
iou_thresholds: Tuple[float, ...] = (0.5,),
confidence_thresholds: Tuple[float, ...] = (),
num_confidence_bins: int = 1000,
mask_output_boundary: Tuple[int, int] = (640, 640),
matching_algorithm: Optional[MatchingAlgorithm] = None,
average_precision_algorithms: Optional[
Dict[str, AveragePrecision]
] = None,
name: Optional[str] = None,
dtype: Optional[Union[str, tf.dtypes.DType]] = tf.float32,
**kwargs
):
"""Initialization for AveragePrecision.
Args:
num_classes: the number of classes.
use_masks: if true, use the masks of the instances when calculating the
metrics, otherwise use the boxes.
iou_thresholds: a sequence of IoU thresholds over which to calculate the
instance metrics.
confidence_thresholds: a sequence of confidence thresholds. If set, also
report precision and recall at each confidence threshold, otherwise,
only report average precision.
num_confidence_bins: the number of confidence bins used for bin sort.
mask_output_boundary: two integers that represent the height and width of
the boundary where the resized instance masks are pasted. For each
example, if any of the detection or ground truth boxes is out of the
boundary, shift and resize all the detection and ground truth boxes of
the example to fit them into the boundary. The output boundary of the
pasted masks can be smaller than the real image size for accelerating
the calculation.
matching_algorithm: the algorithm which matches detections to ground
truths.
average_precision_algorithms: the algorithms which compute average
precision from P-R curve. The keys are used in the metrics results.
name: the name of the metric instance.
dtype: data type of the metric result.
**kwargs: Additional keywords arguments.
"""
super().__init__(name=name, dtype=dtype, **kwargs)
self._num_classes = num_classes
self._use_masks = use_masks
self._iou_thresholds = iou_thresholds
self._confidence_thresholds = confidence_thresholds
self._num_iou_thresholds = len(iou_thresholds)
self._num_confidence_bins = num_confidence_bins
self._mask_output_boundary = mask_output_boundary
if not matching_algorithm:
self._matching_algorithm = COCOMatchingAlgorithm(iou_thresholds)
else:
self._matching_algorithm = matching_algorithm
if not average_precision_algorithms:
self._average_precision_algorithms = {'ap': COCOAveragePrecision()}
else:
self._average_precision_algorithms = average_precision_algorithms
# Variables
self.tp_count = self.add_weight(
'tp_count',
shape=[
self._num_iou_thresholds,
self._num_classes,
self._num_confidence_bins + 1,
],
initializer='zeros',
dtype=tf.float32,
)
self.fp_count = self.add_weight(
'fp_count',
shape=[
self._num_iou_thresholds,
self._num_classes,
self._num_confidence_bins + 1,
],
initializer='zeros',
dtype=tf.float32,
)
self.gt_count = self.add_weight(
'gt_count',
shape=[self._num_classes],
initializer='zeros',
dtype=tf.float32,
)
def get_config(self) -> Dict[str, Any]:
"""Returns the serializable config of the metric."""
return {
'num_classes': self._num_classes,
'use_masks': self._use_masks,
'iou_thresholds': self._iou_thresholds,
'confidence_thresholds': self._confidence_thresholds,
'num_confidence_bins': self._num_confidence_bins,
'mask_output_boundary': self._mask_output_boundary,
'matching_algorithm': self._matching_algorithm,
'average_precision_algorithms': self._average_precision_algorithms,
'name': self.name,
'dtype': self.dtype,
}
def reset_state(self):
"""Resets all of the metric state variables."""
self.tp_count.assign(tf.zeros_like(self.tp_count))
self.fp_count.assign(tf.zeros_like(self.fp_count))
self.gt_count.assign(tf.zeros_like(self.gt_count))
def update_state(
self, y_true: Dict[str, tf.Tensor], y_pred: Dict[str, tf.Tensor]
):
# (batch_size, num_detections, 4) in absolute coordinates.
detection_boxes = tf.cast(y_pred['detection_boxes'], tf.float32)
# (batch_size, num_detections)
detection_classes = tf.cast(y_pred['detection_classes'], tf.int32)
# (batch_size, num_detections)
detection_scores = tf.cast(y_pred['detection_scores'], tf.float32)
# (batch_size, num_gts, 4) in absolute coordinates.
gt_boxes = tf.cast(y_true['boxes'], tf.float32)
# (batch_size, num_gts)
gt_classes = tf.cast(y_true['classes'], tf.int32)
# (batch_size, num_gts)
if 'is_crowds' in y_true:
gt_is_crowd = tf.cast(y_true['is_crowds'], tf.bool)
else:
gt_is_crowd = tf.zeros_like(gt_classes, dtype=tf.bool)
image_scale = tf.tile(y_true['image_info'][:, 2:3, :], multiples=[1, 1, 2])
detection_boxes = detection_boxes / tf.cast(
image_scale, dtype=detection_boxes.dtype
)
# Step 1: Computes IoUs between the detections and the non-crowd ground
# truths and IoAs between the detections and the crowd ground truths.
if not self._use_masks:
# (batch_size, num_detections, num_gts)
detection_to_gt_ious = box_ops.bbox_overlap(detection_boxes, gt_boxes)
detection_to_gt_ioas = box_ops.bbox_intersection_over_area(
detection_boxes, gt_boxes
)
else:
# Use outer boxes to generate the masks if available.
if 'detection_outer_boxes' in y_pred:
detection_boxes = tf.cast(y_pred['detection_outer_boxes'], tf.float32)
# (batch_size, num_detections, mask_height, mask_width)
detection_masks = tf.cast(y_pred['detection_masks'], tf.float32)
# (batch_size, num_gts, gt_mask_height, gt_mask_width)
gt_masks = tf.cast(y_true['masks'], tf.float32)
num_detections = detection_boxes.get_shape()[1]
# (batch_size, num_detections + num_gts, 4)
all_boxes = _shift_and_rescale_boxes(
tf.concat([detection_boxes, gt_boxes], axis=1),
self._mask_output_boundary,
)
detection_boxes = all_boxes[:, :num_detections, :]
gt_boxes = all_boxes[:, num_detections:, :]
# (batch_size, num_detections, num_gts)
detection_to_gt_ious, detection_to_gt_ioas = (
mask_ops.instance_masks_overlap(
detection_boxes,
detection_masks,
gt_boxes,
gt_masks,
output_size=self._mask_output_boundary,
)
)
# (batch_size, num_detections, num_gts)
detection_to_gt_ious = tf.where(
gt_is_crowd[:, tf.newaxis, :], 0.0, detection_to_gt_ious
)
detection_to_crowd_ioas = tf.where(
gt_is_crowd[:, tf.newaxis, :], detection_to_gt_ioas, 0.0
)
    # Step 2: Counts true positives grouped by IoU thresholds, classes and
# confidence bins.
# (batch_size, num_detections, num_iou_thresholds)
detection_is_tp, _ = self._matching_algorithm(
detection_to_gt_ious, detection_classes, detection_scores, gt_classes
)
# (batch_size * num_detections,)
flattened_binned_confidence = tf.reshape(
tf.cast(detection_scores * self._num_confidence_bins, tf.int32), [-1]
)
# (batch_size * num_detections, num_confidence_bins + 1)
flattened_binned_confidence_one_hot = tf.one_hot(
flattened_binned_confidence, self._num_confidence_bins + 1, axis=1
)
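    # Worked example (added for clarity, not in the original code): with
    # num_confidence_bins=1000, a detection score of 0.8765 falls into bin
    # int(0.8765 * 1000) = 876; the one-hot bin indices let
    # _count_detection_type accumulate per-bin TP/FP counts below.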
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
tp_count = _count_detection_type(
detection_is_tp,
detection_classes,
flattened_binned_confidence_one_hot,
self._num_classes,
)
# Step 3: Counts false positives grouped by IoU thresholds, classes and
# confidence bins.
    # A detection counts as a false positive if it is not a true positive (see
    # above) and does not match a crowd ground truth of the same class.
# (batch_size, num_detections, num_gts, num_iou_thresholds)
detection_matches_crowd = (
(detection_to_crowd_ioas[..., tf.newaxis] > self._iou_thresholds)
& (
detection_classes[:, :, tf.newaxis, tf.newaxis]
== gt_classes[:, tf.newaxis, :, tf.newaxis]
)
& (detection_classes[:, :, tf.newaxis, tf.newaxis] > 0)
)
# (batch_size, num_detections, num_iou_thresholds)
detection_matches_any_crowd = tf.reduce_any(
detection_matches_crowd & ~detection_is_tp[:, :, tf.newaxis, :], axis=2
)
detection_is_fp = ~detection_is_tp & ~detection_matches_any_crowd
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
fp_count = _count_detection_type(
detection_is_fp,
detection_classes,
flattened_binned_confidence_one_hot,
self._num_classes,
)
# Step 4: Counts non-crowd groundtruths grouped by classes.
# (num_classes, )
gt_count = tf.reduce_sum(
tf.one_hot(
tf.where(gt_is_crowd, -1, gt_classes), self._num_classes, axis=-1
),
axis=[0, 1],
)
# Clears the count of class 0 (background).
gt_count *= 1.0 - tf.eye(1, self._num_classes, dtype=gt_count.dtype)[0]
# Accumulates the variables.
self.fp_count.assign_add(tf.cast(fp_count, self.fp_count.dtype))
self.tp_count.assign_add(tf.cast(tp_count, self.tp_count.dtype))
self.gt_count.assign_add(tf.cast(gt_count, self.gt_count.dtype))
def result(self) -> Dict[str, tf.Tensor]:
"""Returns the metrics values as a dict.
Returns:
A `dict` containing:
'ap': a float tensor in shape (num_iou_thresholds, num_classes) which
stores the average precision of each class at different IoU thresholds.
        'precisions': a float tensor in shape (num_confidence_thresholds,
          num_iou_thresholds, num_classes) which stores the precision of each
          class at different confidence thresholds & IoU thresholds. Only
          present if `confidence_thresholds` is set.
        'recalls': a float tensor in shape (num_confidence_thresholds,
          num_iou_thresholds, num_classes) which stores the recall of each
          class at different confidence thresholds & IoU thresholds. Only
          present if `confidence_thresholds` is set.
'valid_classes': a bool tensor in shape (num_classes,). If False, there
is no instance of the class in the ground truth.
"""
result = {
# (num_classes,)
'valid_classes': self.gt_count != 0,
}
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
tp_count_cum_by_confidence = tf.math.cumsum(
self.tp_count, axis=-1, reverse=True
)
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
fp_count_cum_by_confidence = tf.math.cumsum(
self.fp_count, axis=-1, reverse=True
)
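    # Note (added for clarity): the reverse cumulative sums convert the
    # per-bin histograms into counts of detections whose confidence is >= each
    # binned threshold, which is what precision/recall at a cutoff require.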
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
precisions = tf.math.divide_no_nan(
tp_count_cum_by_confidence,
tp_count_cum_by_confidence + fp_count_cum_by_confidence,
)
# (num_iou_thresholds, num_classes, num_confidence_bins + 1)
recalls = tf.math.divide_no_nan(
tp_count_cum_by_confidence, self.gt_count[..., tf.newaxis]
)
if self._confidence_thresholds:
# If confidence_thresholds is set, reports precision and recall at each
# confidence threshold.
confidence_thresholds = tf.cast(
tf.constant(self._confidence_thresholds, dtype=tf.float32)
* self._num_confidence_bins,
dtype=tf.int32,
)
# (num_confidence_thresholds, num_iou_thresholds, num_classes)
result['precisions'] = tf.gather(
tf.transpose(precisions, [2, 0, 1]), confidence_thresholds
)
result['recalls'] = tf.gather(
tf.transpose(recalls, [2, 0, 1]), confidence_thresholds
)
precisions = tf.reverse(precisions, axis=[-1])
recalls = tf.reverse(recalls, axis=[-1])
result.update(
{
# (num_iou_thresholds, num_classes)
key: ap_algorithm(precisions, recalls)
for key, ap_algorithm in self._average_precision_algorithms.items()
}
)
return result
def get_average_precision_metrics_keys(self):
"""Gets the keys of the average precision metrics in the results."""
return self._average_precision_algorithms.keys()
| 29,104 | 36.123724 | 82 | py |
models | models-master/official/vision/evaluation/panoptic_quality.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Panoptic Quality metric.
Panoptic Quality is an instance-based metric for evaluating the task of
image parsing, aka panoptic segmentation.
Please see the paper for details:
"Panoptic Segmentation", Alexander Kirillov, Kaiming He, Ross Girshick,
Carsten Rother and Piotr Dollar. arXiv:1801.00868, 2018.
Note that this metric class is branched from
https://github.com/tensorflow/models/blob/master/research/deeplab/evaluation/panoptic_quality.py
"""
import collections
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from official.vision.ops import box_ops
_EPSILON = 1e-10
def realdiv_maybe_zero(x, y):
"""Element-wise x / y where y may contain zeros, for those returns 0 too."""
return np.where(
np.less(np.abs(y), _EPSILON), np.zeros_like(x), np.divide(x, y))
def _ids_to_counts(id_array):
"""Given a numpy array, a mapping from each unique entry to its count."""
ids, counts = np.unique(id_array, return_counts=True)
return dict(zip(ids, counts))
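# Illustrative example (not part of the original file):
#   _ids_to_counts(np.array([[1, 1], [1, 2]])) == {1: 3, 2: 1}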
class PanopticQuality:
"""Metric class for Panoptic Quality.
"Panoptic Segmentation" by Alexander Kirillov, Kaiming He, Ross Girshick,
Carsten Rother, Piotr Dollar.
https://arxiv.org/abs/1801.00868
"""
def __init__(self, num_categories, ignored_label, max_instances_per_category,
offset):
"""Initialization for PanopticQualityMetric.
Args:
num_categories: The number of segmentation categories (or "classes" in the
dataset).
ignored_label: A category id that is ignored in evaluation, e.g. the void
label as defined in COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
offset: The maximum number of unique labels. This is used, by multiplying
the ground-truth labels, to generate unique ids for individual regions
of overlap between ground-truth and predicted segments.
"""
self.num_categories = num_categories
self.ignored_label = ignored_label
self.max_instances_per_category = max_instances_per_category
self.offset = offset
self.reset()
def _naively_combine_labels(self, category_mask, instance_mask):
"""Naively creates a combined label array from categories and instances."""
return (category_mask.astype(np.uint32) * self.max_instances_per_category +
instance_mask.astype(np.uint32))
def compare_and_accumulate(self, groundtruths, predictions):
"""Compares predictions with ground-truths, and accumulates the metrics.
It is not assumed that instance ids are unique across different categories.
See for example combine_semantic_and_instance_predictions.py in official
PanopticAPI evaluation code for issues to consider when fusing category
and instance labels.
    Instance ids of the ignored category are interpreted as follows: id 0 is
    "void" and the remaining ids are crowd instances.
Args:
      groundtruths: A dictionary of ground-truth labels. It should contain
the following fields.
- category_mask: A 2D numpy uint16 array of ground-truth per-pixel
category labels.
- instance_mask: A 2D numpy uint16 array of ground-truth per-pixel
instance labels.
      predictions: A dictionary of the model outputs. It should contain
the following fields.
- category_array: A 2D numpy uint16 array of predicted per-pixel
category labels.
- instance_array: A 2D numpy uint16 array of predicted instance labels.
"""
groundtruth_category_mask = groundtruths['category_mask']
groundtruth_instance_mask = groundtruths['instance_mask']
predicted_category_mask = predictions['category_mask']
predicted_instance_mask = predictions['instance_mask']
# First, combine the category and instance labels so that every unique
# value for (category, instance) is assigned a unique integer label.
pred_segment_id = self._naively_combine_labels(predicted_category_mask,
predicted_instance_mask)
gt_segment_id = self._naively_combine_labels(groundtruth_category_mask,
groundtruth_instance_mask)
# Pre-calculate areas for all ground-truth and predicted segments.
gt_segment_areas = _ids_to_counts(gt_segment_id)
pred_segment_areas = _ids_to_counts(pred_segment_id)
# We assume there is only one void segment and it has instance id = 0.
void_segment_id = self.ignored_label * self.max_instances_per_category
    # There may be other ignored ground-truth segments with instance id > 0;
    # find those ids using the unique segment ids extracted with the area
    # computation above.
ignored_segment_ids = {
gt_segment_id for gt_segment_id in gt_segment_areas
if (gt_segment_id //
self.max_instances_per_category) == self.ignored_label
}
# Next, combine the ground-truth and predicted labels. Divide up the pixels
# based on which ground-truth segment and predicted segment they belong to,
# this will assign a different 32-bit integer label to each choice of
# (ground-truth segment, predicted segment), encoded as
# gt_segment_id * offset + pred_segment_id.
intersection_id_array = (
gt_segment_id.astype(np.uint64) * self.offset +
pred_segment_id.astype(np.uint64))
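    # Worked example (added for clarity): with offset=256, a pixel belonging
    # to ground-truth segment 37 and predicted segment 21 gets intersection id
    # 37 * 256 + 21 = 9493; counting these ids below gives the overlap area of
    # every (ground-truth segment, predicted segment) pair in a single pass.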
# For every combination of (ground-truth segment, predicted segment) with a
# non-empty intersection, this counts the number of pixels in that
# intersection.
intersection_areas = _ids_to_counts(intersection_id_array)
# Helper function that computes the area of the overlap between a predicted
# segment and the ground-truth void/ignored segment.
def prediction_void_overlap(pred_segment_id):
void_intersection_id = void_segment_id * self.offset + pred_segment_id
return intersection_areas.get(void_intersection_id, 0)
# Compute overall ignored overlap.
def prediction_ignored_overlap(pred_segment_id):
total_ignored_overlap = 0
for ignored_segment_id in ignored_segment_ids:
intersection_id = ignored_segment_id * self.offset + pred_segment_id
total_ignored_overlap += intersection_areas.get(intersection_id, 0)
return total_ignored_overlap
# Sets that are populated with segments which ground-truth/predicted
# segments have been matched with overlapping predicted/ground-truth
# segments respectively.
gt_matched = set()
pred_matched = set()
# Calculate IoU per pair of intersecting segments of the same category.
for intersection_id, intersection_area in intersection_areas.items():
gt_segment_id = int(intersection_id // self.offset)
pred_segment_id = int(intersection_id % self.offset)
gt_category = int(gt_segment_id // self.max_instances_per_category)
pred_category = int(pred_segment_id // self.max_instances_per_category)
if gt_category != pred_category:
continue
# Union between the ground-truth and predicted segments being compared
# does not include the portion of the predicted segment that consists of
# ground-truth "void" pixels.
union = (
gt_segment_areas[gt_segment_id] +
pred_segment_areas[pred_segment_id] - intersection_area -
prediction_void_overlap(pred_segment_id))
iou = intersection_area / union
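      # Note (added for clarity): requiring IoU > 0.5 guarantees that each
      # ground-truth segment matches at most one predicted segment and vice
      # versa, so the accumulation below never double-counts a match.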
if iou > 0.5:
self.tp_per_class[gt_category] += 1
self.iou_per_class[gt_category] += iou
gt_matched.add(gt_segment_id)
pred_matched.add(pred_segment_id)
# Count false negatives for each category.
for gt_segment_id in gt_segment_areas:
if gt_segment_id in gt_matched:
continue
category = gt_segment_id // self.max_instances_per_category
# Failing to detect a void segment is not a false negative.
if category == self.ignored_label:
continue
self.fn_per_class[category] += 1
# Count false positives for each category.
for pred_segment_id in pred_segment_areas:
if pred_segment_id in pred_matched:
continue
      # A false positive is not penalized if it is mostly ignored in the
      # ground-truth.
if (prediction_ignored_overlap(pred_segment_id) /
pred_segment_areas[pred_segment_id]) > 0.5:
continue
category = pred_segment_id // self.max_instances_per_category
self.fp_per_class[category] += 1
def _valid_categories(self):
"""Categories with a "valid" value for the metric, have > 0 instances.
We will ignore the `ignore_label` class and other classes which have
`tp + fn + fp = 0`.
Returns:
Boolean array of shape `[num_categories]`.
"""
valid_categories = np.not_equal(
self.tp_per_class + self.fn_per_class + self.fp_per_class, 0)
if self.ignored_label >= 0 and self.ignored_label < self.num_categories:
valid_categories[self.ignored_label] = False
return valid_categories
def result_per_category(self):
"""For supported metrics, return individual per-category metric values.
Returns:
      A dictionary containing all per-class metrics. Each metric is a numpy
      array of shape `[self.num_categories]`, where index `i` is the metric
      value over only that category.
"""
sq_per_class = realdiv_maybe_zero(self.iou_per_class, self.tp_per_class)
rq_per_class = realdiv_maybe_zero(
self.tp_per_class,
self.tp_per_class + 0.5 * self.fn_per_class + 0.5 * self.fp_per_class)
return {
'sq_per_class': sq_per_class,
'rq_per_class': rq_per_class,
'pq_per_class': np.multiply(sq_per_class, rq_per_class)
}
def result(self, is_thing=None):
"""Computes and returns the detailed metric results over all comparisons.
Args:
is_thing: A boolean array of length `num_categories`. The entry
`is_thing[category_id]` is True iff that category is a "thing" category
instead of "stuff."
Returns:
A dictionary with a breakdown of metrics and/or metric factors by things,
stuff, and all categories.
"""
results = self.result_per_category()
valid_categories = self._valid_categories()
# If known, break down which categories are valid _and_ things/stuff.
category_sets = collections.OrderedDict()
category_sets['All'] = valid_categories
if is_thing is not None:
category_sets['Things'] = np.logical_and(valid_categories, is_thing)
category_sets['Stuff'] = np.logical_and(valid_categories,
np.logical_not(is_thing))
for category_set_name, in_category_set in category_sets.items():
if np.any(in_category_set):
results.update({
f'{category_set_name}_pq':
np.mean(results['pq_per_class'][in_category_set]),
f'{category_set_name}_sq':
np.mean(results['sq_per_class'][in_category_set]),
f'{category_set_name}_rq':
np.mean(results['rq_per_class'][in_category_set]),
# The number of categories in this subset.
f'{category_set_name}_num_categories':
np.sum(in_category_set.astype(np.int32)),
})
else:
results.update({
f'{category_set_name}_pq': 0.,
f'{category_set_name}_sq': 0.,
f'{category_set_name}_rq': 0.,
f'{category_set_name}_num_categories': 0
})
return results
def reset(self):
"""Resets the accumulation to the metric class's state at initialization."""
self.iou_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.tp_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.fn_per_class = np.zeros(self.num_categories, dtype=np.float64)
self.fp_per_class = np.zeros(self.num_categories, dtype=np.float64)
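# The sketch below is not part of the original file; it illustrates the
# intended call pattern of PanopticQuality on a single image pair. The tiny
# masks and constructor arguments are made-up values for demonstration only.
def _example_panoptic_quality_usage():
  """Illustrative only: accumulates one image pair and reads the results."""
  category_mask = np.zeros([4, 4], np.uint16)
  instance_mask = np.array(
      [[1, 1, 1, 1],
       [1, 2, 2, 1],
       [1, 2, 2, 1],
       [1, 1, 1, 1]], dtype=np.uint16)
  groundtruths = {'category_mask': category_mask,
                  'instance_mask': instance_mask}
  predictions = {'category_mask': category_mask,
                 'instance_mask': instance_mask}
  pq_metric = PanopticQuality(
      num_categories=1, ignored_label=2, max_instances_per_category=16,
      offset=16)
  pq_metric.compare_and_accumulate(groundtruths, predictions)
  # With a perfect match, 'All_pq', 'All_sq' and 'All_rq' are all 1.0.
  return pq_metric.result()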
def _get_instance_class_ids(
category_mask: tf.Tensor,
instance_mask: tf.Tensor,
max_num_instances: int,
ignored_label: int,
) -> tf.Tensor:
"""Get the class id of each instance (index starts from 1)."""
# (batch_size, height, width)
instance_mask = tf.where(
(instance_mask == 0) | (category_mask == ignored_label), -1, instance_mask
)
# (batch_size, height, width, max_num_instances + 1)
instance_binary_mask = tf.one_hot(
instance_mask, max_num_instances + 1, dtype=tf.int32
)
# (batch_size, max_num_instances + 1)
result = tf.reduce_max(
instance_binary_mask * category_mask[..., tf.newaxis], axis=[1, 2]
)
# If not an instance, sets the class id to -1.
return tf.where(result == 0, -1, result)
class PanopticQualityV2(tf.keras.metrics.Metric):
"""Panoptic quality metrics with vectorized implementation.
This implementation is supported on TPU.
"Panoptic Segmentation" by Alexander Kirillov, Kaiming He, Ross Girshick,
Carsten Rother, Piotr Dollar.
https://arxiv.org/abs/1801.00868
"""
def __init__(
self,
num_categories: int,
is_thing: Optional[Tuple[bool, ...]] = None,
max_num_instances: int = 255,
ignored_label: int = 255,
rescale_predictions: bool = False,
name: Optional[str] = None,
dtype: Optional[Union[str, tf.dtypes.DType]] = tf.float32,
):
"""Initialization for PanopticQualityV2.
Args:
num_categories: the number of categories.
is_thing: a boolean array of length `num_categories`. The entry
`is_thing[category_id]` is True iff that category is a "thing" category
instead of "stuff". Default to `None`, and it means categories are not
classified into these two categories.
max_num_instances: the maximum number of instances in an image.
ignored_label: a category id that is ignored in evaluation, e.g. the void
label as defined in COCO panoptic segmentation dataset.
      rescale_predictions: whether to scale predictions back to the original
        image sizes. If True, the image_info of the groundtruth is used to
        rescale predictions.
name: string name of the metric instance.
dtype: data type of the metric result.
"""
super().__init__(name=name, dtype=dtype)
self._num_categories = num_categories
if is_thing is not None:
self._is_thing = tf.convert_to_tensor(is_thing)
self._is_thing.set_shape([self._num_categories])
else:
self._is_thing = tf.ones([self._num_categories], dtype=tf.bool)
self._max_num_instances = max_num_instances
self._ignored_label = ignored_label
self._rescale_predictions = rescale_predictions
# Variables
self.tp_count = self.add_weight(
'tp_count',
shape=[self._num_categories],
initializer='zeros',
dtype=tf.float32,
)
self.fp_count = self.add_weight(
'fp_count',
shape=[self._num_categories],
initializer='zeros',
dtype=tf.float32,
)
self.fn_count = self.add_weight(
'fn_count',
shape=[self._num_categories],
initializer='zeros',
dtype=tf.float32,
)
self.tp_iou_sum = self.add_weight(
'tp_iou_sum',
shape=[self._num_categories],
initializer='zeros',
dtype=tf.float32,
)
def get_config(self) -> Dict[str, Any]:
"""Returns the serializable config of the metric."""
return {
'num_categories': self._num_categories,
'is_thing': self._is_thing,
'max_num_instances': self._max_num_instances,
'ignored_label': self._ignored_label,
'rescale_predictions': self._rescale_predictions,
'name': self.name,
'dtype': self.dtype,
}
def reset_state(self):
"""Resets all of the metric state variables."""
self.tp_count.assign(tf.zeros_like(self.tp_count))
self.fp_count.assign(tf.zeros_like(self.fp_count))
self.fn_count.assign(tf.zeros_like(self.fn_count))
self.tp_iou_sum.assign(tf.zeros_like(self.tp_iou_sum))
def update_state(
self, y_true: Dict[str, tf.Tensor], y_pred: Dict[str, tf.Tensor]
):
category_mask = tf.convert_to_tensor(y_pred['category_mask'], tf.int32)
instance_mask = tf.convert_to_tensor(y_pred['instance_mask'], tf.int32)
gt_category_mask = tf.convert_to_tensor(y_true['category_mask'], tf.int32)
gt_instance_mask = tf.convert_to_tensor(y_true['instance_mask'], tf.int32)
if self._rescale_predictions:
_, height, width = gt_category_mask.get_shape().as_list()
# Instead of cropping the masks to the original image shape (dynamic),
# here we keep the mask shape (fixed) and ignore the pixels outside the
# original image shape.
image_shape = tf.cast(y_true['image_info'][:, 0, :], tf.int32)
# (batch_size, 2)
y0_x0 = tf.broadcast_to(
tf.constant([[0, 0]], dtype=tf.int32), tf.shape(image_shape)
)
# (batch_size, 4)
image_shape_bbox = tf.concat([y0_x0, image_shape], axis=1)
# (batch_size, height, width)
image_shape_masks = box_ops.bbox2mask(
bbox=image_shape_bbox,
image_height=height,
image_width=width,
dtype=tf.bool,
)
# (batch_size, height, width)
category_mask = tf.where(
image_shape_masks, category_mask, self._ignored_label
)
instance_mask = tf.where(image_shape_masks, instance_mask, 0)
gt_category_mask = tf.where(
image_shape_masks, gt_category_mask, self._ignored_label
)
gt_instance_mask = tf.where(image_shape_masks, gt_instance_mask, 0)
self._update_thing_classes(
category_mask, instance_mask, gt_category_mask, gt_instance_mask
)
self._update_stuff_classes(category_mask, gt_category_mask)
def _update_thing_classes(
self,
category_mask: tf.Tensor,
instance_mask: tf.Tensor,
gt_category_mask: tf.Tensor,
gt_instance_mask: tf.Tensor,
):
_, height, width = category_mask.get_shape().as_list()
# (batch_size, num_detections + 1)
instance_class_ids = _get_instance_class_ids(
category_mask,
instance_mask,
self._max_num_instances,
self._ignored_label,
)
# (batch_size, num_gts + 1)
gt_instance_class_ids = _get_instance_class_ids(
gt_category_mask,
gt_instance_mask,
self._max_num_instances,
self._ignored_label,
)
# (batch_size, height, width)
valid_mask = gt_category_mask != self._ignored_label
# (batch_size, height, width, num_detections + 1)
instance_binary_masks = tf.one_hot(
tf.where(instance_mask > 0, instance_mask, -1),
self._max_num_instances + 1,
on_value=True,
off_value=False,
)
# (batch_size, height, width, num_gts + 1)
gt_instance_binary_masks = tf.one_hot(
tf.where(gt_instance_mask > 0, gt_instance_mask, -1),
self._max_num_instances + 1,
on_value=True,
off_value=False,
)
# (batch_size, height * width, num_detections + 1)
flattened_binary_masks = tf.reshape(
instance_binary_masks & valid_mask[..., tf.newaxis],
[-1, height * width, self._max_num_instances + 1],
)
# (batch_size, height * width, num_gts + 1)
flattened_gt_binary_masks = tf.reshape(
gt_instance_binary_masks & valid_mask[..., tf.newaxis],
[-1, height * width, self._max_num_instances + 1],
)
# (batch_size, num_detections + 1, height * width)
flattened_binary_masks = tf.transpose(flattened_binary_masks, [0, 2, 1])
# (batch_size, num_detections + 1, num_gts + 1)
intersection = tf.matmul(
tf.cast(flattened_binary_masks, tf.float32),
tf.cast(flattened_gt_binary_masks, tf.float32),
)
union = (
tf.math.count_nonzero(
flattened_binary_masks, axis=-1, keepdims=True, dtype=tf.float32
)
+ tf.math.count_nonzero(
flattened_gt_binary_masks, axis=-2, keepdims=True, dtype=tf.float32
)
- intersection
)
# (batch_size, num_detections + 1, num_gts + 1)
detection_to_gt_ious = tf.math.divide_no_nan(intersection, union)
detection_matches_gt = (
(detection_to_gt_ious > 0.5)
& (
instance_class_ids[:, :, tf.newaxis]
== gt_instance_class_ids[:, tf.newaxis, :]
)
& (gt_instance_class_ids[:, tf.newaxis, :] > 0)
)
# (batch_size, num_gts + 1)
is_tp = tf.reduce_any(detection_matches_gt, axis=1)
# (batch_size, num_gts + 1)
tp_iou = tf.reduce_max(
tf.where(detection_matches_gt, detection_to_gt_ious, 0), axis=1
)
# (batch_size, num_detections + 1)
is_fp = tf.reduce_any(instance_binary_masks, axis=[1, 2]) & ~tf.reduce_any(
detection_matches_gt, axis=2
)
# (batch_size, height, width, num_detections + 1)
fp_binary_mask = is_fp[:, tf.newaxis, tf.newaxis, :] & instance_binary_masks
# (batch_size, num_detections + 1)
fp_area = tf.math.count_nonzero(
fp_binary_mask, axis=[1, 2], dtype=tf.float32
)
# (batch_size, num_detections + 1)
fp_crowd_or_ignored_area = tf.math.count_nonzero(
fp_binary_mask
& (
(
# An instance detection matches a crowd ground truth instance if
# the instance class of the detection matches the class of the
# ground truth and the instance id of the ground truth is 0 (the
# instance is crowd).
(instance_mask > 0)
& (category_mask > 0)
& (gt_category_mask == category_mask)
& (gt_instance_mask == 0)
)
| (gt_category_mask == self._ignored_label)
)[..., tf.newaxis],
axis=[1, 2],
dtype=tf.float32,
)
# Don't count the detection as false positive if over 50% pixels of the
# instance detection are crowd of the matching class or ignored pixels in
# ground truth.
# (batch_size, num_detections + 1)
is_fp &= tf.math.divide_no_nan(fp_crowd_or_ignored_area, fp_area) <= 0.5
# (batch_size, num_detections + 1, num_categories)
detection_by_class = tf.one_hot(
instance_class_ids, self._num_categories, on_value=True, off_value=False
)
# (batch_size, num_gts + 1, num_categories)
gt_by_class = tf.one_hot(
gt_instance_class_ids,
self._num_categories,
on_value=True,
off_value=False,
)
# (num_categories,)
gt_count = tf.math.count_nonzero(gt_by_class, axis=[0, 1], dtype=tf.float32)
tp_count = tf.math.count_nonzero(
is_tp[..., tf.newaxis] & gt_by_class, axis=[0, 1], dtype=tf.float32
)
fn_count = gt_count - tp_count
fp_count = tf.math.count_nonzero(
is_fp[..., tf.newaxis] & detection_by_class,
axis=[0, 1],
dtype=tf.float32,
)
tp_iou_sum = tf.reduce_sum(
tf.cast(gt_by_class, tf.float32) * tp_iou[..., tf.newaxis], axis=[0, 1]
)
self.tp_count.assign_add(tp_count)
self.fn_count.assign_add(fn_count)
self.fp_count.assign_add(fp_count)
self.tp_iou_sum.assign_add(tp_iou_sum)
def _update_stuff_classes(
self, category_mask: tf.Tensor, gt_category_mask: tf.Tensor
):
# (batch_size, height, width, num_categories)
category_binary_mask = tf.one_hot(
category_mask, self._num_categories, on_value=True, off_value=False
)
gt_category_binary_mask = tf.one_hot(
gt_category_mask, self._num_categories, on_value=True, off_value=False
)
# (batch_size, height, width)
valid_mask = gt_category_mask != self._ignored_label
# (batch_size, num_categories)
intersection = tf.math.count_nonzero(
category_binary_mask
& gt_category_binary_mask
& valid_mask[..., tf.newaxis],
axis=[1, 2],
dtype=tf.float32,
)
union = tf.math.count_nonzero(
(category_binary_mask | gt_category_binary_mask)
& valid_mask[..., tf.newaxis],
axis=[1, 2],
dtype=tf.float32,
)
iou = tf.math.divide_no_nan(intersection, union)
# (batch_size, num_categories)
is_tp = (iou > 0.5) & ~self._is_thing
is_fn = (
tf.reduce_any(gt_category_binary_mask, axis=[1, 2])
& ~self._is_thing
& ~is_tp
)
is_fp = (
tf.reduce_any(category_binary_mask, axis=[1, 2])
& ~self._is_thing
& ~is_tp
)
# (batch_size, height, width, num_categories)
fp_binary_mask = is_fp[:, tf.newaxis, tf.newaxis, :] & category_binary_mask
# (batch_size, num_categories)
fp_area = tf.math.count_nonzero(
fp_binary_mask, axis=[1, 2], dtype=tf.float32
)
fp_ignored_area = tf.math.count_nonzero(
fp_binary_mask
& (gt_category_mask == self._ignored_label)[..., tf.newaxis],
axis=[1, 2],
dtype=tf.float32,
)
# Don't count the detection as false positive if over 50% pixels of the
# stuff detection are ignored pixels in ground truth.
is_fp &= tf.math.divide_no_nan(fp_ignored_area, fp_area) <= 0.5
# (num_categories,)
tp_count = tf.math.count_nonzero(is_tp, axis=0, dtype=tf.float32)
fn_count = tf.math.count_nonzero(is_fn, axis=0, dtype=tf.float32)
fp_count = tf.math.count_nonzero(is_fp, axis=0, dtype=tf.float32)
tp_iou_sum = tf.reduce_sum(tf.cast(is_tp, tf.float32) * iou, axis=0)
self.tp_count.assign_add(tp_count)
self.fn_count.assign_add(fn_count)
self.fp_count.assign_add(fp_count)
self.tp_iou_sum.assign_add(tp_iou_sum)
def result(self) -> Dict[str, tf.Tensor]:
"""Returns the metrics values as a dict."""
# (num_categories,)
tp_fn_fp_count = self.tp_count + self.fn_count + self.fp_count
is_ignore_label = tf.one_hot(
self._ignored_label,
self._num_categories,
on_value=True,
off_value=False,
)
sq_per_class = tf.math.divide_no_nan(
self.tp_iou_sum, self.tp_count
) * tf.cast(~is_ignore_label, tf.float32)
rq_per_class = tf.math.divide_no_nan(
self.tp_count, self.tp_count + 0.5 * self.fp_count + 0.5 * self.fn_count
) * tf.cast(~is_ignore_label, tf.float32)
pq_per_class = sq_per_class * rq_per_class
result = {
# (num_categories,)
'valid_thing_classes': (
(tp_fn_fp_count > 0) & self._is_thing & ~is_ignore_label
),
# (num_categories,)
'valid_stuff_classes': (
(tp_fn_fp_count > 0) & ~self._is_thing & ~is_ignore_label
),
# (num_categories,)
'sq_per_class': sq_per_class,
# (num_categories,)
'rq_per_class': rq_per_class,
# (num_categories,)
'pq_per_class': pq_per_class,
}
return result
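# The sketch below is not part of the original file; it shows a minimal
# update_state/result round trip with PanopticQualityV2. The tensors are
# made-up values for demonstration only.
def _example_panoptic_quality_v2_usage():
  """Illustrative only: a perfect single-instance prediction."""
  metric = PanopticQualityV2(
      num_categories=2, max_num_instances=15, ignored_label=255)
  y_true = {
      'category_mask': tf.ones([1, 4, 4], dtype=tf.int32),
      'instance_mask': tf.ones([1, 4, 4], dtype=tf.int32),
  }
  # With a perfect prediction, 'pq_per_class' for class 1 is 1.0.
  metric.update_state(y_true, y_pred=y_true)
  return metric.result()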
| 27,477 | 37.110957 | 96 | py |
models | models-master/official/vision/evaluation/panoptic_quality_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Panoptic Quality metric.
Note that this metric test class is branched from
https://github.com/tensorflow/models/blob/master/research/deeplab/evaluation/panoptic_quality_test.py
"""
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from official.vision.evaluation import panoptic_quality
class PanopticQualityTest(absltest.TestCase):
def test_perfect_match(self):
category_mask = np.zeros([6, 6], np.uint16)
instance_mask = np.array([
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 1, 1, 1],
[1, 2, 1, 1, 1, 1],
],
dtype=np.uint16)
groundtruths = {
'category_mask': category_mask,
'instance_mask': instance_mask
}
predictions = {
'category_mask': category_mask,
'instance_mask': instance_mask
}
pq_metric = panoptic_quality.PanopticQuality(
num_categories=1,
ignored_label=2,
max_instances_per_category=16,
offset=16)
pq_metric.compare_and_accumulate(groundtruths, predictions)
np.testing.assert_array_equal(pq_metric.iou_per_class, [2.0])
np.testing.assert_array_equal(pq_metric.tp_per_class, [2])
np.testing.assert_array_equal(pq_metric.fn_per_class, [0])
np.testing.assert_array_equal(pq_metric.fp_per_class, [0])
results = pq_metric.result()
np.testing.assert_array_equal(results['pq_per_class'], [1.0])
np.testing.assert_array_equal(results['rq_per_class'], [1.0])
np.testing.assert_array_equal(results['sq_per_class'], [1.0])
self.assertAlmostEqual(results['All_pq'], 1.0)
self.assertAlmostEqual(results['All_rq'], 1.0)
self.assertAlmostEqual(results['All_sq'], 1.0)
self.assertEqual(results['All_num_categories'], 1)
def test_totally_wrong(self):
category_mask = np.array([
[0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
instance_mask = np.zeros([6, 6], np.uint16)
groundtruths = {
'category_mask': category_mask,
'instance_mask': instance_mask
}
predictions = {
'category_mask': 1 - category_mask,
'instance_mask': instance_mask
}
pq_metric = panoptic_quality.PanopticQuality(
num_categories=2,
ignored_label=2,
max_instances_per_category=1,
offset=16)
pq_metric.compare_and_accumulate(groundtruths, predictions)
np.testing.assert_array_equal(pq_metric.iou_per_class, [0.0, 0.0])
np.testing.assert_array_equal(pq_metric.tp_per_class, [0, 0])
np.testing.assert_array_equal(pq_metric.fn_per_class, [1, 1])
np.testing.assert_array_equal(pq_metric.fp_per_class, [1, 1])
results = pq_metric.result()
np.testing.assert_array_equal(results['pq_per_class'], [0.0, 0.0])
np.testing.assert_array_equal(results['rq_per_class'], [0.0, 0.0])
np.testing.assert_array_equal(results['sq_per_class'], [0.0, 0.0])
self.assertAlmostEqual(results['All_pq'], 0.0)
self.assertAlmostEqual(results['All_rq'], 0.0)
self.assertAlmostEqual(results['All_sq'], 0.0)
self.assertEqual(results['All_num_categories'], 2)
def test_matches_by_iou(self):
groundtruth_instance_mask = np.array(
[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
good_det_instance_mask = np.array(
[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
groundtruths = {
'category_mask': np.zeros_like(groundtruth_instance_mask),
'instance_mask': groundtruth_instance_mask
}
predictions = {
'category_mask': np.zeros_like(good_det_instance_mask),
'instance_mask': good_det_instance_mask
}
pq_metric = panoptic_quality.PanopticQuality(
num_categories=1,
ignored_label=2,
max_instances_per_category=16,
offset=16)
pq_metric.compare_and_accumulate(groundtruths, predictions)
# iou(1, 1) = 28/30
# iou(2, 2) = 6 / 8
np.testing.assert_array_almost_equal(pq_metric.iou_per_class,
[28 / 30 + 6 / 8])
np.testing.assert_array_equal(pq_metric.tp_per_class, [2])
np.testing.assert_array_equal(pq_metric.fn_per_class, [0])
np.testing.assert_array_equal(pq_metric.fp_per_class, [0])
results = pq_metric.result()
np.testing.assert_array_equal(results['pq_per_class'],
[(28 / 30 + 6 / 8) / 2])
np.testing.assert_array_equal(results['rq_per_class'], [1.0])
np.testing.assert_array_equal(results['sq_per_class'],
[(28 / 30 + 6 / 8) / 2])
self.assertAlmostEqual(results['All_pq'], (28 / 30 + 6 / 8) / 2)
self.assertAlmostEqual(results['All_rq'], 1.0)
self.assertAlmostEqual(results['All_sq'], (28 / 30 + 6 / 8) / 2)
self.assertEqual(results['All_num_categories'], 1)
bad_det_instance_mask = np.array(
[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
predictions['instance_mask'] = bad_det_instance_mask
pq_metric.reset()
pq_metric.compare_and_accumulate(groundtruths, predictions)
# iou(1, 1) = 27/32
np.testing.assert_array_almost_equal(pq_metric.iou_per_class, [27 / 32])
np.testing.assert_array_equal(pq_metric.tp_per_class, [1])
np.testing.assert_array_equal(pq_metric.fn_per_class, [1])
np.testing.assert_array_equal(pq_metric.fp_per_class, [1])
results = pq_metric.result()
np.testing.assert_array_equal(results['pq_per_class'], [27 / 32 / 2])
np.testing.assert_array_equal(results['rq_per_class'], [0.5])
np.testing.assert_array_equal(results['sq_per_class'], [27 / 32])
self.assertAlmostEqual(results['All_pq'], 27 / 32 / 2)
self.assertAlmostEqual(results['All_rq'], 0.5)
self.assertAlmostEqual(results['All_sq'], 27 / 32)
self.assertEqual(results['All_num_categories'], 1)
def test_wrong_instances(self):
category_mask = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 1, 2, 2],
[1, 2, 2, 1, 2, 2],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
groundtruth_instance_mask = np.zeros([6, 6], dtype=np.uint16)
predicted_instance_mask = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
groundtruths = {
'category_mask': category_mask,
'instance_mask': groundtruth_instance_mask
}
predictions = {
'category_mask': category_mask,
'instance_mask': predicted_instance_mask
}
pq_metric = panoptic_quality.PanopticQuality(
num_categories=3,
ignored_label=0,
max_instances_per_category=10,
offset=100)
pq_metric.compare_and_accumulate(groundtruths, predictions)
np.testing.assert_array_equal(pq_metric.iou_per_class, [0.0, 1.0, 0.0])
np.testing.assert_array_equal(pq_metric.tp_per_class, [0, 1, 0])
np.testing.assert_array_equal(pq_metric.fn_per_class, [0, 0, 1])
np.testing.assert_array_equal(pq_metric.fp_per_class, [0, 0, 2])
results = pq_metric.result()
np.testing.assert_array_equal(results['pq_per_class'], [0.0, 1.0, 0.0])
np.testing.assert_array_equal(results['rq_per_class'], [0.0, 1.0, 0.0])
np.testing.assert_array_equal(results['sq_per_class'], [0.0, 1.0, 0.0])
self.assertAlmostEqual(results['All_pq'], 0.5)
self.assertAlmostEqual(results['All_rq'], 0.5)
self.assertAlmostEqual(results['All_sq'], 0.5)
self.assertEqual(results['All_num_categories'], 2)
def test_instance_order_is_arbitrary(self):
category_mask = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 1, 2, 2],
[1, 2, 2, 1, 2, 2],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
groundtruth_instance_mask = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
predicted_instance_mask = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.uint16)
groundtruths = {
'category_mask': category_mask,
'instance_mask': groundtruth_instance_mask
}
predictions = {
'category_mask': category_mask,
'instance_mask': predicted_instance_mask
}
pq_metric = panoptic_quality.PanopticQuality(
num_categories=3,
ignored_label=0,
max_instances_per_category=10,
offset=100)
pq_metric.compare_and_accumulate(groundtruths, predictions)
np.testing.assert_array_equal(pq_metric.iou_per_class, [0.0, 1.0, 2.0])
np.testing.assert_array_equal(pq_metric.tp_per_class, [0, 1, 2])
np.testing.assert_array_equal(pq_metric.fn_per_class, [0, 0, 0])
np.testing.assert_array_equal(pq_metric.fp_per_class, [0, 0, 0])
results = pq_metric.result()
np.testing.assert_array_equal(results['pq_per_class'], [0.0, 1.0, 1.0])
np.testing.assert_array_equal(results['rq_per_class'], [0.0, 1.0, 1.0])
np.testing.assert_array_equal(results['sq_per_class'], [0.0, 1.0, 1.0])
self.assertAlmostEqual(results['All_pq'], 1.0)
self.assertAlmostEqual(results['All_rq'], 1.0)
self.assertAlmostEqual(results['All_sq'], 1.0)
self.assertEqual(results['All_num_categories'], 2)
class PanopticQualityV2Test(tf.test.TestCase):
def test_perfect_match(self):
panoptic_metrics = panoptic_quality.PanopticQualityV2(
name='panoptic_metrics',
num_categories=2,
)
y_true = {
'category_mask': tf.ones([1, 6, 6], dtype=tf.int32),
'instance_mask': [[
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 1, 1, 1],
[1, 2, 1, 1, 1, 1],
]],
'image_info': tf.constant(
[[[6, 6], [6, 6], [1, 1], [0, 0]]], dtype=tf.float32
),
}
y_pred = y_true
panoptic_metrics.update_state(y_true, y_pred)
result = panoptic_metrics.result()
self.assertAllEqual(result['valid_thing_classes'], [False, True])
self.assertAllEqual(result['valid_stuff_classes'], [False, False])
self.assertAllClose(result['sq_per_class'], [0.0, 1.0], atol=1e-4)
self.assertAllClose(result['rq_per_class'], [0.0, 1.0], atol=1e-4)
self.assertAllClose(result['pq_per_class'], [0.0, 1.0], atol=1e-4)
def test_totally_wrong(self):
panoptic_metrics = panoptic_quality.PanopticQualityV2(
name='panoptic_metrics',
num_categories=4,
)
y_true = {
'category_mask': [[
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 1, 1, 1],
[1, 2, 1, 1, 1, 1],
]],
'instance_mask': [[
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 1, 1, 1],
[1, 2, 1, 1, 1, 1],
]],
'image_info': tf.constant(
[[[6, 6], [6, 6], [1, 1], [0, 0]]], dtype=tf.float32
),
}
y_pred = {
'category_mask': tf.constant(y_true['category_mask']) + 1,
'instance_mask': y_true['instance_mask'],
}
panoptic_metrics.update_state(y_true, y_pred)
result = panoptic_metrics.result()
self.assertAllEqual(
result['valid_thing_classes'], [False, True, True, True]
)
self.assertAllEqual(
result['valid_stuff_classes'], [False, False, False, False]
)
self.assertAllClose(result['sq_per_class'], [0.0, 0.0, 0.0, 0.0], atol=1e-4)
self.assertAllClose(result['rq_per_class'], [0.0, 0.0, 0.0, 0.0], atol=1e-4)
self.assertAllClose(result['pq_per_class'], [0.0, 0.0, 0.0, 0.0], atol=1e-4)
def test_matches_by_iou(self):
panoptic_metrics = panoptic_quality.PanopticQualityV2(
name='panoptic_metrics',
num_categories=2,
)
y_true = {
'category_mask': tf.ones([1, 6, 6], dtype=tf.int32),
'instance_mask': [[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
]],
'image_info': tf.constant(
[[[6, 6], [6, 6], [1, 1], [0, 0]]], dtype=tf.float32
),
}
y_pred1 = {
'category_mask': tf.ones([1, 6, 6], dtype=tf.int32),
'instance_mask': [[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
]],
}
panoptic_metrics.update_state(y_true, y_pred1)
result1 = panoptic_metrics.result()
self.assertAllEqual(result1['valid_thing_classes'], [False, True])
self.assertAllEqual(result1['valid_stuff_classes'], [False, False])
self.assertAllClose(
result1['sq_per_class'], [0.0, (28 / 30 + 6 / 8) / 2], atol=1e-4
)
self.assertAllClose(result1['rq_per_class'], [0.0, 1.0], atol=1e-4)
self.assertAllClose(
result1['pq_per_class'], [0.0, (28 / 30 + 6 / 8) / 2], atol=1e-4
)
panoptic_metrics.reset_state()
y_pred2 = {
'category_mask': tf.ones([1, 6, 6], dtype=tf.int32),
'instance_mask': [[
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
]],
}
panoptic_metrics.update_state(y_true, y_pred2)
result2 = panoptic_metrics.result()
self.assertAllEqual(result2['valid_thing_classes'], [False, True])
self.assertAllEqual(result2['valid_stuff_classes'], [False, False])
self.assertAllClose(result2['sq_per_class'], [0.0, 27 / 32], atol=1e-4)
self.assertAllClose(result2['rq_per_class'], [0.0, 1 / 2], atol=1e-4)
self.assertAllClose(result2['pq_per_class'], [0.0, 27 / 64], atol=1e-4)
def test_thing_and_stuff(self):
panoptic_metrics = panoptic_quality.PanopticQualityV2(
name='panoptic_metrics',
num_categories=10,
is_thing=[
False,
True,
True,
False,
True,
False,
True,
False,
True,
False,
],
max_num_instances=15,
ignored_label=255,
)
y_true = {
'category_mask': [[
[6, 6, 4, 6, 2, 5, 5],
[6, 8, 4, 3, 2, 5, 5],
]],
'instance_mask': [[
[1, 1, 2, 5, 3, 0, 0],
[1, 6, 2, 0, 4, 0, 0],
]],
'image_info': tf.constant(
[[[2, 7], [2, 7], [1, 1], [0, 0]]], dtype=tf.float32
),
}
y_pred = {
'category_mask': [[
[6, 4, 4, 6, 2, 255, 255],
[6, 6, 4, 3, 255, 255, 7],
]],
'instance_mask': [[
[1, 2, 2, 5, 0, 0, 0],
[1, 6, 2, 0, 0, 0, 0],
]],
}
panoptic_metrics.update_state(y_true, y_pred)
result = panoptic_metrics.result()
self.assertAllEqual(
result['valid_thing_classes'],
[False, False, True, False, True, False, True, False, True, False],
)
self.assertAllEqual(
result['valid_stuff_classes'],
[False, False, False, True, False, True, False, True, False, False],
)
self.assertAllClose(
result['sq_per_class'],
[0.0, 0.0, 0.0, 1.0, 0.666667, 0.0, 0.833333, 0.0, 0.0, 0.0],
atol=1e-4,
)
self.assertAllClose(
result['rq_per_class'],
[0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.8, 0.0, 0.0, 0.0],
atol=1e-4,
)
self.assertAllClose(
result['pq_per_class'],
[0.0, 0.0, 0.0, 1.0, 0.666667, 0.0, 0.666667, 0.0, 0.0, 0.0],
atol=1e-4,
)
if __name__ == '__main__':
absltest.main()
| 17,704 | 33.580078 | 101 | py |
models | models-master/official/vision/evaluation/panoptic_quality_evaluator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The panoptic quality evaluator.
The following snippet demonstrates the use of interfaces:
evaluator = PanopticQualityEvaluator(...)
for _ in range(num_evals):
for _ in range(num_batches_per_eval):
predictions, groundtruth = predictor.predict(...) # pop a batch.
evaluator.update_state(groundtruths, predictions)
evaluator.result() # finish one full eval and reset states.
See also: https://github.com/cocodataset/cocoapi/
"""
import numpy as np
import tensorflow as tf
from official.vision.evaluation import panoptic_quality
def _crop_padding(mask, image_info):
"""Crops padded masks to match original image shape.
Args:
mask: a padded mask tensor.
image_info: a tensor that holds information about original and preprocessed
images.
Returns:
    A `tf.Tensor` of shape [1, height, width] with the mask cropped to the
    original image shape.
"""
image_shape = tf.cast(image_info[0, :], tf.int32)
mask = tf.image.crop_to_bounding_box(
tf.expand_dims(mask, axis=-1), 0, 0,
image_shape[0], image_shape[1])
return tf.expand_dims(mask[:, :, 0], axis=0)
class PanopticQualityEvaluator:
"""Panoptic Quality metric class."""
def __init__(self, num_categories, ignored_label, max_instances_per_category,
offset, is_thing=None, rescale_predictions=False):
"""Constructs Panoptic Quality evaluation class.
The class provides the interface to Panoptic Quality metrics_fn.
Args:
num_categories: The number of segmentation categories (or "classes" in the
dataset).
ignored_label: A category id that is ignored in evaluation, e.g. the void
label as defined in COCO panoptic segmentation dataset.
max_instances_per_category: The maximum number of instances for each
category. Used in ensuring unique instance labels.
offset: The maximum number of unique labels. This is used, by multiplying
the ground-truth labels, to generate unique ids for individual regions
of overlap between ground-truth and predicted segments.
is_thing: A boolean array of length `num_categories`. The entry
`is_thing[category_id]` is True iff that category is a "thing" category
instead of "stuff." Default to `None`, and it means categories are not
classified into these two categories.
      rescale_predictions: `bool`, whether to scale predictions back to the
        original image sizes. If True, groundtruths['image_info'] is used to
        rescale predictions.
"""
self._pq_metric_module = panoptic_quality.PanopticQuality(
num_categories, ignored_label, max_instances_per_category, offset)
self._is_thing = is_thing
self._rescale_predictions = rescale_predictions
self._required_prediction_fields = ['category_mask', 'instance_mask']
self._required_groundtruth_fields = ['category_mask', 'instance_mask']
self.reset_states()
@property
def name(self):
return 'panoptic_quality'
def reset_states(self):
"""Resets internal states for a fresh run."""
self._pq_metric_module.reset()
def result(self):
"""Evaluates detection results, and reset_states."""
results = self._pq_metric_module.result(self._is_thing)
self.reset_states()
return results
def _convert_to_numpy(self, groundtruths, predictions):
"""Converts tesnors to numpy arrays."""
if groundtruths:
labels = tf.nest.map_structure(lambda x: x.numpy(), groundtruths)
numpy_groundtruths = {}
for key, val in labels.items():
if isinstance(val, tuple):
val = np.concatenate(val)
numpy_groundtruths[key] = val
else:
numpy_groundtruths = groundtruths
if predictions:
outputs = tf.nest.map_structure(lambda x: x.numpy(), predictions)
numpy_predictions = {}
for key, val in outputs.items():
if isinstance(val, tuple):
val = np.concatenate(val)
numpy_predictions[key] = val
else:
numpy_predictions = predictions
return numpy_groundtruths, numpy_predictions
def update_state(self, groundtruths, predictions):
"""Update and aggregate detection results and ground-truth data.
Args:
groundtruths: a dictionary of Tensors including the fields below. See also
different parsers under `../dataloader` for more details.
Required fields:
- category_mask: a numpy array of uint16 of shape [batch_size, H, W].
- instance_mask: a numpy array of uint16 of shape [batch_size, H, W].
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
predictions: a dictionary of tensors including the fields below. See
different parsers under `../dataloader` for more details.
Required fields:
- category_mask: a numpy array of uint16 of shape [batch_size, H, W].
- instance_mask: a numpy array of uint16 of shape [batch_size, H, W].
Raises:
ValueError: if the required prediction or ground-truth fields are not
present in the incoming `predictions` or `groundtruths`.
"""
groundtruths, predictions = self._convert_to_numpy(groundtruths,
predictions)
for k in self._required_prediction_fields:
if k not in predictions:
raise ValueError(
'Missing the required key `{}` in predictions!'.format(k))
for k in self._required_groundtruth_fields:
if k not in groundtruths:
raise ValueError(
'Missing the required key `{}` in groundtruths!'.format(k))
if self._rescale_predictions:
for idx in range(len(groundtruths['category_mask'])):
image_info = groundtruths['image_info'][idx]
groundtruths_ = {
'category_mask':
_crop_padding(groundtruths['category_mask'][idx], image_info),
'instance_mask':
_crop_padding(groundtruths['instance_mask'][idx], image_info),
}
predictions_ = {
'category_mask':
_crop_padding(predictions['category_mask'][idx], image_info),
'instance_mask':
_crop_padding(predictions['instance_mask'][idx], image_info),
}
groundtruths_, predictions_ = self._convert_to_numpy(
groundtruths_, predictions_)
self._pq_metric_module.compare_and_accumulate(
groundtruths_, predictions_)
else:
for idx in range(len(groundtruths['category_mask'])):
groundtruths_ = {
'category_mask': groundtruths['category_mask'][idx],
'instance_mask': groundtruths['instance_mask'][idx]
}
predictions_ = {
'category_mask': predictions['category_mask'][idx],
'instance_mask': predictions['instance_mask'][idx]
}
self._pq_metric_module.compare_and_accumulate(groundtruths_,
predictions_)
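# The sketch below is not part of the original file; it makes the module
# docstring's usage snippet concrete with tiny, made-up tensors. It assumes
# eager tensors because update_state converts its inputs via `.numpy()`.
def _example_panoptic_quality_evaluator_usage():
  """Illustrative only: one update_state/result round trip."""
  evaluator = PanopticQualityEvaluator(
      num_categories=3, ignored_label=0, max_instances_per_category=10,
      offset=100)
  masks = {
      'category_mask': tf.constant([[[1, 1], [1, 2]]], dtype=tf.uint16),
      'instance_mask': tf.constant([[[0, 0], [0, 1]]], dtype=tf.uint16),
  }
  # A perfect prediction: 'All_pq' in the returned dict is 1.0.
  evaluator.update_state(groundtruths=masks, predictions=masks)
  return evaluator.result()  # result() also resets the internal state.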
| 7,949 | 39.769231 | 80 | py |
models | models-master/official/vision/evaluation/wod_detection_evaluator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""2D detection evaluator for the Waymo Open Dataset."""
import pprint
from absl import logging
import tensorflow as tf
from official.vision.ops import box_ops
from waymo_open_dataset import label_pb2
from waymo_open_dataset.metrics.python import wod_detection_evaluator
from waymo_open_dataset.protos import breakdown_pb2
from waymo_open_dataset.protos import metrics_pb2
def get_2d_detection_default_config():
"""Returns the config proto for WOD 2D detection Evaluation."""
config = metrics_pb2.Config()
config.breakdown_generator_ids.append(breakdown_pb2.Breakdown.OBJECT_TYPE)
difficulty = config.difficulties.add()
difficulty.levels.append(label_pb2.Label.LEVEL_1)
difficulty.levels.append(label_pb2.Label.LEVEL_2)
config.breakdown_generator_ids.append(breakdown_pb2.Breakdown.ALL_BUT_SIGN)
difficulty = config.difficulties.add()
difficulty.levels.append(label_pb2.Label.LEVEL_1)
difficulty.levels.append(label_pb2.Label.LEVEL_2)
config.matcher_type = metrics_pb2.MatcherProto.TYPE_HUNGARIAN
config.iou_thresholds.append(0.0)
config.iou_thresholds.append(0.7)
config.iou_thresholds.append(0.5)
config.iou_thresholds.append(0.5)
config.iou_thresholds.append(0.5)
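  # Note (added for clarity): the thresholds appended above are indexed by WOD
  # label type, i.e. unknown (unused), vehicle (0.7), pedestrian (0.5),
  # sign (0.5) and cyclist (0.5).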
config.box_type = label_pb2.Label.Box.TYPE_2D
for i in range(100):
config.score_cutoffs.append(i * 0.01)
config.score_cutoffs.append(1.0)
return config
class WOD2dDetectionEvaluator(wod_detection_evaluator.WODDetectionEvaluator):
"""WOD 2D detection evaluation metric class."""
def __init__(self, config=None):
if config is None:
config = get_2d_detection_default_config()
super().__init__(config=config)
def _remove_padding(self, tensor_dict, num_valid):
"""Remove the paddings of the prediction/groundtruth data."""
result_tensor_dict = {}
gather_indices = tf.range(num_valid)
for k, v in tensor_dict.items():
if 'frame_id' in k:
result_tensor_dict[k] = tf.tile([v], [num_valid])
else:
result_tensor_dict[k] = tf.gather(v, gather_indices)
return result_tensor_dict
def update_state(self, groundtruths, predictions):
"""Update the metrics state with prediction and ground-truth data.
Args:
groundtruths: a dictionary of Tensors including the fields below.
Required fields:
- source_id: a numpy array of int or string of shape [batch_size].
- num_detections: a numpy array of int of shape [batch_size].
- boxes: a numpy array of float of shape [batch_size, K, 4].
- classes: a numpy array of int of shape [batch_size, K].
- difficulties: a numpy array of int of shape [batch_size, K].
predictions: a dictionary of tensors including the fields below.
Required fields:
- source_id: a numpy array of int or string of shape [batch_size].
- image_info: a numpy array of float of shape [batch_size, 4, 2].
- num_detections: a numpy array of int of shape [batch_size].
- detection_boxes: a numpy array of float of shape [batch_size, K, 4].
- detection_classes: a numpy array of int of shape [batch_size, K].
- detection_scores: a numpy array of float of shape [batch_size, K].
"""
# Preprocess potentially aggregated tensors.
for k, v in groundtruths.items():
if isinstance(v, tuple):
groundtruths[k] = tf.concat(v, axis=0)
for k, v in predictions.items():
if isinstance(v, tuple):
predictions[k] = tf.concat(v, axis=0)
# Change cyclists' type id from 3 to 4, where 3 is reserved for sign.
groundtruth_type = tf.cast(groundtruths['classes'], tf.uint8)
groundtruth_type = tf.where(
tf.equal(groundtruth_type, 3),
tf.ones_like(groundtruth_type) * 4, groundtruth_type)
prediction_type = tf.cast(predictions['detection_classes'], tf.uint8)
prediction_type = tf.where(
tf.equal(prediction_type, 3),
tf.ones_like(prediction_type) * 4, prediction_type)
# Rescale the detection boxes back to original scale.
image_scale = tf.tile(predictions['image_info'][:, 2:3, :], (1, 1, 2))
prediction_bbox = predictions['detection_boxes'] / image_scale
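    # Note (added for clarity): image_info[:, 2, :] stores the [y_scale,
    # x_scale] preprocessing factors, so this division maps the boxes from the
    # resized model input back to the original camera image coordinates.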
batch_size = tf.shape(groundtruths['source_id'])[0]
for i in tf.range(batch_size):
frame_groundtruths = {
'ground_truth_frame_id':
groundtruths['source_id'][i],
'ground_truth_bbox':
box_ops.yxyx_to_cycxhw(
tf.cast(groundtruths['boxes'][i], tf.float32)),
'ground_truth_type':
groundtruth_type[i],
'ground_truth_difficulty':
tf.cast(groundtruths['difficulties'][i], tf.uint8),
}
frame_groundtruths = self._remove_padding(
frame_groundtruths, groundtruths['num_detections'][i])
frame_predictions = {
'prediction_frame_id':
groundtruths['source_id'][i],
'prediction_bbox':
box_ops.yxyx_to_cycxhw(
tf.cast(prediction_bbox[i], tf.float32)),
'prediction_type':
prediction_type[i],
'prediction_score':
tf.cast(predictions['detection_scores'][i], tf.float32),
'prediction_overlap_nlz':
tf.zeros_like(predictions['detection_scores'][i], dtype=tf.bool)
}
frame_predictions = self._remove_padding(frame_predictions,
predictions['num_detections'][i])
super().update_state(frame_groundtruths, frame_predictions)
def evaluate(self):
"""Compute the final metrics."""
ap, _, _, _, _, _, _ = super().evaluate()
metric_dict = {}
for i, name in enumerate(self._breakdown_names):
# Skip sign metrics in 2d detection task.
if 'SIGN' in name:
continue
metric_dict['WOD metrics/{}/AP'.format(name)] = ap[i]
pp = pprint.PrettyPrinter()
logging.info('WOD Detection Metrics: \n %s', pp.pformat(metric_dict))
return metric_dict
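# Illustration (editor's addition, not part of the original file): the padding
# removal above keeps only the first `num_valid` rows of each per-frame tensor,
# while frame-id fields are broadcast to that length. A standalone sketch of
# the same idea, with made-up values:
def _example_remove_padding():
  num_valid = 2
  padded = {'frame_id': tf.constant(7), 'score': tf.constant([0.9, 0.8, 0.0])}
  return {
      'frame_id': tf.tile([padded['frame_id']], [num_valid]),  # -> [7, 7]
      'score': tf.gather(padded['score'], tf.range(num_valid)),  # -> [0.9, 0.8]
  }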
| 6,605 | 39.777778 | 80 | py |
models | models-master/official/vision/evaluation/segmentation_metrics_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation_metrics."""
from absl.testing import parameterized
import tensorflow as tf
from official.vision.evaluation import segmentation_metrics
class SegmentationMetricsTest(parameterized.TestCase, tf.test.TestCase):
def _create_test_data(self):
y_pred_cls0 = tf.constant([[1, 1, 0], [1, 1, 0], [0, 0, 0]],
dtype=tf.uint16)[tf.newaxis, :, :, tf.newaxis]
y_pred_cls1 = tf.constant([[0, 0, 0], [0, 0, 1], [0, 0, 1]],
dtype=tf.uint16)[tf.newaxis, :, :, tf.newaxis]
y_pred = tf.concat((y_pred_cls0, y_pred_cls1), axis=-1)
y_true = {
'masks':
tf.constant(
[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]],
dtype=tf.uint16)[tf.newaxis, :, :, tf.newaxis],
'valid_masks':
tf.ones([1, 6, 6, 1], dtype=tf.bool),
'image_info':
tf.constant([[[6, 6], [3, 3], [0.5, 0.5], [0, 0]]],
dtype=tf.float32)
}
return y_pred, y_true
@parameterized.parameters((True, True), (False, False), (True, False),
(False, True))
def test_mean_iou_metric(self, rescale_predictions, use_v2):
tf.config.experimental_run_functions_eagerly(True)
if use_v2:
mean_iou_metric = segmentation_metrics.MeanIoUV2(
num_classes=2, rescale_predictions=rescale_predictions)
else:
mean_iou_metric = segmentation_metrics.MeanIoU(
num_classes=2, rescale_predictions=rescale_predictions)
y_pred, y_true = self._create_test_data()
# Disable autograph for correct coverage statistics.
update_fn = tf.autograph.experimental.do_not_convert(
mean_iou_metric.update_state)
update_fn(y_true=y_true, y_pred=y_pred)
miou = mean_iou_metric.result()
self.assertAlmostEqual(miou.numpy(), 0.762, places=3)
@parameterized.parameters((True, True), (False, False), (True, False),
(False, True))
def test_per_class_mean_iou_metric(self, rescale_predictions, use_v2):
if use_v2:
per_class_iou_metric = segmentation_metrics.PerClassIoUV2(
num_classes=2, rescale_predictions=rescale_predictions)
else:
per_class_iou_metric = segmentation_metrics.PerClassIoU(
num_classes=2, rescale_predictions=rescale_predictions)
y_pred, y_true = self._create_test_data()
# Disable autograph for correct coverage statistics.
update_fn = tf.autograph.experimental.do_not_convert(
per_class_iou_metric.update_state)
update_fn(y_true=y_true, y_pred=y_pred)
per_class_miou = per_class_iou_metric.result()
self.assertAllClose(per_class_miou.numpy(), [0.857, 0.667], atol=1e-3)
def test_mean_iou_metric_v2_target_class_ids(self):
tf.config.experimental_run_functions_eagerly(True)
mean_iou_metric = segmentation_metrics.MeanIoUV2(
num_classes=2, target_class_ids=[0])
y_pred, y_true = self._create_test_data()
# Disable autograph for correct coverage statistics.
update_fn = tf.autograph.experimental.do_not_convert(
mean_iou_metric.update_state)
update_fn(y_true=y_true, y_pred=y_pred)
miou = mean_iou_metric.result()
self.assertAlmostEqual(miou.numpy(), 0.857, places=3)
if __name__ == '__main__':
tf.test.main()
| 4,013 | 40.8125 | 77 | py |
models | models-master/official/vision/evaluation/iou_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iou metric."""
import tensorflow as tf
from official.vision.evaluation import iou
class IoUTest(tf.test.TestCase):
def test_config(self):
m_obj = iou.PerClassIoU(num_classes=2, name='per_class_iou')
self.assertEqual(m_obj.name, 'per_class_iou')
self.assertEqual(m_obj.num_classes, 2)
m_obj2 = iou.PerClassIoU.from_config(m_obj.get_config())
self.assertEqual(m_obj2.name, 'per_class_iou')
self.assertEqual(m_obj2.num_classes, 2)
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
m_obj = iou.PerClassIoU(num_classes=2)
result = m_obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = [1 / (2 + 2 - 1), 1 / (2 + 2 - 1)]
self.assertAllClose(expected_result, result, atol=1e-3)
def test_weighted(self):
y_pred = tf.constant([0, 1, 0, 1], dtype=tf.float32)
y_true = tf.constant([0, 0, 1, 1])
sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1])
m_obj = iou.PerClassIoU(num_classes=2)
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = [0.2 / (0.6 + 0.5 - 0.2), 0.1 / (0.4 + 0.5 - 0.1)]
self.assertAllClose(expected_result, result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)
y_true = tf.constant([[0, 0], [1, 1]])
sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]])
m_obj = iou.PerClassIoU(num_classes=2)
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = [0.2 / (0.6 + 0.5 - 0.2), 0.1 / (0.4 + 0.5 - 0.1)]
self.assertAllClose(expected_result, result, atol=1e-3)
def test_zero_valid_entries(self):
m_obj = iou.PerClassIoU(num_classes=2)
self.assertAllClose(m_obj.result(), [0, 0], atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = tf.constant([1], dtype=tf.float32)
y_true = tf.constant([1])
m_obj = iou.PerClassIoU(num_classes=2)
result = m_obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = [0, 1 / (1 + 1 - 1)]
self.assertAllClose(expected_result, result, atol=1e-3)
def test_update_state_and_result(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
m_obj = iou.PerClassIoU(num_classes=2)
m_obj.update_state(y_true, y_pred)
result = m_obj.result()
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = [1 / (2 + 2 - 1), 1 / (2 + 2 - 1)]
self.assertAllClose(expected_result, result, atol=1e-3)
def test_per_class_iou_v2(self):
metrics = iou.PerClassIoUV2(num_classes=3)
y_true = tf.constant([[
[
[0, 0, 1],
[0, 1, 1],
],
[
[0, 1, 0],
[0, 0, 1],
],
]])
y_pred = tf.constant([[
[
[1, 0, 0],
[1, 1, 1],
],
[
[1, 1, 1],
[1, 0, 1],
],
]])
metrics.update_state(y_true, y_pred)
self.assertAllClose([0.0, 1.0, 0.5], metrics.result(), atol=1e-3)
def test_per_class_iou_v2_sparse_input(self):
metrics = iou.PerClassIoUV2(
num_classes=3, sparse_y_true=True, sparse_y_pred=True)
y_true = [[
[1, 2, 1],
[2, 2, 1],
]]
y_pred = [[
[2, 0, 1],
[2, 0, 1],
]]
metrics.update_state(y_true, y_pred)
self.assertAllClose([0., 2. / 3., 1. / 4.], metrics.result(), atol=1e-3)
def test_per_class_iou_v2_keep_tailing_dims(self):
num_classes = 3
num_channels = 2
metrics = iou.PerClassIoUV2(
num_classes=num_classes,
shape=(num_classes, num_channels),
sparse_y_true=True,
sparse_y_pred=True,
axis=0)
y_pred = tf.constant([2, 1])
y_true = tf.constant([2, 0])
metrics.update_state(y_true, y_pred)
self.assertAllClose([[0., 0.], [0., 0.], [1., 0.]],
metrics.result(),
atol=1e-3)
if __name__ == '__main__':
tf.test.main()
| 5,380 | 30.284884 | 77 | py |
models | models-master/official/vision/evaluation/coco_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for coco_utils."""
import os
import numpy as np
import tensorflow as tf
from official.vision.dataloaders import tfexample_utils
from official.vision.evaluation import coco_utils
class CocoUtilsTest(tf.test.TestCase):
def test_scan_and_generator_annotation_file(self):
num_samples = 10
example = tfexample_utils.create_detection_test_example(
image_height=512, image_width=512, image_channel=3, num_instances=10
)
tf_examples = [example] * num_samples
data_file = os.path.join(self.create_tempdir(), 'test.tfrecord')
tfexample_utils.dump_to_tfrecord(
record_file=data_file, tf_examples=tf_examples
)
annotation_file = os.path.join(self.create_tempdir(), 'annotation.json')
coco_utils.scan_and_generator_annotation_file(
file_pattern=data_file,
file_type='tfrecord',
num_samples=num_samples,
include_mask=True,
annotation_file=annotation_file,
)
self.assertTrue(
tf.io.gfile.exists(annotation_file),
        msg=f'Annotation file {annotation_file} does not exist.',
)
def test_convert_keypoint_predictions_to_coco_annotations(self):
batch_size = 1
max_num_detections = 3
num_keypoints = 3
image_size = 512
source_id = [np.array([[1]], dtype=int)]
detection_boxes = [
np.random.random([batch_size, max_num_detections, 4]) * image_size
]
detection_class = [
np.random.randint(1, 5, [batch_size, max_num_detections])
]
detection_scores = [np.random.random([batch_size, max_num_detections])]
detection_keypoints = [
np.random.random([batch_size, max_num_detections, num_keypoints, 2])
* image_size
]
predictions = {
'source_id': source_id,
'detection_boxes': detection_boxes,
'detection_classes': detection_class,
'detection_scores': detection_scores,
'detection_keypoints': detection_keypoints,
}
anns = coco_utils.convert_predictions_to_coco_annotations(predictions)
for i in range(max_num_detections):
expected_keypoint_ann = np.concatenate(
[
np.expand_dims(detection_keypoints[0][0, i, :, 1], axis=-1),
np.expand_dims(detection_keypoints[0][0, i, :, 0], axis=-1),
np.expand_dims(np.ones(num_keypoints), axis=1),
],
axis=1,
).astype(int)
expected_keypoint_ann = expected_keypoint_ann.flatten().tolist()
self.assertAllEqual(anns[i]['keypoints'], expected_keypoint_ann)
if __name__ == '__main__':
tf.test.main()
| 3,189 | 32.229167 | 76 | py |
models | models-master/official/vision/evaluation/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/vision/evaluation/coco_evaluator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The COCO-style evaluator.
The following snippet demonstrates the use of interfaces:
evaluator = COCOEvaluator(...)
for _ in range(num_evals):
for _ in range(num_batches_per_eval):
predictions, groundtruth = predictor.predict(...) # pop a batch.
evaluator.update_state(groundtruths, predictions)
evaluator.result() # finish one full eval and reset states.
See also: https://github.com/cocodataset/cocoapi/
"""
import atexit
import tempfile
# Import libraries
from absl import logging
import numpy as np
from pycocotools import cocoeval
import six
import tensorflow as tf
from official.vision.evaluation import coco_utils
class COCOEvaluator(object):
"""COCO evaluation metric class."""
def __init__(self,
annotation_file,
include_mask,
include_keypoint=False,
need_rescale_bboxes=True,
need_rescale_keypoints=False,
per_category_metrics=False,
max_num_eval_detections=100,
kpt_oks_sigmas=None):
"""Constructs COCO evaluation class.
    The class provides the interface to COCO metrics_fn. The update_state()
    method takes detections from each image and accumulates them in
    self._predictions. The evaluate() method loads a JSON file in COCO
    annotation format as the ground-truths and runs COCO evaluation.
Args:
annotation_file: a JSON file that stores annotations of the eval dataset.
If `annotation_file` is None, ground-truth annotations will be loaded
from the dataloader.
include_mask: a boolean to indicate whether or not to include the mask
eval.
include_keypoint: a boolean to indicate whether or not to include the
keypoint eval.
need_rescale_bboxes: If true bboxes in `predictions` will be rescaled back
to absolute values (`image_info` is needed in this case).
need_rescale_keypoints: If true keypoints in `predictions` will be
rescaled back to absolute values (`image_info` is needed in this case).
per_category_metrics: Whether to return per category metrics.
max_num_eval_detections: Maximum number of detections to evaluate in coco
eval api. Default at 100.
kpt_oks_sigmas: The sigmas used to calculate keypoint OKS. See
http://cocodataset.org/#keypoints-eval. When None, it will use the
defaults in COCO.
Raises:
ValueError: if max_num_eval_detections is not an integer.
"""
if annotation_file:
if annotation_file.startswith('gs://'):
_, local_val_json = tempfile.mkstemp(suffix='.json')
tf.io.gfile.remove(local_val_json)
tf.io.gfile.copy(annotation_file, local_val_json)
atexit.register(tf.io.gfile.remove, local_val_json)
else:
local_val_json = annotation_file
self._coco_gt = coco_utils.COCOWrapper(
eval_type=('mask' if include_mask else 'box'),
annotation_file=local_val_json)
self._annotation_file = annotation_file
self._include_mask = include_mask
self._include_keypoint = include_keypoint
self._per_category_metrics = per_category_metrics
if max_num_eval_detections is None or not isinstance(
max_num_eval_detections, int):
raise ValueError('max_num_eval_detections must be an integer.')
self._metric_names = [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', 'ARmax10',
f'ARmax{max_num_eval_detections}', 'ARs', 'ARm', 'ARl'
]
self.max_num_eval_detections = max_num_eval_detections
self._required_prediction_fields = [
'source_id', 'num_detections', 'detection_classes', 'detection_scores',
'detection_boxes'
]
self._need_rescale_bboxes = need_rescale_bboxes
self._need_rescale_keypoints = need_rescale_keypoints
if self._need_rescale_bboxes or self._need_rescale_keypoints:
self._required_prediction_fields.append('image_info')
self._required_groundtruth_fields = [
'source_id', 'height', 'width', 'classes', 'boxes'
]
if self._include_mask:
mask_metric_names = ['mask_' + x for x in self._metric_names]
self._metric_names.extend(mask_metric_names)
self._required_prediction_fields.extend(['detection_masks'])
self._required_groundtruth_fields.extend(['masks'])
if self._include_keypoint:
keypoint_metric_names = [
'AP', 'AP50', 'AP75', 'APm', 'APl', 'ARmax1', 'ARmax10',
f'ARmax{max_num_eval_detections}', 'ARm', 'ARl'
]
keypoint_metric_names = ['keypoint_' + x for x in keypoint_metric_names]
self._metric_names.extend(keypoint_metric_names)
self._required_prediction_fields.extend(['detection_keypoints'])
self._required_groundtruth_fields.extend(['keypoints'])
self._kpt_oks_sigmas = kpt_oks_sigmas
self.reset_states()
@property
def name(self):
return 'coco_metric'
def reset_states(self):
"""Resets internal states for a fresh run."""
self._predictions = {}
if not self._annotation_file:
self._groundtruths = {}
def result(self):
"""Evaluates detection results, and reset_states."""
metric_dict = self.evaluate()
# Cleans up the internal variables in order for a fresh eval next time.
self.reset_states()
return metric_dict
def evaluate(self):
"""Evaluates with detections from all images with COCO API.
Returns:
coco_metric: float numpy array with shape [24] representing the
coco-style evaluation metrics (box and mask).
"""
if not self._annotation_file:
logging.info('There is no annotation_file in COCOEvaluator.')
gt_dataset = coco_utils.convert_groundtruths_to_coco_dataset(
self._groundtruths)
coco_gt = coco_utils.COCOWrapper(
eval_type=('mask' if self._include_mask else 'box'),
gt_dataset=gt_dataset)
else:
logging.info('Using annotation file: %s', self._annotation_file)
coco_gt = self._coco_gt
coco_predictions = coco_utils.convert_predictions_to_coco_annotations(
self._predictions)
coco_dt = coco_gt.loadRes(predictions=coco_predictions)
image_ids = [ann['image_id'] for ann in coco_predictions]
coco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='bbox')
coco_eval.params.imgIds = image_ids
coco_eval.params.maxDets[2] = self.max_num_eval_detections
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_metrics = coco_eval.stats
metrics = coco_metrics
if self._include_mask:
mcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='segm')
mcoco_eval.params.imgIds = image_ids
mcoco_eval.evaluate()
mcoco_eval.accumulate()
mcoco_eval.summarize()
mask_coco_metrics = mcoco_eval.stats
metrics = np.hstack((metrics, mask_coco_metrics))
if self._include_keypoint:
kcoco_eval = cocoeval.COCOeval(coco_gt, coco_dt, iouType='keypoints',
kpt_oks_sigmas=self._kpt_oks_sigmas)
kcoco_eval.params.imgIds = image_ids
kcoco_eval.evaluate()
kcoco_eval.accumulate()
kcoco_eval.summarize()
keypoint_coco_metrics = kcoco_eval.stats
metrics = np.hstack((metrics, keypoint_coco_metrics))
metrics_dict = {}
for i, name in enumerate(self._metric_names):
metrics_dict[name] = metrics[i].astype(np.float32)
# Adds metrics per category.
if self._per_category_metrics:
metrics_dict.update(self._retrieve_per_category_metrics(coco_eval))
if self._include_mask:
metrics_dict.update(self._retrieve_per_category_metrics(
mcoco_eval, prefix='mask'))
if self._include_keypoint:
metrics_dict.update(self._retrieve_per_category_metrics(
            kcoco_eval, prefix='keypoints'))
return metrics_dict
def _retrieve_per_category_metrics(self, coco_eval, prefix=''):
"""Retrieves and per-category metrics and retuns them in a dict.
Args:
coco_eval: a cocoeval.COCOeval object containing evaluation data.
prefix: str, A string used to prefix metric names.
Returns:
metrics_dict: A dictionary with per category metrics.
"""
metrics_dict = {}
if prefix:
prefix = prefix + ' '
if hasattr(coco_eval, 'category_stats'):
for category_index, category_id in enumerate(coco_eval.params.catIds):
if self._annotation_file:
coco_category = self._coco_gt.cats[category_id]
# if 'name' is available use it, otherwise use `id`
category_display_name = coco_category.get('name', category_id)
else:
category_display_name = category_id
if 'keypoints' in prefix:
metrics_dict_keys = [
'Precision mAP ByCategory',
'Precision mAP ByCategory@50IoU',
'Precision mAP ByCategory@75IoU',
'Precision mAP ByCategory (medium)',
'Precision mAP ByCategory (large)',
'Recall AR@1 ByCategory',
'Recall AR@10 ByCategory',
'Recall AR@100 ByCategory',
'Recall AR (medium) ByCategory',
'Recall AR (large) ByCategory',
]
else:
metrics_dict_keys = [
'Precision mAP ByCategory',
'Precision mAP ByCategory@50IoU',
'Precision mAP ByCategory@75IoU',
'Precision mAP ByCategory (small)',
'Precision mAP ByCategory (medium)',
'Precision mAP ByCategory (large)',
'Recall AR@1 ByCategory',
'Recall AR@10 ByCategory',
'Recall AR@100 ByCategory',
'Recall AR (small) ByCategory',
'Recall AR (medium) ByCategory',
'Recall AR (large) ByCategory',
]
for idx, key in enumerate(metrics_dict_keys):
metrics_dict[prefix + key + '/{}'.format(
category_display_name)] = coco_eval.category_stats[idx][
category_index].astype(np.float32)
return metrics_dict
def _process_bbox_predictions(self, predictions):
image_scale = np.tile(predictions['image_info'][:, 2:3, :], (1, 1, 2))
predictions['detection_boxes'] = (
predictions['detection_boxes'].astype(np.float32))
predictions['detection_boxes'] /= image_scale
if 'detection_outer_boxes' in predictions:
predictions['detection_outer_boxes'] = (
predictions['detection_outer_boxes'].astype(np.float32))
predictions['detection_outer_boxes'] /= image_scale
def _process_keypoints_predictions(self, predictions):
image_scale = tf.reshape(predictions['image_info'][:, 2:3, :],
[-1, 1, 1, 2])
predictions['detection_keypoints'] = (
predictions['detection_keypoints'].astype(np.float32))
predictions['detection_keypoints'] /= image_scale
def _convert_to_numpy(self, groundtruths, predictions):
"""Converts tesnors to numpy arrays."""
if groundtruths:
labels = tf.nest.map_structure(lambda x: x.numpy(), groundtruths)
numpy_groundtruths = {}
for key, val in labels.items():
if isinstance(val, tuple):
val = np.concatenate(val)
numpy_groundtruths[key] = val
else:
numpy_groundtruths = groundtruths
if predictions:
outputs = tf.nest.map_structure(lambda x: x.numpy(), predictions)
numpy_predictions = {}
for key, val in outputs.items():
if isinstance(val, tuple):
val = np.concatenate(val)
numpy_predictions[key] = val
else:
numpy_predictions = predictions
return numpy_groundtruths, numpy_predictions
def update_state(self, groundtruths, predictions):
"""Update and aggregate detection results and ground-truth data.
Args:
groundtruths: a dictionary of Tensors including the fields below.
See also different parsers under `../dataloader` for more details.
Required fields:
- source_id: a numpy array of int or string of shape [batch_size].
- height: a numpy array of int of shape [batch_size].
- width: a numpy array of int of shape [batch_size].
- num_detections: a numpy array of int of shape [batch_size].
- boxes: a numpy array of float of shape [batch_size, K, 4].
- classes: a numpy array of int of shape [batch_size, K].
Optional fields:
- is_crowds: a numpy array of int of shape [batch_size, K]. If the
field is absent, it is assumed that this instance is not crowd.
          - areas: a numpy array of float of shape [batch_size, K]. If the
field is absent, the area is calculated using either boxes or
masks depending on which one is available.
- masks: a numpy array of float of shape
[batch_size, K, mask_height, mask_width],
predictions: a dictionary of tensors including the fields below.
See different parsers under `../dataloader` for more details.
Required fields:
- source_id: a numpy array of int or string of shape [batch_size].
- image_info [if `need_rescale_bboxes` is True]: a numpy array of
float of shape [batch_size, 4, 2].
- num_detections: a numpy array of
int of shape [batch_size].
- detection_boxes: a numpy array of float of shape [batch_size, K, 4].
- detection_classes: a numpy array of int of shape [batch_size, K].
- detection_scores: a numpy array of float of shape [batch_size, K].
Optional fields:
- detection_masks: a numpy array of float of shape
[batch_size, K, mask_height, mask_width].
Raises:
ValueError: if the required prediction or ground-truth fields are not
present in the incoming `predictions` or `groundtruths`.
"""
groundtruths, predictions = self._convert_to_numpy(groundtruths,
predictions)
for k in self._required_prediction_fields:
if k not in predictions:
raise ValueError(
'Missing the required key `{}` in predictions!'.format(k))
if self._need_rescale_bboxes:
self._process_bbox_predictions(predictions)
if self._need_rescale_keypoints:
self._process_keypoints_predictions(predictions)
for k, v in six.iteritems(predictions):
if k not in self._predictions:
self._predictions[k] = [v]
else:
self._predictions[k].append(v)
if not self._annotation_file:
assert groundtruths
for k in self._required_groundtruth_fields:
if k not in groundtruths:
raise ValueError(
'Missing the required key `{}` in groundtruths!'.format(k))
for k, v in six.iteritems(groundtruths):
if k not in self._groundtruths:
self._groundtruths[k] = [v]
else:
self._groundtruths[k].append(v)
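# Example usage (editor's addition, not part of the original file): a minimal
# sketch of running COCOEvaluator without an annotation file, so ground-truths
# are accumulated from dataloader-style dictionaries. All tensor values below
# are made up for illustration.
def _example_coco_evaluator_usage():
  evaluator = COCOEvaluator(annotation_file=None, include_mask=False)
  groundtruths = {
      'source_id': tf.constant([1], dtype=tf.int64),
      'height': tf.constant([640], dtype=tf.int64),
      'width': tf.constant([640], dtype=tf.int64),
      'num_detections': tf.constant([1], dtype=tf.int64),
      # Boxes are [ymin, xmin, ymax, xmax] in absolute image coordinates.
      'boxes': tf.constant([[[10., 10., 100., 100.]]]),
      'classes': tf.constant([[1]], dtype=tf.int64),
  }
  predictions = {
      'source_id': tf.constant([1], dtype=tf.int64),
      # image_info rows: [original size], [desired size], [scale], [offset].
      'image_info': tf.constant(
          [[[640., 640.], [640., 640.], [1., 1.], [0., 0.]]]),
      'num_detections': tf.constant([1], dtype=tf.int64),
      'detection_boxes': tf.constant([[[10., 10., 100., 100.]]]),
      'detection_classes': tf.constant([[1]], dtype=tf.int64),
      'detection_scores': tf.constant([[0.9]]),
  }
  evaluator.update_state(groundtruths, predictions)
  # Returns a dict of COCO metrics, e.g. {'AP': ..., 'AP50': ..., ...}.
  return evaluator.result()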
| 15,591 | 39.393782 | 80 | py |
models | models-master/official/vision/evaluation/panoptic_quality_evaluator_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for panoptic_quality_evaluator."""
import numpy as np
import tensorflow as tf
from official.vision.evaluation import panoptic_quality_evaluator
class PanopticQualityEvaluatorTest(tf.test.TestCase):
def test_multiple_batches(self):
category_mask = np.zeros([6, 6], np.uint16)
groundtruth_instance_mask = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 1],
[1, 2, 2, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
good_det_instance_mask = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 1],
[1, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
groundtruths = {
'category_mask':
tf.convert_to_tensor([category_mask]),
'instance_mask':
tf.convert_to_tensor([groundtruth_instance_mask]),
'image_info':
tf.convert_to_tensor([[[6, 6], [6, 6], [1.0, 1.0], [0, 0]]],
dtype=tf.float32)
}
predictions = {
'category_mask': tf.convert_to_tensor([category_mask]),
'instance_mask': tf.convert_to_tensor([good_det_instance_mask])
}
pq_evaluator = panoptic_quality_evaluator.PanopticQualityEvaluator(
num_categories=1,
ignored_label=2,
max_instances_per_category=16,
offset=16,
rescale_predictions=True)
for _ in range(2):
pq_evaluator.update_state(groundtruths, predictions)
bad_det_instance_mask = np.array([
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 2, 2, 1],
[1, 1, 1, 1, 1, 1],
],
dtype=np.uint16)
predictions['instance_mask'] = tf.convert_to_tensor([bad_det_instance_mask])
for _ in range(2):
pq_evaluator.update_state(groundtruths, predictions)
results = pq_evaluator.result()
np.testing.assert_array_equal(results['pq_per_class'],
[((28 / 30 + 6 / 8) + (27 / 32)) / 2 / 2])
np.testing.assert_array_equal(results['rq_per_class'], [3 / 4])
np.testing.assert_array_equal(results['sq_per_class'],
[((28 / 30 + 6 / 8) + (27 / 32)) / 3])
self.assertAlmostEqual(results['All_pq'], 0.63177083)
self.assertAlmostEqual(results['All_rq'], 0.75)
self.assertAlmostEqual(results['All_sq'], 0.84236111)
self.assertEqual(results['All_num_categories'], 1)
if __name__ == '__main__':
tf.test.main()
| 3,323 | 33.268041 | 80 | py |
models | models-master/official/vision/evaluation/coco_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions related to pycocotools and COCO eval."""
import copy
import json
# Import libraries
from absl import logging
import numpy as np
from PIL import Image
from pycocotools import coco
from pycocotools import mask as mask_api
import six
import tensorflow as tf
from official.common import dataset_fn
from official.vision.dataloaders import tf_example_decoder
from official.vision.ops import box_ops
from official.vision.ops import mask_ops
class COCOWrapper(coco.COCO):
"""COCO wrapper class.
This class wraps COCO API object, which provides the following additional
functionalities:
1. Support string type image id.
2. Support loading the ground-truth dataset using the external annotation
dictionary.
3. Support loading the prediction results using the external annotation
dictionary.
"""
def __init__(self, eval_type='box', annotation_file=None, gt_dataset=None):
"""Instantiates a COCO-style API object.
Args:
eval_type: either 'box' or 'mask'.
annotation_file: a JSON file that stores annotations of the eval dataset.
This is required if `gt_dataset` is not provided.
      gt_dataset: the ground-truth eval dataset in COCO API format.
"""
if ((annotation_file and gt_dataset) or
((not annotation_file) and (not gt_dataset))):
raise ValueError('One and only one of `annotation_file` and `gt_dataset` '
'needs to be specified.')
if eval_type not in ['box', 'mask']:
raise ValueError('The `eval_type` can only be either `box` or `mask`.')
coco.COCO.__init__(self, annotation_file=annotation_file)
self._eval_type = eval_type
if gt_dataset:
self.dataset = gt_dataset
self.createIndex()
def loadRes(self, predictions):
"""Loads result file and return a result api object.
Args:
predictions: a list of dictionary each representing an annotation in COCO
format. The required fields are `image_id`, `category_id`, `score`,
`bbox`, `segmentation`.
Returns:
res: result COCO api object.
Raises:
      ValueError: if the set of image id from predictions is not the subset of
the set of image id of the ground-truth dataset.
"""
res = coco.COCO()
res.dataset['images'] = copy.deepcopy(self.dataset['images'])
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
image_ids = [ann['image_id'] for ann in predictions]
if set(image_ids) != (set(image_ids) & set(self.getImgIds())):
raise ValueError('Results do not correspond to the current dataset!')
for ann in predictions:
x1, x2, y1, y2 = [ann['bbox'][0], ann['bbox'][0] + ann['bbox'][2],
ann['bbox'][1], ann['bbox'][1] + ann['bbox'][3]]
if self._eval_type == 'box':
ann['area'] = ann['bbox'][2] * ann['bbox'][3]
ann['segmentation'] = [
[x1, y1, x1, y2, x2, y2, x2, y1]]
elif self._eval_type == 'mask':
ann['area'] = mask_api.area(ann['segmentation'])
res.dataset['annotations'] = copy.deepcopy(predictions)
res.createIndex()
return res
def convert_predictions_to_coco_annotations(predictions):
"""Converts a batch of predictions to annotations in COCO format.
Args:
predictions: a dictionary of lists of numpy arrays including the following
fields. 'K' below denotes the maximum number of instances per image.
Required fields:
- source_id: a list of numpy arrays of int or string of shape
[batch_size].
- detection_boxes: a list of numpy arrays of float of shape
[batch_size, K, 4], where coordinates are in the original image
space (not the scaled image space).
- detection_classes: a list of numpy arrays of int of shape
[batch_size, K].
- detection_scores: a list of numpy arrays of float of shape
[batch_size, K].
Optional fields:
- detection_masks: a list of numpy arrays of float of shape
[batch_size, K, mask_height, mask_width].
- detection_keypoints: a list of numpy arrays of float of shape
[batch_size, K, num_keypoints, 2]
Returns:
coco_predictions: prediction in COCO annotation format.
"""
coco_predictions = []
num_batches = len(predictions['source_id'])
max_num_detections = predictions['detection_classes'][0].shape[1]
use_outer_box = 'detection_outer_boxes' in predictions
for i in range(num_batches):
predictions['detection_boxes'][i] = box_ops.yxyx_to_xywh(
predictions['detection_boxes'][i])
if use_outer_box:
predictions['detection_outer_boxes'][i] = box_ops.yxyx_to_xywh(
predictions['detection_outer_boxes'][i])
mask_boxes = predictions['detection_outer_boxes']
else:
mask_boxes = predictions['detection_boxes']
batch_size = predictions['source_id'][i].shape[0]
if 'detection_keypoints' in predictions:
# Adds extra ones to indicate the visibility for each keypoint as is
# recommended by MSCOCO. Also, convert keypoint from [y, x] to [x, y]
# as mandated by COCO.
num_keypoints = predictions['detection_keypoints'][i].shape[2]
coco_keypoints = np.concatenate(
[
predictions['detection_keypoints'][i][..., 1:],
predictions['detection_keypoints'][i][..., :1],
np.ones([batch_size, max_num_detections, num_keypoints, 1]),
],
axis=-1,
).astype(int)
for j in range(batch_size):
if 'detection_masks' in predictions:
image_masks = mask_ops.paste_instance_masks(
predictions['detection_masks'][i][j],
mask_boxes[i][j],
int(predictions['image_info'][i][j, 0, 0]),
int(predictions['image_info'][i][j, 0, 1]),
)
binary_masks = (image_masks > 0.0).astype(np.uint8)
encoded_masks = [
mask_api.encode(np.asfortranarray(binary_mask))
for binary_mask in list(binary_masks)
]
for k in range(max_num_detections):
ann = {}
ann['image_id'] = predictions['source_id'][i][j]
ann['category_id'] = predictions['detection_classes'][i][j, k]
ann['bbox'] = predictions['detection_boxes'][i][j, k]
ann['score'] = predictions['detection_scores'][i][j, k]
if 'detection_masks' in predictions:
ann['segmentation'] = encoded_masks[k]
if 'detection_keypoints' in predictions:
ann['keypoints'] = coco_keypoints[j, k].flatten().tolist()
coco_predictions.append(ann)
for i, ann in enumerate(coco_predictions):
ann['id'] = i + 1
return coco_predictions
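# A minimal sketch (editor's addition, not part of the original file) showing
# the list-of-batches layout this converter expects; all values are made up.
def _example_convert_predictions():
  predictions = {
      # One batch containing a single image with a single detection (K = 1).
      'source_id': [np.array([1], dtype=np.int64)],
      'detection_boxes': [np.array([[[10., 10., 100., 100.]]])],  # yxyx
      'detection_classes': [np.array([[1]], dtype=np.int64)],
      'detection_scores': [np.array([[0.9]])],
  }
  # Returns a list of COCO-style annotation dicts with xywh `bbox` fields.
  return convert_predictions_to_coco_annotations(predictions)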
def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):
"""Converts ground-truths to the dataset in COCO format.
Args:
groundtruths: a dictionary of numpy arrays including the fields below.
Note that each element in the list represent the number for a single
example without batch dimension. 'K' below denotes the actual number of
instances for each image.
Required fields:
- source_id: a list of numpy arrays of int or string of shape
[batch_size].
- height: a list of numpy arrays of int of shape [batch_size].
- width: a list of numpy arrays of int of shape [batch_size].
- num_detections: a list of numpy arrays of int of shape [batch_size].
- boxes: a list of numpy arrays of float of shape [batch_size, K, 4],
where coordinates are in the original image space (not the
normalized coordinates).
- classes: a list of numpy arrays of int of shape [batch_size, K].
Optional fields:
- is_crowds: a list of numpy arrays of int of shape [batch_size, K]. If
            the field is absent, it is assumed that this instance is not crowd.
        - areas: a list of numpy arrays of float of shape [batch_size, K]. If the
field is absent, the area is calculated using either boxes or
masks depending on which one is available.
- masks: a list of numpy arrays of string of shape [batch_size, K],
label_map: (optional) a dictionary that defines items from the category id
to the category name. If `None`, collect the category mapping from the
`groundtruths`.
Returns:
coco_groundtruths: the ground-truth dataset in COCO format.
"""
source_ids = np.concatenate(groundtruths['source_id'], axis=0)
heights = np.concatenate(groundtruths['height'], axis=0)
widths = np.concatenate(groundtruths['width'], axis=0)
gt_images = [{'id': int(i), 'height': int(h), 'width': int(w)} for i, h, w
in zip(source_ids, heights, widths)]
gt_annotations = []
num_batches = len(groundtruths['source_id'])
for i in range(num_batches):
logging.log_every_n(
logging.INFO,
'convert_groundtruths_to_coco_dataset: Processing annotation %d', 100,
i)
max_num_instances = groundtruths['classes'][i].shape[1]
batch_size = groundtruths['source_id'][i].shape[0]
for j in range(batch_size):
num_instances = groundtruths['num_detections'][i][j]
if num_instances > max_num_instances:
logging.warning(
'num_groundtruths is larger than max_num_instances, %d v.s. %d',
num_instances, max_num_instances)
num_instances = max_num_instances
for k in range(int(num_instances)):
ann = {}
ann['image_id'] = int(groundtruths['source_id'][i][j])
if 'is_crowds' in groundtruths:
ann['iscrowd'] = int(groundtruths['is_crowds'][i][j, k])
else:
ann['iscrowd'] = 0
ann['category_id'] = int(groundtruths['classes'][i][j, k])
boxes = groundtruths['boxes'][i]
ann['bbox'] = [
float(boxes[j, k, 1]),
float(boxes[j, k, 0]),
float(boxes[j, k, 3] - boxes[j, k, 1]),
float(boxes[j, k, 2] - boxes[j, k, 0])]
if 'areas' in groundtruths:
ann['area'] = float(groundtruths['areas'][i][j, k])
else:
ann['area'] = float(
(boxes[j, k, 3] - boxes[j, k, 1]) *
(boxes[j, k, 2] - boxes[j, k, 0]))
if 'masks' in groundtruths:
if isinstance(groundtruths['masks'][i][j, k], tf.Tensor):
mask = Image.open(
six.BytesIO(groundtruths['masks'][i][j, k].numpy()))
else:
mask = Image.open(
six.BytesIO(groundtruths['masks'][i][j, k]))
np_mask = np.array(mask, dtype=np.uint8)
np_mask[np_mask > 0] = 255
encoded_mask = mask_api.encode(np.asfortranarray(np_mask))
ann['segmentation'] = encoded_mask
# Ensure the content of `counts` is JSON serializable string.
if 'counts' in ann['segmentation']:
ann['segmentation']['counts'] = six.ensure_str(
ann['segmentation']['counts'])
if 'areas' not in groundtruths:
ann['area'] = mask_api.area(encoded_mask)
if 'keypoints' in groundtruths:
keypoints = groundtruths['keypoints'][i]
coco_keypoints = []
num_valid_keypoints = 0
for z in range(len(keypoints[j, k, :, 1])):
# Convert from [y, x] to [x, y] as mandated by COCO.
x = float(keypoints[j, k, z, 1])
y = float(keypoints[j, k, z, 0])
coco_keypoints.append(x)
coco_keypoints.append(y)
if tf.math.is_nan(x) or tf.math.is_nan(y) or (
x == 0 and y == 0):
visibility = 0
else:
visibility = 2
num_valid_keypoints = num_valid_keypoints + 1
coco_keypoints.append(visibility)
ann['keypoints'] = coco_keypoints
ann['num_keypoints'] = num_valid_keypoints
gt_annotations.append(ann)
for i, ann in enumerate(gt_annotations):
ann['id'] = i + 1
if label_map:
gt_categories = [{'id': i, 'name': label_map[i]} for i in label_map]
else:
category_ids = [gt['category_id'] for gt in gt_annotations]
gt_categories = [{'id': i} for i in set(category_ids)]
gt_dataset = {
'images': gt_images,
'categories': gt_categories,
'annotations': copy.deepcopy(gt_annotations),
}
return gt_dataset
class COCOGroundtruthGenerator:
"""Generates the ground-truth annotations from a single example."""
def __init__(self, file_pattern, file_type, num_examples, include_mask,
regenerate_source_id=False):
self._file_pattern = file_pattern
self._num_examples = num_examples
self._include_mask = include_mask
self._dataset_fn = dataset_fn.pick_dataset_fn(file_type)
self._regenerate_source_id = regenerate_source_id
def _parse_single_example(self, example):
"""Parses a single serialized tf.Example proto.
Args:
example: a serialized tf.Example proto string.
Returns:
A dictionary of ground-truth with the following fields:
source_id: a scalar tensor of int64 representing the image source_id.
height: a scalar tensor of int64 representing the image height.
width: a scalar tensor of int64 representing the image width.
boxes: a float tensor of shape [K, 4], representing the ground-truth
boxes in absolute coordinates with respect to the original image size.
classes: a int64 tensor of shape [K], representing the class labels of
each instances.
is_crowds: a bool tensor of shape [K], indicating whether the instance
is crowd.
areas: a float tensor of shape [K], indicating the area of each
instance.
masks: a string tensor of shape [K], containing the bytes of the png
mask of each instance.
"""
decoder = tf_example_decoder.TfExampleDecoder(
include_mask=self._include_mask,
regenerate_source_id=self._regenerate_source_id)
decoded_tensors = decoder.decode(example)
image = decoded_tensors['image']
image_size = tf.shape(image)[0:2]
boxes = box_ops.denormalize_boxes(
decoded_tensors['groundtruth_boxes'], image_size)
source_id = decoded_tensors['source_id']
if source_id.dtype is tf.string:
source_id = tf.strings.to_number(source_id, out_type=tf.int64)
groundtruths = {
'source_id': source_id,
'height': decoded_tensors['height'],
'width': decoded_tensors['width'],
'num_detections': tf.shape(decoded_tensors['groundtruth_classes'])[0],
'boxes': boxes,
'classes': decoded_tensors['groundtruth_classes'],
'is_crowds': decoded_tensors['groundtruth_is_crowd'],
'areas': decoded_tensors['groundtruth_area'],
}
if self._include_mask:
groundtruths.update({
'masks': decoded_tensors['groundtruth_instance_masks_png'],
})
return groundtruths
def _build_pipeline(self):
"""Builds data pipeline to generate ground-truth annotations."""
dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False)
dataset = dataset.interleave(
map_func=lambda filename: self._dataset_fn(filename).prefetch(1),
cycle_length=None,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.take(self._num_examples)
dataset = dataset.map(self._parse_single_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(1, drop_remainder=False)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def __call__(self):
return self._build_pipeline()
def scan_and_generator_annotation_file(file_pattern: str,
file_type: str,
num_samples: int,
include_mask: bool,
annotation_file: str,
regenerate_source_id: bool = False):
"""Scans and generate the COCO-style annotation JSON file given a dataset."""
groundtruth_generator = COCOGroundtruthGenerator(
file_pattern, file_type, num_samples, include_mask, regenerate_source_id)
generate_annotation_file(groundtruth_generator, annotation_file)
def generate_annotation_file(groundtruth_generator,
annotation_file):
"""Generates COCO-style annotation JSON file given a ground-truth generator."""
groundtruths = {}
logging.info('Loading groundtruth annotations from dataset to memory...')
for i, groundtruth in enumerate(groundtruth_generator()):
logging.log_every_n(logging.INFO,
'generate_annotation_file: Processing annotation %d',
100, i)
for k, v in six.iteritems(groundtruth):
if k not in groundtruths:
groundtruths[k] = [v]
else:
groundtruths[k].append(v)
gt_dataset = convert_groundtruths_to_coco_dataset(groundtruths)
logging.info('Saving groundtruth annotations to the JSON file...')
with tf.io.gfile.GFile(annotation_file, 'w') as f:
f.write(json.dumps(gt_dataset))
logging.info('Done saving the JSON file...')
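# Example (editor's addition, not part of the original file): generating a
# COCO-style annotation JSON from eval TFRecords. The file pattern, sample
# count and output path below are placeholders.
def _example_generate_annotation_file():
  scan_and_generator_annotation_file(
      file_pattern='/path/to/val*.tfrecord',
      file_type='tfrecord',
      num_samples=5000,
      include_mask=False,
      annotation_file='/tmp/instances_val.json')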
| 17,870 | 40.082759 | 81 | py |
models | models-master/official/vision/examples/starter/example_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sample model implementation.
This is only a dummy example to showcase how a model is composed. It is usually
not needed to implement a model from scratch. Most SoTA models can be found and
directly used from `official/vision/modeling` directory.
"""
from typing import Any, Mapping
# Import libraries
import tensorflow as tf
from official.vision.examples.starter import example_config as example_cfg
class ExampleModel(tf.keras.Model):
"""A example model class.
A model is a subclass of tf.keras.Model where layers are built in the
constructor.
"""
def __init__(
self,
num_classes: int,
input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec(
shape=[None, None, None, 3]),
**kwargs):
"""Initializes the example model.
All layers are defined in the constructor, and config is recorded in the
`_config_dict` object for serialization.
Args:
num_classes: The number of classes in classification task.
input_specs: A `tf.keras.layers.InputSpec` spec of the input tensor.
**kwargs: Additional keyword arguments to be passed.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:], name=input_specs.name)
outputs = tf.keras.layers.Conv2D(
filters=16, kernel_size=3, strides=2, padding='same', use_bias=False)(
inputs)
outputs = tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=2, padding='same', use_bias=False)(
outputs)
outputs = tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=2, padding='same', use_bias=False)(
outputs)
outputs = tf.keras.layers.GlobalAveragePooling2D()(outputs)
outputs = tf.keras.layers.Dense(1024, activation='relu')(outputs)
outputs = tf.keras.layers.Dense(num_classes)(outputs)
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
self._input_specs = input_specs
self._config_dict = {'num_classes': num_classes, 'input_specs': input_specs}
def get_config(self) -> Mapping[str, Any]:
"""Gets the config of this model."""
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
"""Constructs an instance of this model from input config."""
return cls(**config)
def build_example_model(input_specs: tf.keras.layers.InputSpec,
model_config: example_cfg.ExampleModel,
**kwargs) -> tf.keras.Model:
"""Builds and returns the example model.
This function is the main entry point to build a model. Commonly, it builds a
model by building a backbone, decoder and head. An example of building a
classification model is at
third_party/tensorflow_models/official/vision/modeling/backbones/resnet.py.
However, it is not mandatory for all models to have these three pieces
exactly. Depending on the task, model can be as simple as the example model
here or more complex, such as multi-head architecture.
Args:
input_specs: The specs of the input layer that defines input size.
model_config: The config containing parameters to build a model.
**kwargs: Additional keyword arguments to be passed.
Returns:
A tf.keras.Model object.
"""
return ExampleModel(
num_classes=model_config.num_classes, input_specs=input_specs, **kwargs)
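# Example usage (editor's addition, not part of the original file): building
# the example model from an `ExampleModel` config and running a dummy forward
# pass. The input size and number of classes are arbitrary.
def _example_build_and_run():
  model_config = example_cfg.ExampleModel(
      num_classes=10, input_size=[128, 128, 3])
  input_specs = tf.keras.layers.InputSpec(
      shape=[None] + model_config.input_size)
  model = build_example_model(
      input_specs=input_specs, model_config=model_config)
  # Dummy forward pass; the returned logits have shape [2, 10].
  return model(tf.zeros([2, 128, 128, 3]), training=False)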
| 3,933 | 37.568627 | 80 | py |
models | models-master/official/vision/examples/starter/example_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example experiment configuration definition."""
import dataclasses
from typing import List
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
@dataclasses.dataclass
class ExampleDataConfig(cfg.DataConfig):
"""Input config for training. Add more fields as needed."""
input_path: str = ''
global_batch_size: int = 0
is_training: bool = True
dtype: str = 'float32'
shuffle_buffer_size: int = 10000
cycle_length: int = 10
file_type: str = 'tfrecord'
@dataclasses.dataclass
class ExampleModel(hyperparams.Config):
"""The model config. Used by build_example_model function."""
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class Losses(hyperparams.Config):
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
top_k: int = 5
@dataclasses.dataclass
class ExampleTask(cfg.TaskConfig):
"""The task config."""
model: ExampleModel = ExampleModel()
train_data: ExampleDataConfig = ExampleDataConfig(is_training=True)
validation_data: ExampleDataConfig = ExampleDataConfig(is_training=False)
losses: Losses = Losses()
evaluation: Evaluation = Evaluation()
@exp_factory.register_config_factory('tf_vision_example_experiment')
def tf_vision_example_experiment() -> cfg.ExperimentConfig:
"""Definition of a full example experiment."""
train_batch_size = 256
eval_batch_size = 256
steps_per_epoch = 10
config = cfg.ExperimentConfig(
task=ExampleTask(
model=ExampleModel(num_classes=10, input_size=[128, 128, 3]),
losses=Losses(l2_weight_decay=1e-4),
train_data=ExampleDataConfig(
input_path='/path/to/train*',
is_training=True,
global_batch_size=train_batch_size),
validation_data=ExampleDataConfig(
input_path='/path/to/valid*',
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=90 * steps_per_epoch,
validation_steps=steps_per_epoch,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 1.6,
'decay_steps': 350 * steps_per_epoch
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
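# Example usage (editor's addition, not part of the original file): the
# experiment registered above can be looked up by name and then overridden,
# e.g. from command-line flags or a YAML file. This assumes the standard
# `exp_factory.get_exp_config` helper.
def _example_get_config() -> cfg.ExperimentConfig:
  config = exp_factory.get_exp_config('tf_vision_example_experiment')
  config.task.train_data.global_batch_size = 64  # illustrative override
  return config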
| 3,867 | 32.059829 | 75 | py |
models | models-master/official/vision/examples/starter/example_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example task definition for image classification."""
from typing import Any, List, Optional, Tuple, Sequence, Mapping
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.modeling import tf_utils
from official.vision.dataloaders import input_reader_factory
from official.vision.examples.starter import example_config as exp_cfg
from official.vision.examples.starter import example_input
from official.vision.examples.starter import example_model
@task_factory.register_task_cls(exp_cfg.ExampleTask)
class ExampleTask(base_task.Task):
"""Class of an example task.
A task is a subclass of base_task.Task that defines model, input, loss, metric
and one training and evaluation step, etc.
"""
def build_model(self) -> tf.keras.Model:
"""Builds a model."""
input_specs = tf.keras.layers.InputSpec(shape=[None] +
self.task_config.model.input_size)
model = example_model.build_example_model(
input_specs=input_specs, model_config=self.task_config.model)
return model
def build_inputs(
self,
params: exp_cfg.ExampleDataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Builds input.
The input from this function is a tf.data.Dataset that has gone through
pre-processing steps, such as augmentation, batching, shuffling, etc.
Args:
params: The experiment config.
input_context: An optional InputContext used by input reader.
Returns:
A tf.data.Dataset object.
"""
num_classes = self.task_config.model.num_classes
input_size = self.task_config.model.input_size
decoder = example_input.Decoder()
parser = example_input.Parser(
output_size=input_size[:2], num_classes=num_classes)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: tf.Tensor,
model_outputs: tf.Tensor,
aux_losses: Optional[Any] = None) -> tf.Tensor:
"""Builds losses for training and validation.
Args:
      labels: Input ground-truth labels.
model_outputs: Output of the model.
      aux_losses: The auxiliary loss tensors, i.e. `losses` in tf.keras.Model.
Returns:
The total loss tensor.
"""
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=True)
total_loss = tf_utils.safe_mean(total_loss)
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self,
training: bool = True) -> Sequence[tf.keras.metrics.Metric]:
"""Gets streaming metrics for training/validation.
This function builds and returns a list of metrics to compute during
training and validation. The list contains objects of subclasses of
tf.keras.metrics.Metric. Training and validation can have different metrics.
Args:
training: Whether the metric is for training or not.
Returns:
A list of tf.keras.metrics.Metric objects.
"""
k = self.task_config.evaluation.top_k
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None) -> Mapping[str, Any]:
"""Does forward and backward.
This example assumes input is a tuple of (features, labels), which follows
the output from data loader, i.e., Parser. The output from Parser is fed
    into train_step to perform one forward and backward pass. Other data
    structures, such as dictionaries, can also be used, as long as they are
    consistent between the output from the Parser and the input used here.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
optimizer: The optimizer for this training step.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
# mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None) -> Mapping[str, Any]:
"""Runs validation step.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
return logs
def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model) -> Any:
"""Performs the forward step. It is used in 'validation_step'."""
return model(inputs, training=False)
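# Example (illustrative only; `task`, `model`, `optimizer` and `metrics` are
# hypothetical objects built by this task's build_model/build_metrics and an
# experiment config): a single manual training and validation step would be
#   logs = task.train_step((images, labels), model, optimizer, metrics)
#   val_logs = task.validation_step((images, labels), model, metrics)
# where `(images, labels)` follows the (features, labels) tuple produced by the
# data loader's Parser.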
| 7,652 | 35.442857 | 80 | py |
models | models-master/official/vision/examples/starter/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration.
Custom models, tasks, configs, etc. need to be imported to the registry so they
can be picked up by the trainer. They can be included in this file so you do
not need to handle each file separately.
"""
# pylint: disable=unused-import
from official.common import registry_imports
from official.vision.examples.starter import example_config
from official.vision.examples.starter import example_input
from official.vision.examples.starter import example_model
from official.vision.examples.starter import example_task
| 1,163 | 40.571429 | 80 | py |
models | models-master/official/vision/examples/starter/example_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example classification decoder and parser.
This file defines the Decoder and Parser used to load data. The example shows
how to load standard tf.Example data, but non-standard tf.Example or other data
formats can be supported by implementing a proper decoder and parser.
"""
from typing import Mapping, List, Tuple
# Import libraries
import tensorflow as tf
from official.vision.dataloaders import decoder
from official.vision.dataloaders import parser
from official.vision.ops import preprocess_ops
class Decoder(decoder.Decoder):
"""A tf.Example decoder for classification task."""
def __init__(self):
"""Initializes the decoder.
The constructor defines the mapping between the field name and the value
from an input tf.Example. For example, we define two fields for image bytes
and labels. There is no limit on the number of fields to decode.
"""
self._keys_to_features = {
'image/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'image/class/label':
tf.io.FixedLenFeature((), tf.int64, default_value=-1)
}
def decode(self,
serialized_example: tf.train.Example) -> Mapping[str, tf.Tensor]:
"""Decodes a tf.Example to a dictionary.
This function decodes a serialized tf.Example to a dictionary. The output
will be consumed by `_parse_train_data` and `_parse_validation_data` in
Parser.
Args:
serialized_example: A serialized tf.Example.
Returns:
A dictionary of field key name and decoded tensor mapping.
"""
return tf.io.parse_single_example(
serialized_example, self._keys_to_features)
class Parser(parser.Parser):
"""Parser to parse an image and its annotations.
  To define your own Parser, the client should override the _parse_train_data
  and _parse_eval_data functions, where decoded tensors are parsed with
  optional pre-processing steps. The output from the two functions can be any
  structure like a tuple, list or dictionary.
"""
def __init__(self, output_size: List[int], num_classes: float):
"""Initializes parameters for parsing annotations in the dataset.
This example only takes two arguments but one can freely add as many
arguments as needed. For example, pre-processing and augmentations usually
happen in Parser, and related parameters can be passed in by this
constructor.
Args:
output_size: `Tensor` or `list` for [height, width] of output image.
num_classes: `float`, number of classes.
"""
self._output_size = output_size
self._num_classes = num_classes
self._dtype = tf.float32
def _parse_data(
self, decoded_tensors: Mapping[str,
tf.Tensor]) -> Tuple[tf.Tensor, tf.Tensor]:
label = tf.cast(decoded_tensors['image/class/label'], dtype=tf.int32)
image_bytes = decoded_tensors['image/encoded']
image = tf.io.decode_jpeg(image_bytes, channels=3)
image = tf.image.resize(
image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)
image = tf.ensure_shape(image, self._output_size + [3])
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
image = tf.image.convert_image_dtype(image, self._dtype)
return image, label
def _parse_train_data(
self, decoded_tensors: Mapping[str,
tf.Tensor]) -> Tuple[tf.Tensor, tf.Tensor]:
"""Parses data for training.
Args:
decoded_tensors: A dictionary of field key name and decoded tensor mapping
from Decoder.
Returns:
A tuple of (image, label) tensors.
"""
return self._parse_data(decoded_tensors)
def _parse_eval_data(
self, decoded_tensors: Mapping[str,
tf.Tensor]) -> Tuple[tf.Tensor, tf.Tensor]:
"""Parses data for evaluation.
Args:
decoded_tensors: A dictionary of field key name and decoded tensor mapping
from Decoder.
Returns:
A tuple of (image, label) tensors.
"""
return self._parse_data(decoded_tensors)
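# Example (a minimal, hypothetical sketch, not part of the library): wiring the
# Decoder and Parser above into a tf.data pipeline. The TFRecord pattern,
# output size and number of classes below are placeholders.
def _example_build_dataset(
    file_pattern: str = '/tmp/train-*.tfrecord') -> tf.data.Dataset:
  example_decoder = Decoder()
  example_parser = Parser(output_size=[224, 224], num_classes=1001)
  def _decode_and_parse(serialized_example: tf.Tensor):
    # Decode the serialized tf.Example, then parse it into (image, label).
    decoded_tensors = example_decoder.decode(serialized_example)
    # pylint: disable=protected-access
    return example_parser._parse_train_data(decoded_tensors)
  dataset = tf.data.Dataset.list_files(file_pattern)
  dataset = dataset.interleave(
      tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)
  dataset = dataset.map(_decode_and_parse, num_parallel_calls=tf.data.AUTOTUNE)
  return dataset.batch(32).prefetch(tf.data.AUTOTUNE)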
| 4,789 | 34.481481 | 80 | py |
models | models-master/official/vision/examples/starter/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision trainer.
All custom registries are imported from registry_imports. Here we use the
default trainer, so we directly call train.main. If you need to customize the
trainer, branch from `official/vision/beta/train.py` and make changes.
"""
from absl import app
from official.common import flags as tfm_flags
from official.vision import train
from official.vision.examples.starter import registry_imports # pylint: disable=unused-import
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,149 | 36.096774 | 94 | py |
models | models-master/official/vision/serving/export_base_v2.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for model export."""
from typing import Dict, Optional, Text, Callable, Any, Union
import tensorflow as tf
from official.core import export_base
class ExportModule(export_base.ExportModule):
"""Base Export Module."""
def __init__(self,
params,
model: tf.keras.Model,
input_signature: Union[tf.TensorSpec, Dict[str, tf.TensorSpec]],
preprocessor: Optional[Callable[..., Any]] = None,
inference_step: Optional[Callable[..., Any]] = None,
postprocessor: Optional[Callable[..., Any]] = None):
"""Initializes a module for export.
Args:
params: A dataclass for parameters to the module.
model: A tf.keras.Model instance to be exported.
input_signature: tf.TensorSpec, e.g.
tf.TensorSpec(shape=[None, 224, 224, 3], dtype=tf.uint8)
preprocessor: An optional callable to preprocess the inputs.
inference_step: An optional callable to forward-pass the model.
postprocessor: An optional callable to postprocess the model outputs.
"""
super().__init__(
params,
model=model,
preprocessor=preprocessor,
inference_step=inference_step,
postprocessor=postprocessor)
self.input_signature = input_signature
@tf.function
def serve(self, inputs):
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.postprocessor(x) if self.postprocessor else x
return x
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
function_keys: A dictionary with keys as the function to create signature
for and values as the signature keys when returns.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for _, def_name in function_keys.items():
signatures[def_name] = self.serve.get_concrete_function(
self.input_signature)
return signatures
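# Example (a minimal sketch; the model, preprocessor and export path are
# hypothetical): wrapping a Keras model in an ExportModule and saving it with
# an inference signature.
def _example_export(export_dir: str = '/tmp/exported_model'):
  model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
  module = ExportModule(
      params=None,
      model=model,
      input_signature=tf.TensorSpec(shape=[None, 4], dtype=tf.float32),
      preprocessor=lambda inputs: tf.cast(inputs, tf.float32) / 255.0)
  signatures = module.get_inference_signatures({'serve': 'serving_default'})
  tf.saved_model.save(module, export_dir, signatures=signatures)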
| 2,731 | 34.947368 | 79 | py |
models | models-master/official/vision/serving/video_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import io
import os
import random
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.core import exp_factory
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.dataloaders import tfexample_utils
from official.vision.serving import video_classification
class VideoClassificationTest(tf.test.TestCase, parameterized.TestCase):
def _get_classification_module(self):
params = exp_factory.get_exp_config('video_classification_ucf101')
params.task.train_data.feature_shape = (8, 64, 64, 3)
params.task.validation_data.feature_shape = (8, 64, 64, 3)
params.task.model.backbone.resnet_3d.model_id = 50
classification_module = video_classification.VideoClassificationModule(
params, batch_size=1, input_image_size=[8, 64, 64])
return classification_module
def _export_from_module(self, module, input_type, save_directory):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module, save_directory, signatures=signatures)
def _get_dummy_input(self, input_type, module=None):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
images = np.random.randint(
low=0, high=255, size=(1, 8, 64, 64, 3), dtype=np.uint8)
# images = np.zeros((1, 8, 64, 64, 3), dtype=np.uint8)
return images, images
elif input_type == 'tf_example':
example = tfexample_utils.make_video_test_example(
image_shape=(64, 64, 3),
audio_shape=(20, 128),
label=random.randint(0, 100)).SerializeToString()
images = tf.nest.map_structure(
tf.stop_gradient,
tf.map_fn(
module._decode_tf_example,
elems=tf.constant([example]),
fn_output_signature={
video_classification.video_input.IMAGE_KEY: tf.string,
}))
images = images[video_classification.video_input.IMAGE_KEY]
return [example], images
else:
raise ValueError(f'{input_type}')
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'tf_example'},
)
def test_export(self, input_type):
tmp_dir = self.get_temp_dir()
module = self._get_classification_module()
self._export_from_module(module, input_type, tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
classification_fn = imported.signatures['serving_default']
images, images_tensor = self._get_dummy_input(input_type, module)
processed_images = tf.nest.map_structure(
tf.stop_gradient,
tf.map_fn(
module._preprocess_image,
elems=images_tensor,
fn_output_signature={
'image': tf.float32,
}))
expected_logits = module.model(processed_images, training=False)
expected_prob = tf.nn.softmax(expected_logits)
out = classification_fn(tf.constant(images))
    # The outputs of the imported model should match those of the original
    # model.
self.assertAllClose(out['logits'].numpy(), expected_logits.numpy())
self.assertAllClose(out['probs'].numpy(), expected_prob.numpy())
if __name__ == '__main__':
tf.test.main()
| 4,222 | 36.04386 | 78 | py |
models | models-master/official/vision/serving/export_tfhub.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to export the image classification as a TF-Hub SavedModel."""
# Import libraries
from absl import app
from absl import flags
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.serving import export_tfhub_lib
FLAGS = flags.FLAGS
_EXPERIMENT = flags.DEFINE_string(
'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco'
)
_EXPORT_DIR = flags.DEFINE_string('export_dir', None, 'The export directory.')
_CHECKPOINT_PATH = flags.DEFINE_string(
'checkpoint_path', None, 'Checkpoint path.'
)
_CONFIG_FILE = flags.DEFINE_multi_string(
'config_file',
default=None,
help=(
'YAML/JSON files which specifies overrides. The override order follows'
' the order of args. Note that each file can be used as an override'
' template to override the default parameters specified in Python. If'
' the same parameter is specified in both `--config_file` and'
' `--params_override`, `config_file` will be used first, followed by'
' params_override.'
),
)
_PARAMS_OVERRIDE = flags.DEFINE_string(
'params_override',
'',
(
'The JSON/YAML file or string which specifies the parameter to be'
        ' overridden on top of the `config_file` template.'
),
)
_BATCH_SIZE = flags.DEFINE_integer('batch_size', None, 'The batch size.')
_INPUT_IMAGE_SIZE = flags.DEFINE_string(
'input_image_size',
'224,224',
(
'The comma-separated string of two integers representing the'
' height,width of the input to the model.'
),
)
_SKIP_LOGITS_LAYER = flags.DEFINE_boolean(
'skip_logits_layer',
False,
'Whether to skip the prediction layer and only output the feature vector.',
)
def main(_):
params = exp_factory.get_exp_config(_EXPERIMENT.value)
for config_file in _CONFIG_FILE.value or []:
try:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True
)
except KeyError:
params = hyperparams.override_params_dict(
params, config_file, is_strict=False
)
if _PARAMS_OVERRIDE.value:
try:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=True
)
except KeyError:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=False
)
params.validate()
params.lock()
export_tfhub_lib.export_model_to_tfhub(
params=params,
batch_size=_BATCH_SIZE.value,
input_image_size=[int(x) for x in _INPUT_IMAGE_SIZE.value.split(',')],
checkpoint_path=_CHECKPOINT_PATH.value,
export_path=_EXPORT_DIR.value,
num_channels=3,
skip_logits_layer=_SKIP_LOGITS_LAYER.value,
)
if __name__ == '__main__':
app.run(main)
| 3,500 | 31.719626 | 79 | py |
models | models-master/official/vision/serving/export_base_v2_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.core.export_base_v2."""
import os
import tensorflow as tf
from official.core import export_base
from official.vision.serving import export_base_v2
class TestModel(tf.keras.Model):
def __init__(self):
super().__init__()
self._dense = tf.keras.layers.Dense(2)
def call(self, inputs):
return {'outputs': self._dense(inputs)}
class ExportBaseTest(tf.test.TestCase):
def test_preprocessor(self):
tmp_dir = self.get_temp_dir()
model = TestModel()
inputs = tf.ones([2, 4], tf.float32)
preprocess_fn = lambda inputs: 2 * inputs
module = export_base_v2.ExportModule(
params=None,
input_signature=tf.TensorSpec(shape=[2, 4]),
model=model,
preprocessor=preprocess_fn)
expected_output = model(preprocess_fn(inputs))
ckpt_path = tf.train.Checkpoint(model=model).save(
os.path.join(tmp_dir, 'ckpt'))
export_dir = export_base.export(
module, ['serving_default'],
export_savedmodel_dir=tmp_dir,
checkpoint_path=ckpt_path,
timestamped=False)
imported = tf.saved_model.load(export_dir)
output = imported.signatures['serving_default'](inputs)
print('output', output)
self.assertAllClose(
output['outputs'].numpy(), expected_output['outputs'].numpy())
def test_postprocessor(self):
tmp_dir = self.get_temp_dir()
model = TestModel()
inputs = tf.ones([2, 4], tf.float32)
postprocess_fn = lambda logits: {'outputs': 2 * logits['outputs']}
module = export_base_v2.ExportModule(
params=None,
model=model,
input_signature=tf.TensorSpec(shape=[2, 4]),
postprocessor=postprocess_fn)
expected_output = postprocess_fn(model(inputs))
ckpt_path = tf.train.Checkpoint(model=model).save(
os.path.join(tmp_dir, 'ckpt'))
export_dir = export_base.export(
module, ['serving_default'],
export_savedmodel_dir=tmp_dir,
checkpoint_path=ckpt_path,
timestamped=False)
imported = tf.saved_model.load(export_dir)
output = imported.signatures['serving_default'](inputs)
self.assertAllClose(
output['outputs'].numpy(), expected_output['outputs'].numpy())
if __name__ == '__main__':
tf.test.main()
| 2,870 | 30.9 | 74 | py |
models | models-master/official/vision/serving/export_tfhub_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to export a TF-Hub SavedModel."""
from typing import List, Optional
# Import libraries
import tensorflow as tf
from official.core import config_definitions as cfg
from official.vision import configs
from official.vision.modeling import factory
def build_model(batch_size: Optional[int],
input_image_size: List[int],
params: cfg.ExperimentConfig,
num_channels: int = 3,
skip_logits_layer: bool = False) -> tf.keras.Model:
"""Builds a model for TF Hub export.
Args:
batch_size: The batch size of input.
input_image_size: A list of [height, width] specifying the input image size.
params: The config used to train the model.
num_channels: The number of input image channels.
skip_logits_layer: Whether to skip the logits layer for image classification
model. Default is False.
Returns:
A tf.keras.Model instance.
Raises:
ValueError: If the task is not supported.
"""
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
model = factory.build_classification_model(
input_specs=input_specs,
model_config=params.task.model,
l2_regularizer=None,
skip_logits_layer=skip_logits_layer)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
return model
def export_model_to_tfhub(batch_size: Optional[int],
input_image_size: List[int],
params: cfg.ExperimentConfig,
checkpoint_path: str,
export_path: str,
num_channels: int = 3,
skip_logits_layer: bool = False):
"""Export a TF2 model to TF-Hub."""
model = build_model(batch_size, input_image_size, params, num_channels,
skip_logits_layer)
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(checkpoint_path).assert_existing_objects_matched()
model.save(export_path, include_optimizer=False, save_format='tf')
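# Example (a minimal, hypothetical sketch): exporting a trained image
# classification checkpoint as a TF-Hub consumable SavedModel. The experiment
# name, checkpoint path and export path below are placeholders.
def _example_export_to_tfhub():
  from official.core import exp_factory  # Local import for the example only.
  params = exp_factory.get_exp_config('resnet_imagenet')
  export_model_to_tfhub(
      batch_size=None,
      input_image_size=[224, 224],
      params=params,
      checkpoint_path='/tmp/resnet_ckpt/ckpt-1',
      export_path='/tmp/resnet_tfhub',
      skip_logits_layer=True)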
| 2,870 | 36.776316 | 80 | py |
models | models-master/official/vision/serving/export_tflite_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to facilitate TFLite model conversion."""
import functools
from typing import Iterator, List, Optional
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.vision import configs
from official.vision import tasks
def create_representative_dataset(
params: cfg.ExperimentConfig,
task: Optional[base_task.Task] = None) -> tf.data.Dataset:
"""Creates a tf.data.Dataset to load images for representative dataset.
Args:
params: An ExperimentConfig.
task: An optional task instance. If it is None, task will be built according
to the task type in params.
Returns:
A tf.data.Dataset instance.
Raises:
ValueError: If task is not supported.
"""
if task is None:
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
task = tasks.image_classification.ImageClassificationTask(params.task)
elif isinstance(params.task, configs.retinanet.RetinaNetTask):
task = tasks.retinanet.RetinaNetTask(params.task)
elif isinstance(params.task, configs.maskrcnn.MaskRCNNTask):
task = tasks.maskrcnn.MaskRCNNTask(params.task)
elif isinstance(params.task,
configs.semantic_segmentation.SemanticSegmentationTask):
task = tasks.semantic_segmentation.SemanticSegmentationTask(params.task)
else:
raise ValueError('Task {} not supported.'.format(type(params.task)))
# Ensure batch size is 1 for TFLite model.
params.task.train_data.global_batch_size = 1
params.task.train_data.dtype = 'float32'
logging.info('Task config: %s', params.task.as_dict())
return task.build_inputs(params=params.task.train_data)
def representative_dataset(
params: cfg.ExperimentConfig,
task: Optional[base_task.Task] = None,
calibration_steps: int = 2000) -> Iterator[List[tf.Tensor]]:
""""Creates representative dataset for input calibration.
Args:
params: An ExperimentConfig.
task: An optional task instance. If it is None, task will be built according
to the task type in params.
calibration_steps: The steps to do calibration.
Yields:
An input image tensor.
"""
dataset = create_representative_dataset(params=params, task=task)
for image, _ in dataset.take(calibration_steps):
# Skip images that do not have 3 channels.
if image.shape[-1] != 3:
continue
yield [image]
def convert_tflite_model(
saved_model_dir: Optional[str] = None,
model: Optional[tf.keras.Model] = None,
quant_type: Optional[str] = None,
params: Optional[cfg.ExperimentConfig] = None,
task: Optional[base_task.Task] = None,
calibration_steps: Optional[int] = 2000,
denylisted_ops: Optional[List[str]] = None,
) -> 'bytes':
"""Converts and returns a TFLite model.
Args:
saved_model_dir: The directory to the SavedModel.
model: An optional tf.keras.Model instance. If `saved_model_dir` is not
available, convert this model to TFLite.
quant_type: The post training quantization (PTQ) method. It can be one of
      `default` (dynamic range), `fp16` (float16), `int8` (integer with float
fallback), `int8_full` (integer only) and None (no quantization).
params: An optional ExperimentConfig to load and preprocess input images to
do calibration for integer quantization.
task: An optional task instance. If it is None, task will be built according
to the task type in params.
calibration_steps: The steps to do calibration.
denylisted_ops: A list of strings containing ops that are excluded from
integer quantization.
Returns:
A converted TFLite model with optional PTQ.
Raises:
    ValueError: If neither `saved_model_dir` nor `model` is provided, or if the
      requested `quant_type` is not supported.
"""
if saved_model_dir:
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
elif model is not None:
converter = tf.lite.TFLiteConverter.from_keras_model(model)
else:
raise ValueError('Either `saved_model_dir` or `model` must be specified.')
if quant_type:
if quant_type.startswith('int8'):
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = functools.partial(
representative_dataset,
params=params,
task=task,
calibration_steps=calibration_steps)
if quant_type.startswith('int8_full'):
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
if quant_type == 'int8_full':
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
if quant_type == 'int8_full_int8_io':
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
if denylisted_ops:
debug_options = tf.lite.experimental.QuantizationDebugOptions(
denylisted_ops=denylisted_ops)
debugger = tf.lite.experimental.QuantizationDebugger(
converter=converter,
debug_dataset=functools.partial(
representative_dataset,
params=params,
calibration_steps=calibration_steps),
debug_options=debug_options)
debugger.run()
return debugger.get_nondebug_quantized_model()
elif quant_type == 'uint8':
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.default_ranges_stats = (-10, 10)
converter.inference_type = tf.uint8
converter.quantized_input_stats = {'input_placeholder': (0., 1.)}
elif quant_type == 'fp16':
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
elif quant_type in ('default', 'qat_fp32_io'):
converter.optimizations = [tf.lite.Optimize.DEFAULT]
elif quant_type == 'qat':
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.uint8 # or tf.int8
converter.inference_output_type = tf.uint8 # or tf.int8
else:
raise ValueError(f'quantization type {quant_type} is not supported.')
return converter.convert()
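# Example (a minimal, hypothetical sketch): post-training int8 quantization of
# an exported SavedModel. The experiment name, calibration steps and paths are
# placeholders.
def _example_convert_to_tflite(tflite_path: str = '/tmp/model.tflite'):
  from official.core import exp_factory  # Local import for the example only.
  params = exp_factory.get_exp_config('resnet_imagenet')
  tflite_model = convert_tflite_model(
      saved_model_dir='/tmp/exported_model/saved_model',
      quant_type='int8',
      params=params,
      calibration_steps=100)
  with tf.io.gfile.GFile(tflite_path, 'wb') as f:
    f.write(tflite_model)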
| 6,907 | 37.592179 | 80 | py |
models | models-master/official/vision/serving/video_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification input and model functions for serving/inference."""
from typing import Mapping, Dict, Text
import tensorflow as tf
from official.vision.dataloaders import video_input
from official.vision.serving import export_base
from official.vision.tasks import video_classification
class VideoClassificationModule(export_base.ExportModule):
"""Video classification Module."""
def _build_model(self):
input_params = self.params.task.train_data
self._num_frames = input_params.feature_shape[0]
self._stride = input_params.temporal_stride
self._min_resize = input_params.min_image_size
self._crop_size = input_params.feature_shape[1]
self._output_audio = input_params.output_audio
task = video_classification.VideoClassificationTask(self.params.task)
return task.build_model()
def _decode_tf_example(self, encoded_inputs: tf.Tensor):
sequence_description = {
# Each image is a string encoding JPEG.
video_input.IMAGE_KEY:
tf.io.FixedLenSequenceFeature((), tf.string),
}
if self._output_audio:
sequence_description[self._params.task.validation_data.audio_feature] = (
tf.io.VarLenFeature(dtype=tf.float32))
_, decoded_tensors = tf.io.parse_single_sequence_example(
encoded_inputs, {}, sequence_description)
for key, value in decoded_tensors.items():
if isinstance(value, tf.SparseTensor):
decoded_tensors[key] = tf.sparse.to_dense(value)
return decoded_tensors
def _preprocess_image(self, image):
image = video_input.process_image(
image=image,
is_training=False,
num_frames=self._num_frames,
stride=self._stride,
num_test_clips=1,
min_resize=self._min_resize,
crop_size=self._crop_size,
num_crops=1)
image = tf.cast(image, tf.float32) # Use config.
features = {'image': image}
return features
def _preprocess_audio(self, audio):
features = {}
audio = tf.cast(audio, dtype=tf.float32) # Use config.
audio = video_input.preprocess_ops_3d.sample_sequence(
audio, 20, random=False, stride=1)
audio = tf.ensure_shape(
audio, self._params.task.validation_data.audio_feature_shape)
features['audio'] = audio
return features
@tf.function
def inference_from_tf_example(
self, encoded_inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
with tf.device('cpu:0'):
if self._output_audio:
inputs = tf.map_fn(
self._decode_tf_example, (encoded_inputs),
fn_output_signature={
video_input.IMAGE_KEY: tf.string,
self._params.task.validation_data.audio_feature: tf.float32
})
return self.serve(inputs['image'], inputs['audio'])
else:
inputs = tf.map_fn(
self._decode_tf_example, (encoded_inputs),
fn_output_signature={
video_input.IMAGE_KEY: tf.string,
})
return self.serve(inputs[video_input.IMAGE_KEY], tf.zeros([1, 1]))
@tf.function
def inference_from_image_tensors(
self, input_frames: tf.Tensor) -> Mapping[str, tf.Tensor]:
return self.serve(input_frames, tf.zeros([1, 1]))
@tf.function
def inference_from_image_audio_tensors(
self, input_frames: tf.Tensor,
input_audio: tf.Tensor) -> Mapping[str, tf.Tensor]:
return self.serve(input_frames, input_audio)
@tf.function
def inference_from_image_bytes(self, inputs: tf.Tensor):
raise NotImplementedError(
'Video classification do not support image bytes input.')
def serve(self, input_frames: tf.Tensor, input_audio: tf.Tensor):
"""Cast image to float and run inference.
Args:
      input_frames: A Tensor of input video frames, either a uint8 Tensor of
        shape [batch_size, num_frames, height, width, 3] or a batch of encoded
        frame strings.
      input_audio: A float32 Tensor of audio features.
Returns:
Tensor holding classification output logits.
"""
with tf.device('cpu:0'):
inputs = tf.map_fn(
self._preprocess_image, (input_frames),
fn_output_signature={
'image': tf.float32,
})
if self._output_audio:
inputs.update(
tf.map_fn(
self._preprocess_audio, (input_audio),
fn_output_signature={'audio': tf.float32}))
logits = self.inference_step(inputs)
if self.params.task.train_data.is_multilabel:
probs = tf.math.sigmoid(logits)
else:
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
function_keys: A dictionary with keys as the function to create signature
for and values as the signature keys when returns.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for key, def_name in function_keys.items():
if key == 'image_tensor':
input_signature = tf.TensorSpec(
shape=[self._batch_size] + self._input_image_size + [3],
dtype=tf.uint8,
name='INPUT_FRAMES')
signatures[
def_name] = self.inference_from_image_tensors.get_concrete_function(
input_signature)
elif key == 'frames_audio':
input_signature = [
tf.TensorSpec(
shape=[self._batch_size] + self._input_image_size + [3],
dtype=tf.uint8,
name='INPUT_FRAMES'),
tf.TensorSpec(
shape=[self._batch_size] +
self.params.task.train_data.audio_feature_shape,
dtype=tf.float32,
name='INPUT_AUDIO')
]
signatures[
def_name] = self.inference_from_image_audio_tensors.get_concrete_function(
input_signature)
elif key == 'serve_examples' or key == 'tf_example':
input_signature = tf.TensorSpec(
shape=[self._batch_size], dtype=tf.string)
signatures[
def_name] = self.inference_from_tf_example.get_concrete_function(
input_signature)
else:
raise ValueError('Unrecognized `input_type`')
return signatures
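# Example (a minimal sketch mirroring the unit test above; the experiment name
# and feature shapes are illustrative): building a video classification serving
# module and collecting its serving signatures.
def _example_video_serving_signatures():
  from official.core import exp_factory  # Local import for the example only.
  params = exp_factory.get_exp_config('video_classification_ucf101')
  params.task.train_data.feature_shape = (8, 64, 64, 3)
  params.task.validation_data.feature_shape = (8, 64, 64, 3)
  params.task.model.backbone.resnet_3d.model_id = 50
  module = VideoClassificationModule(
      params, batch_size=1, input_image_size=[8, 64, 64])
  return module.get_inference_signatures({'image_tensor': 'serving_default'})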
| 6,874 | 35.569149 | 86 | py |
models | models-master/official/vision/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE=XX
CHECKPOINT_PATH=XX
EXPORT_DIR_PATH=XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
--export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
_EXPERIMENT = flags.DEFINE_string(
'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco')
_EXPORT_DIR = flags.DEFINE_string('export_dir', None, 'The export directory.')
_CHECKPOINT_PATH = flags.DEFINE_string('checkpoint_path', None,
'Checkpoint path.')
_CONFIG_FILE = flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
_PARAMS_OVERRIDE = flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be'
    ' overridden on top of the `config_file` template.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', None, 'The batch size.')
_IMAGE_TYPE = flags.DEFINE_string(
'input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example` and `tflite`.')
_INPUT_IMAGE_SIZE = flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
_EXPORT_CHECKPOINT_SUBDIR = flags.DEFINE_string(
'export_checkpoint_subdir', 'checkpoint',
'The subdirectory for checkpoints.')
_EXPORT_SAVED_MODEL_SUBDIR = flags.DEFINE_string(
'export_saved_model_subdir', 'saved_model',
'The subdirectory for saved model.')
_LOG_MODEL_FLOPS_AND_PARAMS = flags.DEFINE_bool(
'log_model_flops_and_params', False,
'If true, logs model flops and parameters.')
_INPUT_NAME = flags.DEFINE_string(
'input_name', None,
    'Input tensor name in signature def. Default is None, which'
    ' produces the input tensor name `inputs`.')
_FUNCTION_KEYS = flags.DEFINE_string(
'function_keys',
'',
(
'An optional comma-separated string of one or more key:value pair'
' indicating the serving function key and corresponding signature_def'
' name. For example,'
' `tf_example:serving_default,image_tensor:serving_image_tensor` means'
' two serving functions are defined for `tf_example` and `image_tensor`'
' input types.'
),
)
_ADD_TPU_FUNCTION_ALIAS = flags.DEFINE_bool(
'add_tpu_function_alias',
False,
(
'Whether to add TPU function alias so later it can be converted to a'
' TPU SavedModel for inference.'
),
)
def main(_):
params = exp_factory.get_exp_config(_EXPERIMENT.value)
for config_file in _CONFIG_FILE.value or []:
try:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True
)
except KeyError:
params = hyperparams.override_params_dict(
params, config_file, is_strict=False
)
if _PARAMS_OVERRIDE.value:
try:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=True
)
except KeyError:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=False
)
params.validate()
params.lock()
function_keys = None
if _FUNCTION_KEYS.value:
function_keys = {}
for key_val in _FUNCTION_KEYS.value.split(','):
key_val_split = key_val.split(':')
function_keys[key_val_split[0]] = key_val_split[1]
export_saved_model_lib.export_inference_graph(
input_type=_IMAGE_TYPE.value,
batch_size=_BATCH_SIZE.value,
input_image_size=[int(x) for x in _INPUT_IMAGE_SIZE.value.split(',')],
params=params,
checkpoint_path=_CHECKPOINT_PATH.value,
export_dir=_EXPORT_DIR.value,
function_keys=function_keys,
export_checkpoint_subdir=_EXPORT_CHECKPOINT_SUBDIR.value,
export_saved_model_subdir=_EXPORT_SAVED_MODEL_SUBDIR.value,
log_model_flops_and_params=_LOG_MODEL_FLOPS_AND_PARAMS.value,
input_name=_INPUT_NAME.value,
add_tpu_function_alias=_ADD_TPU_FUNCTION_ALIAS.value,
)
if __name__ == '__main__':
app.run(main)
| 5,758 | 35.220126 | 80 | py |
models | models-master/official/vision/serving/export_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for export library."""
from typing import List, Optional
import tensorflow as tf
# pylint: disable=g-long-lambda
def get_image_input_signatures(input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
num_channels: int = 3,
input_name: Optional[str] = None):
"""Gets input signatures for an image.
Args:
    input_type: A `str`, one of `tf_example`, `serve_examples`, `image_bytes`,
      `image_tensor` or `tflite`.
batch_size: `int` for batch size or None.
input_image_size: List[int] for the height and width of the input image.
num_channels: `int` for number of channels in the input image.
input_name: A `str` to set the input image name in the signature, if None,
a default name `inputs` will be used.
Returns:
tf.TensorSpec of the input tensor.
"""
if input_type == 'image_tensor':
input_signature = tf.TensorSpec(
shape=[batch_size] + [None] * len(input_image_size) + [num_channels],
dtype=tf.uint8, name=input_name)
elif input_type in ['image_bytes', 'serve_examples', 'tf_example']:
input_signature = tf.TensorSpec(
shape=[batch_size], dtype=tf.string, name=input_name)
elif input_type == 'tflite':
input_signature = tf.TensorSpec(
shape=[1] + input_image_size + [num_channels],
dtype=tf.float32,
name=input_name)
else:
raise ValueError('Unrecognized `input_type`')
return input_signature
def decode_image(encoded_image_bytes: str,
input_image_size: List[int],
num_channels: int = 3,) -> tf.Tensor:
"""Decodes an image bytes to an image tensor.
Use `tf.image.decode_image` to decode an image if input is expected to be 2D
image; otherwise use `tf.io.decode_raw` to convert the raw bytes to tensor
and reshape it to desire shape.
Args:
encoded_image_bytes: An encoded image string to be decoded.
input_image_size: List[int] for the desired input size. This will be used to
infer whether the image is 2d or 3d.
num_channels: `int` for number of image channels.
Returns:
A decoded image tensor.
"""
if len(input_image_size) == 2:
# Decode an image if 2D input is expected.
image_tensor = tf.image.decode_image(
encoded_image_bytes, channels=num_channels)
else:
# Convert raw bytes into a tensor and reshape it, if not 2D input.
image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)
image_tensor.set_shape([None] * len(input_image_size) + [num_channels])
return image_tensor
def decode_image_tf_example(tf_example_string_tensor: tf.train.Example,
input_image_size: List[int],
num_channels: int = 3,
encoded_key: str = 'image/encoded'
) -> tf.Tensor:
"""Decodes a TF Example to an image tensor."""
keys_to_features = {
encoded_key: tf.io.FixedLenFeature((), tf.string, default_value=''),
}
parsed_tensors = tf.io.parse_single_example(
serialized=tf_example_string_tensor, features=keys_to_features)
image_tensor = decode_image(
parsed_tensors[encoded_key],
input_image_size=input_image_size,
num_channels=num_channels)
return image_tensor
def parse_image(
inputs, input_type: str, input_image_size: List[int], num_channels: int):
"""Parses image."""
if input_type in ['tf_example', 'serve_examples']:
decode_image_tf_example_fn = (
lambda x: decode_image_tf_example(x, input_image_size, num_channels))
image_tensor = tf.map_fn(
decode_image_tf_example_fn,
elems=inputs,
fn_output_signature=tf.TensorSpec(
shape=[None] * len(input_image_size) + [num_channels],
dtype=tf.uint8),
)
elif input_type == 'image_bytes':
decode_image_fn = lambda x: decode_image(x, input_image_size, num_channels)
image_tensor = tf.map_fn(
decode_image_fn, elems=inputs,
fn_output_signature=tf.TensorSpec(
shape=[None] * len(input_image_size) + [num_channels],
dtype=tf.uint8),)
else:
image_tensor = inputs
return image_tensor
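# Example (a minimal sketch; shapes and values are illustrative): decoding a
# one-element batch of encoded image bytes with the helpers above.
def _example_parse_image_bytes() -> tf.Tensor:
  # A matching serving signature for this input type would be built with:
  #   get_image_input_signatures('image_bytes', None, [224, 224])
  encoded = tf.image.encode_jpeg(tf.zeros([224, 224, 3], dtype=tf.uint8))
  # Returns a uint8 tensor of shape [1, 224, 224, 3].
  return parse_image(
      tf.stack([encoded]),
      input_type='image_bytes',
      input_image_size=[224, 224],
      num_channels=3)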
| 4,871 | 37.0625 | 80 | py |
models | models-master/official/vision/serving/export_module_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for vision export modules."""
from typing import List, Optional
import tensorflow as tf
from official.core import config_definitions as cfg
from official.vision import configs
from official.vision.dataloaders import classification_input
from official.vision.modeling import factory
from official.vision.serving import export_base_v2 as export_base
from official.vision.serving import export_utils
def create_classification_export_module(params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3):
"""Creats classification export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels)
input_specs = tf.keras.layers.InputSpec(
shape=[batch_size] + input_image_size + [num_channels])
model = factory.build_classification_model(
input_specs=input_specs,
model_config=params.task.model,
l2_regularizer=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
# If input_type is `tflite`, do not apply image preprocessing.
if input_type == 'tflite':
return image_tensor
def preprocess_image_fn(inputs):
return classification_input.Parser.inference_fn(
inputs, input_image_size, num_channels)
images = tf.map_fn(
preprocess_image_fn, elems=image_tensor,
fn_output_signature=tf.TensorSpec(
shape=input_image_size + [num_channels],
dtype=tf.float32))
return images
def postprocess_fn(logits):
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
export_module = export_base.ExportModule(params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
postprocessor=postprocess_fn)
return export_module
def get_export_module(params: cfg.ExperimentConfig,
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
num_channels: int = 3) -> export_base.ExportModule:
"""Factory for export modules."""
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
export_module = create_classification_export_module(
params, input_type, batch_size, input_image_size, num_channels)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
return export_module
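# Example (a minimal sketch; the experiment name, checkpoint and export paths
# are placeholders): building a classification export module and saving it with
# the core export helper.
def _example_export_classifier(export_dir: str = '/tmp/classifier_export'):
  from official.core import exp_factory  # Local imports for the example only.
  from official.core import export_base
  params = exp_factory.get_exp_config('resnet_imagenet')
  module = get_export_module(
      params,
      input_type='image_tensor',
      batch_size=1,
      input_image_size=[224, 224])
  return export_base.export(
      module, ['image_tensor'],
      export_savedmodel_dir=export_dir,
      checkpoint_path='/tmp/resnet_ckpt/ckpt-1',
      timestamped=False)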
| 3,539 | 38.333333 | 75 | py |
models | models-master/official/vision/serving/export_module_factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for vision modules."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.core import exp_factory
from official.core import export_base
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.dataloaders import classification_input
from official.vision.serving import export_module_factory
class ImageClassificationExportTest(tf.test.TestCase, parameterized.TestCase):
def _get_classification_module(self, input_type, input_image_size):
params = exp_factory.get_exp_config('resnet_imagenet')
params.task.model.backbone.resnet.model_id = 18
module = export_module_factory.create_classification_export_module(
params, input_type, batch_size=1, input_image_size=input_image_size)
return module
def _get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
return tf.zeros((1, 32, 32, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((32, 32, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
return [example]
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'image_bytes'},
{'input_type': 'tf_example'},
)
def test_export(self, input_type='image_tensor'):
input_image_size = [32, 32]
tmp_dir = self.get_temp_dir()
module = self._get_classification_module(input_type, input_image_size)
# Test that the model restores any attrs that are trackable objects
# (eg: tables, resource variables, keras models/layers, tf.hub modules).
module.model.test_trackable = tf.keras.layers.InputLayer(input_shape=(4,))
ckpt_path = tf.train.Checkpoint(model=module.model).save(
os.path.join(tmp_dir, 'ckpt'))
export_dir = export_base.export(
module, [input_type],
export_savedmodel_dir=tmp_dir,
checkpoint_path=ckpt_path,
timestamped=False)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(os.path.exists(
os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(os.path.exists(
os.path.join(tmp_dir, 'variables', 'variables.data-00000-of-00001')))
imported = tf.saved_model.load(export_dir)
classification_fn = imported.signatures['serving_default']
images = self._get_dummy_input(input_type)
def preprocess_image_fn(inputs):
return classification_input.Parser.inference_fn(
inputs, input_image_size, num_channels=3)
processed_images = tf.map_fn(
preprocess_image_fn,
elems=tf.zeros([1] + input_image_size + [3], dtype=tf.uint8),
fn_output_signature=tf.TensorSpec(
shape=input_image_size + [3], dtype=tf.float32))
expected_logits = module.model(processed_images, training=False)
expected_prob = tf.nn.softmax(expected_logits)
out = classification_fn(tf.constant(images))
# The imported model should contain any trackable attrs that the original
# model had.
self.assertTrue(hasattr(imported.model, 'test_trackable'))
self.assertAllClose(
out['logits'].numpy(), expected_logits.numpy(), rtol=1e-04, atol=1e-04)
self.assertAllClose(
out['probs'].numpy(), expected_prob.numpy(), rtol=1e-04, atol=1e-04)
if __name__ == '__main__':
tf.test.main()
| 4,595 | 37.949153 | 79 | py |
models | models-master/official/vision/serving/export_saved_model_lib_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.core.export_saved_model_lib."""
import os
from unittest import mock
import tensorflow as tf
from official.core import export_base
from official.vision import configs
from official.vision.serving import export_saved_model_lib
class WriteModelFlopsAndParamsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.tempdir = self.create_tempdir()
self.enter_context(
mock.patch.object(export_base, 'export', autospec=True, spec_set=True))
def _export_model_with_log_model_flops_and_params(self, params):
export_saved_model_lib.export_inference_graph(
input_type='image_tensor',
batch_size=1,
input_image_size=[64, 64],
params=params,
checkpoint_path=os.path.join(self.tempdir, 'unused-ckpt'),
export_dir=self.tempdir,
log_model_flops_and_params=True)
def assertModelAnalysisFilesExist(self):
self.assertTrue(
tf.io.gfile.exists(os.path.join(self.tempdir, 'model_params.txt')))
self.assertTrue(
tf.io.gfile.exists(os.path.join(self.tempdir, 'model_flops.txt')))
def test_retinanet_task(self):
params = configs.retinanet.retinanet_resnetfpn_coco()
params.task.model.backbone.resnet.model_id = 18
params.task.model.num_classes = 2
params.task.model.max_level = 6
self._export_model_with_log_model_flops_and_params(params)
self.assertModelAnalysisFilesExist()
def test_maskrcnn_task(self):
params = configs.maskrcnn.maskrcnn_resnetfpn_coco()
params.task.model.backbone.resnet.model_id = 18
params.task.model.num_classes = 2
params.task.model.max_level = 6
self._export_model_with_log_model_flops_and_params(params)
self.assertModelAnalysisFilesExist()
if __name__ == '__main__':
tf.test.main()
| 2,399 | 33.285714 | 79 | py |
models | models-master/official/vision/serving/export_saved_model_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export utility function for serving/inference."""
import os
from typing import Optional, List, Union, Text, Dict
from absl import logging
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.core import train_utils
from official.vision import configs
from official.vision.serving import detection
from official.vision.serving import image_classification
from official.vision.serving import semantic_segmentation
from official.vision.serving import video_classification
def export_inference_graph(
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
params: cfg.ExperimentConfig,
checkpoint_path: str,
export_dir: str,
num_channels: Optional[int] = 3,
export_module: Optional[export_base.ExportModule] = None,
export_checkpoint_subdir: Optional[str] = None,
export_saved_model_subdir: Optional[str] = None,
save_options: Optional[tf.saved_model.SaveOptions] = None,
log_model_flops_and_params: bool = False,
checkpoint: Optional[tf.train.Checkpoint] = None,
input_name: Optional[str] = None,
function_keys: Optional[Union[List[Text], Dict[Text, Text]]] = None,
add_tpu_function_alias: Optional[bool] = False,
):
"""Exports inference graph for the model specified in the exp config.
Saved model is stored at export_dir/saved_model, checkpoint is saved
at export_dir/checkpoint, and params is saved at export_dir/params.yaml.
Args:
input_type: One of `image_tensor`, `image_bytes`, `tf_example` or `tflite`.
batch_size: 'int', or None.
input_image_size: List or Tuple of height and width.
params: Experiment params.
checkpoint_path: Trained checkpoint path or directory.
export_dir: Export directory path.
num_channels: The number of input image channels.
export_module: Optional export module to be used instead of using params to
create one. If None, the params will be used to create an export module.
export_checkpoint_subdir: Optional subdirectory under export_dir to store
checkpoint.
export_saved_model_subdir: Optional subdirectory under export_dir to store
saved model.
save_options: `SaveOptions` for `tf.saved_model.save`.
log_model_flops_and_params: If True, writes model FLOPs to model_flops.txt
and model parameters to model_params.txt.
checkpoint: An optional tf.train.Checkpoint. If provided, the export module
will use it to read the weights.
    input_name: The input tensor name; defaults to `None`, which produces the
      input tensor name `inputs`.
function_keys: a list of string keys to retrieve pre-defined serving
      signatures. The signature keys will be set with defaults. If a dictionary
is provided, the values will be used as signature keys.
add_tpu_function_alias: Whether to add TPU function alias so that it can be
converted to a TPU compatible saved model later. Default is False.
"""
if export_checkpoint_subdir:
output_checkpoint_directory = os.path.join(
export_dir, export_checkpoint_subdir)
else:
output_checkpoint_directory = None
if export_saved_model_subdir:
output_saved_model_directory = os.path.join(
export_dir, export_saved_model_subdir)
else:
output_saved_model_directory = export_dir
  # TODO(arashwan): Offer a direct path to use ExportModule with Task objects.
if not export_module:
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
export_module = image_classification.ClassificationModule(
params=params,
batch_size=batch_size,
input_image_size=input_image_size,
input_type=input_type,
num_channels=num_channels,
input_name=input_name)
elif isinstance(params.task, configs.retinanet.RetinaNetTask) or isinstance(
params.task, configs.maskrcnn.MaskRCNNTask):
export_module = detection.DetectionModule(
params=params,
batch_size=batch_size,
input_image_size=input_image_size,
input_type=input_type,
num_channels=num_channels,
input_name=input_name)
elif isinstance(params.task,
configs.semantic_segmentation.SemanticSegmentationTask):
export_module = semantic_segmentation.SegmentationModule(
params=params,
batch_size=batch_size,
input_image_size=input_image_size,
input_type=input_type,
num_channels=num_channels,
input_name=input_name)
elif isinstance(params.task,
configs.video_classification.VideoClassificationTask):
export_module = video_classification.VideoClassificationModule(
params=params,
batch_size=batch_size,
input_image_size=input_image_size,
input_type=input_type,
num_channels=num_channels,
input_name=input_name)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
if add_tpu_function_alias:
save_options = tf.saved_model.SaveOptions(
function_aliases={
'tpu_candidate': export_module.inference_from_image_tensors,
}
)
export_base.export(
export_module,
function_keys=function_keys if function_keys else [input_type],
export_savedmodel_dir=output_saved_model_directory,
checkpoint=checkpoint,
checkpoint_path=checkpoint_path,
timestamped=False,
save_options=save_options)
if output_checkpoint_directory:
ckpt = tf.train.Checkpoint(model=export_module.model)
ckpt.save(os.path.join(output_checkpoint_directory, 'ckpt'))
train_utils.serialize_config(params, export_dir)
if log_model_flops_and_params:
inputs_kwargs = None
if isinstance(
params.task,
(configs.retinanet.RetinaNetTask, configs.maskrcnn.MaskRCNNTask)):
      # We need to create the inputs_kwargs argument to specify the input
      # shapes for a subclass model that overrides model.call to take multiple
      # inputs, e.g., the RetinaNet model.
inputs_kwargs = {
'images':
tf.TensorSpec([1] + input_image_size + [num_channels],
tf.float32),
'image_shape':
tf.TensorSpec([1, 2], tf.float32)
}
dummy_inputs = {
k: tf.ones(v.shape.as_list(), tf.float32)
for k, v in inputs_kwargs.items()
}
# Must do forward pass to build the model.
export_module.model(**dummy_inputs)
else:
logging.info(
'Logging model flops and params not implemented for %s task.',
type(params.task))
return
train_utils.try_count_flops(export_module.model, inputs_kwargs,
os.path.join(export_dir, 'model_flops.txt'))
train_utils.write_model_params(export_module.model,
os.path.join(export_dir, 'model_params.txt'))
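# Example usage of `export_inference_graph` (a minimal sketch, not part of this
# module; the experiment name, checkpoint path, and export directory below are
# hypothetical):
#
#   from official.core import exp_factory
#   from official.vision.serving import export_saved_model_lib
#
#   params = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
#   export_saved_model_lib.export_inference_graph(
#       input_type='image_tensor',
#       batch_size=1,
#       input_image_size=[640, 640],
#       params=params,
#       checkpoint_path='/tmp/model/ckpt-90000',
#       export_dir='/tmp/export')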
| 7,665 | 39.560847 | 80 | py |
models | models-master/official/vision/serving/semantic_segmentation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for semantic segmentation export lib."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.core import exp_factory
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.serving import semantic_segmentation
class SemanticSegmentationExportTest(tf.test.TestCase, parameterized.TestCase):
def _get_segmentation_module(self,
input_type,
rescale_output,
preserve_aspect_ratio,
batch_size=1):
params = exp_factory.get_exp_config('mnv2_deeplabv3_pascal')
params.task.export_config.rescale_output = rescale_output
params.task.train_data.preserve_aspect_ratio = preserve_aspect_ratio
segmentation_module = semantic_segmentation.SegmentationModule(
params,
batch_size=batch_size,
input_image_size=[112, 112],
input_type=input_type)
return segmentation_module
def _export_from_module(self, module, input_type, save_directory):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module, save_directory, signatures=signatures)
def _get_dummy_input(self, input_type, input_image_size):
"""Get dummy input for the given input type."""
height = input_image_size[0]
width = input_image_size[1]
if input_type == 'image_tensor':
return tf.zeros((1, height, width, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((height, width, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((height, width, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
return [example]
elif input_type == 'tflite':
return tf.zeros((1, height, width, 3), dtype=np.float32)
@parameterized.parameters(
('image_tensor', False, [112, 112], False),
('image_bytes', False, [112, 112], False),
('tf_example', False, [112, 112], True),
('tflite', False, [112, 112], False),
('image_tensor', True, [112, 56], True),
('image_bytes', True, [112, 56], True),
('tf_example', True, [56, 112], False),
)
def test_export(self, input_type, rescale_output, input_image_size,
preserve_aspect_ratio):
tmp_dir = self.get_temp_dir()
module = self._get_segmentation_module(
input_type=input_type,
rescale_output=rescale_output,
preserve_aspect_ratio=preserve_aspect_ratio)
self._export_from_module(module, input_type, tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
segmentation_fn = imported.signatures['serving_default']
images = self._get_dummy_input(input_type, input_image_size)
if input_type != 'tflite':
processed_images, _ = tf.nest.map_structure(
tf.stop_gradient,
tf.map_fn(
module._build_inputs,
elems=tf.zeros((1, 112, 112, 3), dtype=tf.uint8),
fn_output_signature=(tf.TensorSpec(
shape=[112, 112, 3], dtype=tf.float32),
tf.TensorSpec(
shape=[4, 2], dtype=tf.float32))))
else:
processed_images = images
logits = module.model(processed_images, training=False)['logits']
if rescale_output:
expected_output = tf.image.resize(
logits, input_image_size, method='bilinear')
else:
expected_output = tf.image.resize(logits, [112, 112], method='bilinear')
out = segmentation_fn(tf.constant(images))
self.assertAllClose(out['logits'].numpy(), expected_output.numpy())
def test_export_invalid_batch_size(self):
batch_size = 3
tmp_dir = self.get_temp_dir()
module = self._get_segmentation_module(
input_type='image_tensor',
rescale_output=True,
preserve_aspect_ratio=False,
batch_size=batch_size)
with self.assertRaisesRegex(ValueError,
'Batch size cannot be more than 1.'):
self._export_from_module(module, 'image_tensor', tmp_dir)
if __name__ == '__main__':
tf.test.main()
| 5,628 | 37.554795 | 79 | py |
models | models-master/official/vision/serving/detection_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for image detection export lib."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.core import exp_factory
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.serving import detection
class DetectionExportTest(tf.test.TestCase, parameterized.TestCase):
def _get_detection_module(
self,
experiment_name,
input_type,
outer_boxes_scale=1.0,
apply_nms=True,
normalized_coordinates=False,
nms_version='batched',
output_intermediate_features=False,
):
params = exp_factory.get_exp_config(experiment_name)
params.task.model.outer_boxes_scale = outer_boxes_scale
params.task.model.backbone.resnet.model_id = 18
params.task.model.detection_generator.apply_nms = apply_nms
if normalized_coordinates:
params.task.export_config.output_normalized_coordinates = True
params.task.model.detection_generator.nms_version = nms_version
if output_intermediate_features:
params.task.export_config.output_intermediate_features = True
detection_module = detection.DetectionModule(
params,
batch_size=1,
input_image_size=[640, 640],
input_type=input_type)
return detection_module
def _export_from_module(self, module, input_type, save_directory):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module, save_directory, signatures=signatures)
def _get_dummy_input(self, input_type, batch_size, image_size):
"""Gets dummy input for the given input type."""
h, w = image_size
if input_type == 'image_tensor':
return tf.zeros((batch_size, h, w, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue() for b in range(batch_size)]
elif input_type == 'tf_example':
image_tensor = tf.zeros((h, w, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
return [example for b in range(batch_size)]
elif input_type == 'tflite':
return tf.zeros((batch_size, h, w, 3), dtype=np.float32)
@parameterized.parameters(
('image_tensor', 'fasterrcnn_resnetfpn_coco', [384, 384]),
('image_bytes', 'fasterrcnn_resnetfpn_coco', [640, 640]),
('tf_example', 'fasterrcnn_resnetfpn_coco', [640, 640]),
('tflite', 'fasterrcnn_resnetfpn_coco', [640, 640]),
('image_tensor', 'maskrcnn_resnetfpn_coco', [640, 640]),
('image_bytes', 'maskrcnn_resnetfpn_coco', [640, 384]),
('tf_example', 'maskrcnn_resnetfpn_coco', [640, 640]),
('tflite', 'maskrcnn_resnetfpn_coco', [640, 640]),
('image_tensor', 'retinanet_resnetfpn_coco', [640, 640]),
('image_bytes', 'retinanet_resnetfpn_coco', [640, 640]),
('tf_example', 'retinanet_resnetfpn_coco', [384, 640]),
('tflite', 'retinanet_resnetfpn_coco', [640, 640]),
('image_tensor', 'retinanet_resnetfpn_coco', [384, 384]),
('image_bytes', 'retinanet_spinenet_coco', [640, 640]),
('tf_example', 'retinanet_spinenet_coco', [640, 384]),
('tflite', 'retinanet_spinenet_coco', [640, 640]),
('image_tensor', 'fasterrcnn_resnetfpn_coco', [384, 384], 1.1),
('tf_example', 'maskrcnn_resnetfpn_coco', [640, 640], 1.1),
('image_tensor', 'fasterrcnn_resnetfpn_coco', [384, 384], 1.1, 'v2'),
)
def test_export(
self,
input_type,
experiment_name,
image_size,
outer_boxes_scale=1.0,
nms_version='batched',
):
tmp_dir = self.get_temp_dir()
module = self._get_detection_module(
        experiment_name,
        input_type,
        outer_boxes_scale=outer_boxes_scale,
        nms_version=nms_version)
self._export_from_module(module, input_type, tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
detection_fn = imported.signatures['serving_default']
images = self._get_dummy_input(
input_type, batch_size=1, image_size=image_size)
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
expected_outputs = signatures['serving_default'](tf.constant(images))
outputs = detection_fn(tf.constant(images))
self.assertAllEqual(outputs['detection_boxes'].numpy(),
expected_outputs['detection_boxes'].numpy())
    # Outer boxes are not supported in RetinaNet models.
if 'retinanet' not in experiment_name:
if module.params.task.model.include_mask and outer_boxes_scale > 1.0:
self.assertAllEqual(outputs['detection_outer_boxes'].numpy(),
expected_outputs['detection_outer_boxes'].numpy())
self.assertAllEqual(outputs['detection_classes'].numpy(),
expected_outputs['detection_classes'].numpy())
self.assertAllEqual(outputs['detection_scores'].numpy(),
expected_outputs['detection_scores'].numpy())
self.assertAllEqual(outputs['num_detections'].numpy(),
expected_outputs['num_detections'].numpy())
@parameterized.parameters(('retinanet_resnetfpn_coco',),
('maskrcnn_spinenet_coco',))
def test_build_model_pass_with_none_batch_size(self, experiment_type):
params = exp_factory.get_exp_config(experiment_type)
detection.DetectionModule(
params, batch_size=None, input_image_size=[640, 640])
def test_export_retinanet_with_intermediate_features(self):
tmp_dir = self.get_temp_dir()
input_type = 'image_tensor'
module = self._get_detection_module(
'retinanet_resnetfpn_coco',
input_type,
output_intermediate_features=True,
)
self._export_from_module(module, input_type, tmp_dir)
imported = tf.saved_model.load(tmp_dir)
detection_fn = imported.signatures['serving_default']
images = self._get_dummy_input(
input_type, batch_size=1, image_size=[384, 384]
)
outputs = detection_fn(tf.constant(images))
self.assertContainsSubset(
{
'backbone_3',
'backbone_4',
'backbone_5',
'decoder_3',
'decoder_4',
'decoder_5',
'decoder_6',
'decoder_7',
},
outputs.keys(),
)
@parameterized.parameters(
('image_tensor', 'retinanet_resnetfpn_coco', [640, 640]),
('image_bytes', 'retinanet_resnetfpn_coco', [640, 640]),
('tf_example', 'retinanet_resnetfpn_coco', [384, 640]),
('tflite', 'retinanet_resnetfpn_coco', [640, 640]),
('image_tensor', 'retinanet_resnetfpn_coco', [384, 384]),
('image_bytes', 'retinanet_spinenet_coco', [640, 640]),
('tf_example', 'retinanet_spinenet_coco', [640, 384]),
('tflite', 'retinanet_spinenet_coco', [640, 640]),
)
def test_export_normalized_coordinates_no_nms(
self,
input_type,
experiment_name,
image_size,
):
tmp_dir = self.get_temp_dir()
module = self._get_detection_module(
experiment_name,
input_type,
apply_nms=False,
normalized_coordinates=True,
)
self._export_from_module(module, input_type, tmp_dir)
imported = tf.saved_model.load(tmp_dir)
detection_fn = imported.signatures['serving_default']
images = self._get_dummy_input(
input_type, batch_size=1, image_size=image_size
)
outputs = detection_fn(tf.constant(images))
min_values = tf.math.reduce_min(outputs['decoded_boxes'])
max_values = tf.math.reduce_max(outputs['decoded_boxes'])
self.assertAllGreaterEqual(
min_values.numpy(), tf.zeros_like(min_values).numpy()
)
self.assertAllLessEqual(
max_values.numpy(), tf.ones_like(max_values).numpy()
)
if __name__ == '__main__':
tf.test.main()
| 9,155 | 37.470588 | 79 | py |
models | models-master/official/vision/serving/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation input and model functions for serving/inference."""
import tensorflow as tf
from official.vision.modeling import factory
from official.vision.ops import preprocess_ops
from official.vision.serving import export_base
class SegmentationModule(export_base.ExportModule):
"""Segmentation Module."""
def _build_model(self):
input_specs = tf.keras.layers.InputSpec(
shape=[self._batch_size] + self._input_image_size + [3])
return factory.build_segmentation_model(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def _build_inputs(self, image):
"""Builds classification model inputs for serving."""
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
if self.params.task.train_data.preserve_aspect_ratio:
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._input_image_size,
padded_size=self._input_image_size,
aug_scale_min=1.0,
aug_scale_max=1.0)
else:
image, image_info = preprocess_ops.resize_image(image,
self._input_image_size)
return image, image_info
def serve(self, images):
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary holding segmentation output logits (and image info when
      computed).
"""
# Skip image preprocessing when input_type is tflite so it is compatible
# with TFLite quantization.
image_info = None
if self._input_type != 'tflite':
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images_spec = tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
outputs = self.inference_step(images)
# Optionally resize prediction to the input image size.
if self.params.task.export_config.rescale_output:
logits = outputs['logits']
if logits.shape[0] != 1:
raise ValueError('Batch size cannot be more than 1.')
image_shape = tf.cast(image_info[0, 0, :], tf.int32)
if self.params.task.train_data.preserve_aspect_ratio:
rescale_size = tf.cast(
tf.math.ceil(image_info[0, 1, :] / image_info[0, 2, :]), tf.int32)
offsets = tf.cast(image_info[0, 3, :], tf.int32)
logits = tf.image.resize(logits, rescale_size, method='bilinear')
outputs['logits'] = tf.image.crop_to_bounding_box(
logits, offsets[0], offsets[1], image_shape[0], image_shape[1])
else:
outputs['logits'] = tf.image.resize(
logits, [image_shape[0], image_shape[1]], method='bilinear')
else:
outputs['logits'] = tf.image.resize(
outputs['logits'], self._input_image_size, method='bilinear')
if image_info is not None:
outputs.update({'image_info': image_info})
return outputs
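# Example usage (a minimal sketch, assuming the registered
# `mnv2_deeplabv3_pascal` experiment; image size, paths, and the signature key
# are illustrative):
#
#   from official.core import exp_factory
#
#   params = exp_factory.get_exp_config('mnv2_deeplabv3_pascal')
#   module = SegmentationModule(
#       params, batch_size=1, input_image_size=[512, 512],
#       input_type='image_tensor')
#   signatures = module.get_inference_signatures(
#       {'image_tensor': 'serving_default'})
#   tf.saved_model.save(module, '/tmp/segmentation_export',
#                       signatures=signatures)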
| 4,013 | 36.166667 | 79 | py |
models | models-master/official/vision/serving/export_saved_model_lib_v2.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export utility function for serving/inference."""
import os
from typing import Optional, List, Union, Text, Dict
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.core import train_utils
from official.vision.serving import export_module_factory
def export(
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
params: cfg.ExperimentConfig,
checkpoint_path: str,
export_dir: str,
num_channels: Optional[int] = 3,
export_module: Optional[export_base.ExportModule] = None,
export_checkpoint_subdir: Optional[str] = None,
export_saved_model_subdir: Optional[str] = None,
function_keys: Optional[Union[List[Text], Dict[Text, Text]]] = None,
save_options: Optional[tf.saved_model.SaveOptions] = None):
"""Exports the model specified in the exp config.
Saved model is stored at export_dir/saved_model, checkpoint is saved
at export_dir/checkpoint, and params is saved at export_dir/params.yaml.
Args:
input_type: One of `image_tensor`, `image_bytes`, `tf_example`.
batch_size: 'int', or None.
input_image_size: List or Tuple of height and width.
params: Experiment params.
checkpoint_path: Trained checkpoint path or directory.
export_dir: Export directory path.
num_channels: The number of input image channels.
export_module: Optional export module to be used instead of using params
to create one. If None, the params will be used to create an export
module.
export_checkpoint_subdir: Optional subdirectory under export_dir
to store checkpoint.
export_saved_model_subdir: Optional subdirectory under export_dir
to store saved model.
function_keys: a list of string keys to retrieve pre-defined serving
      signatures. The signature keys will be set with defaults. If a dictionary
is provided, the values will be used as signature keys.
save_options: `SaveOptions` for `tf.saved_model.save`.
"""
if export_checkpoint_subdir:
output_checkpoint_directory = os.path.join(
export_dir, export_checkpoint_subdir)
else:
output_checkpoint_directory = None
if export_saved_model_subdir:
output_saved_model_directory = os.path.join(
export_dir, export_saved_model_subdir)
else:
output_saved_model_directory = export_dir
  if not export_module:
    export_module = export_module_factory.get_export_module(
        params,
        input_type=input_type,
        batch_size=batch_size,
        input_image_size=input_image_size,
        num_channels=num_channels)
export_base.export(
export_module,
function_keys=function_keys if function_keys else [input_type],
export_savedmodel_dir=output_saved_model_directory,
checkpoint_path=checkpoint_path,
timestamped=False,
save_options=save_options)
if output_checkpoint_directory:
ckpt = tf.train.Checkpoint(model=export_module.model)
ckpt.save(os.path.join(output_checkpoint_directory, 'ckpt'))
train_utils.serialize_config(params, export_dir)
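# Example usage of `export` (a minimal sketch; the experiment name and paths
# are hypothetical):
#
#   from official.core import exp_factory
#
#   params = exp_factory.get_exp_config('resnet_imagenet')
#   export(
#       input_type='image_tensor',
#       batch_size=1,
#       input_image_size=[224, 224],
#       params=params,
#       checkpoint_path='/tmp/model/ckpt-62400',
#       export_dir='/tmp/export')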
| 3,700 | 36.765306 | 78 | py |
models | models-master/official/vision/serving/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for exporting models."""
from official.vision.serving import export_saved_model_lib
| 702 | 40.352941 | 74 | py |
models | models-master/official/vision/serving/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for image classification export lib."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.core import exp_factory
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.serving import image_classification
class ImageClassificationExportTest(tf.test.TestCase, parameterized.TestCase):
def _get_classification_module(self, input_type):
params = exp_factory.get_exp_config('resnet_imagenet')
params.task.model.backbone.resnet.model_id = 18
classification_module = image_classification.ClassificationModule(
params,
batch_size=1,
input_image_size=[224, 224],
input_type=input_type)
return classification_module
def _export_from_module(self, module, input_type, save_directory):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module,
save_directory,
signatures=signatures)
def _get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
return tf.zeros((1, 224, 224, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((224, 224, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
return [example]
elif input_type == 'tflite':
return tf.zeros((1, 224, 224, 3), dtype=np.float32)
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'image_bytes'},
{'input_type': 'tf_example'},
{'input_type': 'tflite'},
)
def test_export(self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
module = self._get_classification_module(input_type)
# Test that the model restores any attrs that are trackable objects
# (eg: tables, resource variables, keras models/layers, tf.hub modules).
module.model.test_trackable = tf.keras.layers.InputLayer(input_shape=(4,))
self._export_from_module(module, input_type, tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(os.path.exists(
os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(os.path.exists(
os.path.join(tmp_dir, 'variables', 'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
classification_fn = imported.signatures['serving_default']
images = self._get_dummy_input(input_type)
if input_type != 'tflite':
processed_images = tf.nest.map_structure(
tf.stop_gradient,
tf.map_fn(
module._build_inputs,
elems=tf.zeros((1, 224, 224, 3), dtype=tf.uint8),
fn_output_signature=tf.TensorSpec(
shape=[224, 224, 3], dtype=tf.float32)))
else:
processed_images = images
expected_logits = module.model(processed_images, training=False)
expected_prob = tf.nn.softmax(expected_logits)
out = classification_fn(tf.constant(images))
# The imported model should contain any trackable attrs that the original
# model had.
self.assertTrue(hasattr(imported.model, 'test_trackable'))
self.assertAllClose(out['logits'].numpy(), expected_logits.numpy())
self.assertAllClose(out['probs'].numpy(), expected_prob.numpy())
if __name__ == '__main__':
tf.test.main()
| 4,612 | 37.123967 | 79 | py |
models | models-master/official/vision/serving/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification input and model functions for serving/inference."""
import tensorflow as tf
from official.vision.modeling import factory
from official.vision.ops import preprocess_ops
from official.vision.serving import export_base
class ClassificationModule(export_base.ExportModule):
"""classification Module."""
def _build_model(self):
input_specs = tf.keras.layers.InputSpec(
shape=[self._batch_size] + self._input_image_size + [3])
return factory.build_classification_model(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def _build_inputs(self, image):
"""Builds classification model inputs for serving."""
# Center crops and resizes image.
if self.params.task.train_data.aug_crop:
image = preprocess_ops.center_crop_image(image)
image = tf.image.resize(
image, self._input_image_size, method=tf.image.ResizeMethod.BILINEAR)
image = tf.reshape(
image, [self._input_image_size[0], self._input_image_size[1], 3])
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
return image
def serve(self, images):
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary holding classification output logits and probabilities.
"""
# Skip image preprocessing when input_type is tflite so it is compatible
# with TFLite quantization.
if self._input_type != 'tflite':
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32),
parallel_iterations=32))
logits = self.inference_step(images)
if self.params.task.train_data.is_multilabel:
probs = tf.math.sigmoid(logits)
else:
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
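# Example usage (a minimal sketch, assuming the registered `resnet_imagenet`
# experiment; batch size, image size, and paths are illustrative):
#
#   from official.core import exp_factory
#
#   params = exp_factory.get_exp_config('resnet_imagenet')
#   module = ClassificationModule(
#       params, batch_size=1, input_image_size=[224, 224],
#       input_type='image_bytes')
#   signatures = module.get_inference_signatures(
#       {'image_bytes': 'serving_default'})
#   tf.saved_model.save(module, '/tmp/classification_export',
#                       signatures=signatures)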
| 2,858 | 33.445783 | 79 | py |
models | models-master/official/vision/serving/export_base.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for model export."""
import abc
from typing import Dict, List, Mapping, Optional, Text
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
class ExportModule(export_base.ExportModule, metaclass=abc.ABCMeta):
"""Base Export Module."""
def __init__(self,
params: cfg.ExperimentConfig,
*,
batch_size: int,
input_image_size: List[int],
input_type: str = 'image_tensor',
num_channels: int = 3,
model: Optional[tf.keras.Model] = None,
input_name: Optional[str] = None):
"""Initializes a module for export.
Args:
params: Experiment params.
batch_size: The batch size of the model input. Can be `int` or None.
input_image_size: List or Tuple of size of the input image. For 2D image,
it is [height, width].
input_type: The input signature type.
num_channels: The number of the image channels.
model: A tf.keras.Model instance to be exported.
input_name: A customized input tensor name.
"""
self.params = params
self._batch_size = batch_size
self._input_image_size = input_image_size
self._num_channels = num_channels
self._input_type = input_type
self._input_name = input_name
if model is None:
model = self._build_model() # pylint: disable=assignment-from-none
super().__init__(params=params, model=model)
def _decode_image(self, encoded_image_bytes: str) -> tf.Tensor:
"""Decodes an image bytes to an image tensor.
    Uses `tf.image.decode_image` to decode an image if the input is expected to
    be a 2D image; otherwise uses `tf.io.decode_raw` to convert the raw bytes to
    a tensor and reshapes it to the desired shape.
Args:
encoded_image_bytes: An encoded image string to be decoded.
Returns:
A decoded image tensor.
"""
if len(self._input_image_size) == 2:
# Decode an image if 2D input is expected.
image_tensor = tf.image.decode_image(
encoded_image_bytes, channels=self._num_channels)
image_tensor.set_shape((None, None, self._num_channels))
else:
# Convert raw bytes into a tensor and reshape it, if not 2D input.
image_tensor = tf.io.decode_raw(encoded_image_bytes, out_type=tf.uint8)
image_tensor = tf.reshape(image_tensor,
self._input_image_size + [self._num_channels])
return image_tensor
def _decode_tf_example(
self, tf_example_string_tensor: tf.train.Example) -> tf.Tensor:
"""Decodes a TF Example to an image tensor.
Args:
tf_example_string_tensor: A tf.train.Example of encoded image and other
information.
Returns:
A decoded image tensor.
"""
keys_to_features = {'image/encoded': tf.io.FixedLenFeature((), tf.string)}
parsed_tensors = tf.io.parse_single_example(
serialized=tf_example_string_tensor, features=keys_to_features)
image_tensor = self._decode_image(parsed_tensors['image/encoded'])
image_tensor.set_shape(
[None] * len(self._input_image_size) + [self._num_channels]
)
return image_tensor
def _build_model(self, **kwargs):
"""Returns a model built from the params."""
return None
@tf.function
def inference_from_image_tensors(
self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
return self.serve(inputs)
@tf.function
def inference_for_tflite(self, inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
return self.serve(inputs)
@tf.function
def inference_from_image_bytes(self, inputs: tf.Tensor):
with tf.device('cpu:0'):
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._decode_image,
elems=inputs,
fn_output_signature=tf.TensorSpec(
shape=[None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8),
parallel_iterations=32))
images = tf.stack(images)
return self.serve(images)
@tf.function
def inference_from_tf_example(self,
inputs: tf.Tensor) -> Mapping[str, tf.Tensor]:
with tf.device('cpu:0'):
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._decode_tf_example,
elems=inputs,
# Height/width of the shape of input images is unspecified (None)
# at the time of decoding the example, but the shape will
# be adjusted to conform to the input layer of the model,
# by _run_inference_on_image_tensors() below.
fn_output_signature=tf.TensorSpec(
shape=[None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8),
dtype=tf.uint8,
parallel_iterations=32))
images = tf.stack(images)
return self.serve(images)
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
function_keys: A dictionary with keys as the function to create signature
for and values as the signature keys when returns.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for key, def_name in function_keys.items():
if key == 'image_tensor':
input_signature = tf.TensorSpec(
shape=[self._batch_size] + [None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8,
name=self._input_name)
signatures[
def_name] = self.inference_from_image_tensors.get_concrete_function(
input_signature)
elif key == 'image_bytes':
input_signature = tf.TensorSpec(
shape=[self._batch_size], dtype=tf.string, name=self._input_name)
signatures[
def_name] = self.inference_from_image_bytes.get_concrete_function(
input_signature)
elif key == 'serve_examples' or key == 'tf_example':
input_signature = tf.TensorSpec(
shape=[self._batch_size], dtype=tf.string, name=self._input_name)
signatures[
def_name] = self.inference_from_tf_example.get_concrete_function(
input_signature)
elif key == 'tflite':
input_signature = tf.TensorSpec(
shape=[self._batch_size] + self._input_image_size +
[self._num_channels],
dtype=tf.float32,
name=self._input_name)
signatures[def_name] = self.inference_for_tflite.get_concrete_function(
input_signature)
else:
raise ValueError('Unrecognized `input_type`')
return signatures
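# Example of how `function_keys` maps input types to serving signature names
# (a minimal sketch; `MyModule` stands for any concrete ExportModule subclass
# and the export path is hypothetical):
#
#   module = MyModule(params, batch_size=1, input_image_size=[224, 224])
#   signatures = module.get_inference_signatures({
#       'image_tensor': 'serving_default',
#       'tf_example': 'serving_tf_example',
#   })
#   tf.saved_model.save(module, '/tmp/export_dir', signatures=signatures)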
| 7,531 | 36.66 | 80 | py |
models | models-master/official/vision/serving/detection.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detection input and model functions for serving/inference."""
from typing import Mapping, Tuple
from absl import logging
import tensorflow as tf
from official.vision import configs
from official.vision.modeling import factory
from official.vision.ops import anchor
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
from official.vision.serving import export_base
class DetectionModule(export_base.ExportModule):
"""Detection Module."""
def _build_model(self):
nms_versions_supporting_dynamic_batch_size = {'batched', 'v2', 'v3'}
nms_version = self.params.task.model.detection_generator.nms_version
if (self._batch_size is None and
nms_version not in nms_versions_supporting_dynamic_batch_size):
logging.info('nms_version is set to `batched` because `%s` '
                   'does not support dynamic batch size.', nms_version)
self.params.task.model.detection_generator.nms_version = 'batched'
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
if isinstance(self.params.task.model, configs.maskrcnn.MaskRCNN):
model = factory.build_maskrcnn(
input_specs=input_specs, model_config=self.params.task.model)
elif isinstance(self.params.task.model, configs.retinanet.RetinaNet):
model = factory.build_retinanet(
input_specs=input_specs, model_config=self.params.task.model)
else:
raise ValueError('Detection module not implemented for {} model.'.format(
type(self.params.task.model)))
return model
def _build_anchor_boxes(self):
"""Builds and returns anchor boxes."""
model_params = self.params.task.model
input_anchor = anchor.build_anchor_generator(
min_level=model_params.min_level,
max_level=model_params.max_level,
num_scales=model_params.anchor.num_scales,
aspect_ratios=model_params.anchor.aspect_ratios,
anchor_size=model_params.anchor.anchor_size)
return input_anchor(
image_size=(self._input_image_size[0], self._input_image_size[1]))
def _build_inputs(self, image):
"""Builds detection model inputs for serving."""
model_params = self.params.task.model
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._input_image_size,
padded_size=preprocess_ops.compute_padded_size(
self._input_image_size, 2**model_params.max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
anchor_boxes = self._build_anchor_boxes()
return image, anchor_boxes, image_info
def _normalize_coordinates(self, detections_dict, dict_keys, image_info):
"""Normalizes detection coordinates between 0 and 1.
Args:
detections_dict: Dictionary containing the output of the model prediction.
dict_keys: Key names corresponding to the tensors of the output dictionary
that we want to update.
image_info: Tensor containing the details of the image resizing.
Returns:
detections_dict: Updated detection dictionary.
"""
for key in dict_keys:
if key not in detections_dict:
continue
detection_boxes = detections_dict[key] / tf.tile(
image_info[:, 2:3, :], [1, 1, 2]
)
detections_dict[key] = box_ops.normalize_boxes(
detection_boxes, image_info[:, 0:1, :]
)
detections_dict[key] = tf.clip_by_value(detections_dict[key], 0.0, 1.0)
return detections_dict
def preprocess(
self, images: tf.Tensor
) -> Tuple[tf.Tensor, Mapping[str, tf.Tensor], tf.Tensor]:
"""Preprocesses inputs to be suitable for the model.
Args:
images: The images tensor.
Returns:
images: The images tensor cast to float.
anchor_boxes: Dict mapping anchor levels to anchor boxes.
image_info: Tensor containing the details of the image resizing.
"""
model_params = self.params.task.model
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
# Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
images_spec = tf.TensorSpec(shape=self._input_image_size + [3],
dtype=tf.float32)
num_anchors = model_params.anchor.num_scales * len(
model_params.anchor.aspect_ratios) * 4
anchor_shapes = []
for level in range(model_params.min_level, model_params.max_level + 1):
anchor_level_spec = tf.TensorSpec(
shape=[
self._input_image_size[0] // 2**level,
self._input_image_size[1] // 2**level, num_anchors
],
dtype=tf.float32)
anchor_shapes.append((str(level), anchor_level_spec))
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, anchor_boxes, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, dict(anchor_shapes),
image_info_spec),
parallel_iterations=32))
return images, anchor_boxes, image_info
def serve(self, images: tf.Tensor):
"""Casts image to float and runs inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary holding detection outputs (boxes, scores, classes, etc.).
"""
# Skip image preprocessing when input_type is tflite so it is compatible
# with TFLite quantization.
if self._input_type != 'tflite':
images, anchor_boxes, image_info = self.preprocess(images)
else:
with tf.device('cpu:0'):
anchor_boxes = self._build_anchor_boxes()
# image_info is a 3D tensor of shape [batch_size, 4, 2]. It is in the
# format of [[original_height, original_width],
# [desired_height, desired_width], [y_scale, x_scale],
# [y_offset, x_offset]]. When input_type is tflite, input image is
# supposed to be preprocessed already.
image_info = tf.convert_to_tensor([[
self._input_image_size, self._input_image_size, [1.0, 1.0], [0, 0]
]],
dtype=tf.float32)
input_image_shape = image_info[:, 1, :]
    # To work around a keras.Model limitation when saving a model with layers
    # that have multiple inputs, we use `model.call` here to trigger the
    # forward pass. Note that this disables some Keras magic that happens in
    # `__call__`.
model_call_kwargs = {
'images': images,
'image_shape': input_image_shape,
'anchor_boxes': anchor_boxes,
'training': False,
}
if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
model_call_kwargs['output_intermediate_features'] = (
self.params.task.export_config.output_intermediate_features
)
detections = self.model.call(**model_call_kwargs)
if self.params.task.model.detection_generator.apply_nms:
# For RetinaNet model, apply export_config.
# TODO(huizhongc): Add export_config to fasterrcnn and maskrcnn as needed.
if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
export_config = self.params.task.export_config
# Normalize detection box coordinates to [0, 1].
if export_config.output_normalized_coordinates:
keys = ['detection_boxes', 'detection_outer_boxes']
detections = self._normalize_coordinates(detections, keys, image_info)
# Cast num_detections and detection_classes to float. This allows the
# model inference to work on chain (go/chain) as chain requires floating
# point outputs.
if export_config.cast_num_detections_to_float:
detections['num_detections'] = tf.cast(
detections['num_detections'], dtype=tf.float32)
if export_config.cast_detection_classes_to_float:
detections['detection_classes'] = tf.cast(
detections['detection_classes'], dtype=tf.float32)
final_outputs = {
'detection_boxes': detections['detection_boxes'],
'detection_scores': detections['detection_scores'],
'detection_classes': detections['detection_classes'],
'num_detections': detections['num_detections']
}
if 'detection_outer_boxes' in detections:
final_outputs['detection_outer_boxes'] = (
detections['detection_outer_boxes'])
else:
# For RetinaNet model, apply export_config.
if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
export_config = self.params.task.export_config
# Normalize detection box coordinates to [0, 1].
if export_config.output_normalized_coordinates:
keys = ['decoded_boxes']
detections = self._normalize_coordinates(detections, keys, image_info)
final_outputs = {
'decoded_boxes': detections['decoded_boxes'],
'decoded_box_scores': detections['decoded_box_scores']
}
if 'detection_masks' in detections.keys():
final_outputs['detection_masks'] = detections['detection_masks']
if (
isinstance(self.params.task.model, configs.retinanet.RetinaNet)
and self.params.task.export_config.output_intermediate_features
):
final_outputs.update(
{
k: v
for k, v in detections.items()
if k.startswith('backbone_') or k.startswith('decoder_')
}
)
if self.params.task.model.detection_generator.nms_version != 'tflite':
final_outputs.update({'image_info': image_info})
return final_outputs
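# Example usage (a minimal sketch, assuming the registered
# `retinanet_resnetfpn_coco` experiment; paths and the signature key are
# illustrative):
#
#   from official.core import exp_factory
#
#   params = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
#   module = DetectionModule(
#       params, batch_size=1, input_image_size=[640, 640],
#       input_type='image_tensor')
#   signatures = module.get_inference_signatures(
#       {'image_tensor': 'serving_default'})
#   tf.saved_model.save(module, '/tmp/detection_export',
#                       signatures=signatures)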
| 10,516 | 39.45 | 80 | py |
models | models-master/official/vision/serving/export_tflite.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Binary to convert a saved model to tflite model.
It requires a SavedModel exported using export_saved_model.py with batch size 1
and input type `tflite`, and using the same config file used for exporting saved
model. It includes optional post-training quantization. When using integer
quantization, calibration steps need to be provided to calibrate model input.
To convert a SavedModel to a TFLite model:
EXPERIMENT_TYPE = XX
TFLITE_PATH = XX
SAVED_MODEL_DIR = XX
CONFIG_FILE = XX
export_tflite --experiment=${EXPERIMENT_TYPE} \
  --saved_model_dir=${SAVED_MODEL_DIR} \
--tflite_path=${TFLITE_PATH} \
--config_file=${CONFIG_FILE} \
--quant_type=fp16 \
--calibration_steps=500
"""
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision import registry_imports # pylint: disable=unused-import
from official.vision.serving import export_tflite_lib
FLAGS = flags.FLAGS
_EXPERIMENT = flags.DEFINE_string(
'experiment',
None,
'experiment type, e.g. retinanet_resnetfpn_coco',
required=True)
_CONFIG_FILE = flags.DEFINE_multi_string(
'config_file',
default='',
    help='YAML/JSON files which specify overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
_PARAMS_OVERRIDE = flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
_SAVED_MODEL_DIR = flags.DEFINE_string(
'saved_model_dir', None, 'The directory to the saved model.', required=True)
_TFLITE_PATH = flags.DEFINE_string(
'tflite_path', None, 'The path to the output tflite model.', required=True)
_QUANT_TYPE = flags.DEFINE_string(
'quant_type',
default=None,
    help='Post-training quantization type. Supports `int8_fallback`, '
'`int8_full_fp32_io`, `int8_full`, `fp16`, `qat`, `qat_fp32_io`, '
'`int8_full_int8_io` and `default`. See '
'https://www.tensorflow.org/lite/performance/post_training_quantization '
'for more details.')
_CALIBRATION_STEPS = flags.DEFINE_integer(
'calibration_steps', 500,
'The number of calibration steps for integer model.')
_DENYLISTED_OPS = flags.DEFINE_string(
'denylisted_ops', '', 'The comma-separated string of ops '
'that are excluded from integer quantization. The name of '
'ops should be all capital letters, such as CAST or GREATER.'
'This is useful to exclude certains ops that affects quality or latency. '
'Valid ops that should not be included are quantization friendly ops, such '
'as CONV_2D, DEPTHWISE_CONV_2D, FULLY_CONNECTED, etc.')
def main(_) -> None:
params = exp_factory.get_exp_config(_EXPERIMENT.value)
if _CONFIG_FILE.value is not None:
for config_file in _CONFIG_FILE.value or []:
try:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True
)
except KeyError:
params = hyperparams.override_params_dict(
params, config_file, is_strict=False
)
if _PARAMS_OVERRIDE.value:
try:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=True
)
except KeyError:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=False
)
params.validate()
params.lock()
logging.info('Converting SavedModel from %s to TFLite model...',
_SAVED_MODEL_DIR.value)
if _DENYLISTED_OPS.value:
denylisted_ops = list(_DENYLISTED_OPS.value.split(','))
else:
denylisted_ops = None
tflite_model = export_tflite_lib.convert_tflite_model(
saved_model_dir=_SAVED_MODEL_DIR.value,
quant_type=_QUANT_TYPE.value,
params=params,
calibration_steps=_CALIBRATION_STEPS.value,
denylisted_ops=denylisted_ops)
with tf.io.gfile.GFile(_TFLITE_PATH.value, 'wb') as fw:
fw.write(tflite_model)
logging.info('TFLite model converted and saved to %s.', _TFLITE_PATH.value)
if __name__ == '__main__':
app.run(main)
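# Example of running the converted TFLite model (a minimal sketch; the model
# path is hypothetical, and the input dtype/shape depend on the exported
# SavedModel and the quantization type):
#
#   import numpy as np
#   import tensorflow as tf
#
#   interpreter = tf.lite.Interpreter(model_path='/tmp/model.tflite')
#   interpreter.allocate_tensors()
#   input_details = interpreter.get_input_details()
#   output_details = interpreter.get_output_details()
#   dummy_input = np.zeros(input_details[0]['shape'],
#                          dtype=input_details[0]['dtype'])
#   interpreter.set_tensor(input_details[0]['index'], dummy_input)
#   interpreter.invoke()
#   outputs = [interpreter.get_tensor(d['index']) for d in output_details]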
| 5,105 | 36.544118 | 80 | py |
models | models-master/official/vision/configs/backbones_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D Backbones configurations."""
from typing import Optional, Tuple
# Import libraries
import dataclasses
from official.modeling import hyperparams
@dataclasses.dataclass
class ResNet3DBlock(hyperparams.Config):
"""Configuration of a ResNet 3D block."""
temporal_strides: int = 1
temporal_kernel_sizes: Tuple[int, ...] = ()
use_self_gating: bool = False
@dataclasses.dataclass
class ResNet3D(hyperparams.Config):
"""ResNet config."""
model_id: int = 50
stem_type: str = 'v0'
stem_conv_temporal_kernel_size: int = 5
stem_conv_temporal_stride: int = 2
stem_pool_temporal_stride: int = 2
block_specs: Tuple[ResNet3DBlock, ...] = ()
stochastic_depth_drop_rate: float = 0.0
se_ratio: float = 0.0
@dataclasses.dataclass
class ResNet3D50(ResNet3D):
"""Block specifications of the Resnet50 (3D) model."""
model_id: int = 50
block_specs: Tuple[
ResNet3DBlock, ResNet3DBlock, ResNet3DBlock, ResNet3DBlock] = (
ResNet3DBlock(temporal_strides=1,
temporal_kernel_sizes=(3, 3, 3),
use_self_gating=True),
ResNet3DBlock(temporal_strides=1,
temporal_kernel_sizes=(3, 1, 3, 1),
use_self_gating=True),
ResNet3DBlock(temporal_strides=1,
temporal_kernel_sizes=(3, 1, 3, 1, 3, 1),
use_self_gating=True),
ResNet3DBlock(temporal_strides=1,
temporal_kernel_sizes=(1, 3, 1),
use_self_gating=True))
@dataclasses.dataclass
class ResNet3DRS(ResNet3D):
"""Block specifications of the ResNet-RS (3D) model."""
model_id: int = 50
stem_type: str = 'v1'
stem_conv_temporal_kernel_size: int = 5
stem_conv_temporal_stride: int = 2
stem_pool_temporal_stride: int = 2
stochastic_depth_drop_rate: float = 0.1
se_ratio: float = 0.2
block_specs: Tuple[
ResNet3DBlock, ResNet3DBlock, ResNet3DBlock, ResNet3DBlock] = (
ResNet3DBlock(temporal_strides=1,
temporal_kernel_sizes=(1,),
use_self_gating=True),
ResNet3DBlock(temporal_strides=1,
temporal_kernel_sizes=(1,),
use_self_gating=True),
ResNet3DBlock(temporal_strides=1,
temporal_kernel_sizes=(3,),
use_self_gating=True),
ResNet3DBlock(temporal_strides=1,
temporal_kernel_sizes=(3,),
use_self_gating=True))
@dataclasses.dataclass
class Backbone3D(hyperparams.OneOfConfig):
"""Configuration for backbones.
Attributes:
    type: 'str', type of backbone to be used, one of the fields below.
resnet_3d: resnet3d backbone config.
resnet_3d_rs: resnet3d-rs backbone config.
"""
type: Optional[str] = None
resnet_3d: ResNet3D = dataclasses.field(default_factory=ResNet3D50)
resnet_3d_rs: ResNet3D = dataclasses.field(default_factory=ResNet3DRS)
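# Example of overriding the 3D backbone configuration (a minimal sketch; the
# block settings below are illustrative, not a tuned schedule):
#
#   backbone = Backbone3D(
#       type='resnet_3d',
#       resnet_3d=ResNet3D(
#           model_id=50,
#           stem_type='v0',
#           block_specs=(
#               ResNet3DBlock(temporal_strides=1,
#                             temporal_kernel_sizes=(3, 3, 3)),
#               ResNet3DBlock(temporal_strides=1,
#                             temporal_kernel_sizes=(3, 1, 3, 1)),
#               ResNet3DBlock(temporal_strides=1,
#                             temporal_kernel_sizes=(3, 1, 3, 1, 3, 1)),
#               ResNet3DBlock(temporal_strides=1,
#                             temporal_kernel_sizes=(1, 3, 1)),
#           )))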
| 3,614 | 34.097087 | 74 | py |
models | models-master/official/vision/configs/video_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for video_classification."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.vision.configs import video_classification as exp_cfg
class VideoClassificationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('video_classification',),
('video_classification_ucf101',),
('video_classification_kinetics400',),
('video_classification_kinetics600',),
('video_classification_kinetics700',),
('video_classification_kinetics700_2020',),
)
def test_video_classification_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.VideoClassificationTask)
self.assertIsInstance(config.task.model, exp_cfg.VideoClassificationModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,869 | 35.666667 | 78 | py |
models | models-master/official/vision/configs/video_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification configuration definition."""
import dataclasses
from typing import Optional, Tuple, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import backbones_3d
from official.vision.configs import common
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""The base configuration for building datasets."""
name: Optional[str] = None
file_type: Optional[str] = 'tfrecord'
compressed_input: bool = False
split: str = 'train'
variant_name: Optional[str] = None
feature_shape: Tuple[int, ...] = (64, 224, 224, 3)
temporal_stride: int = 1
random_stride_range: int = 0
num_test_clips: int = 1
num_test_crops: int = 1
num_classes: int = -1
num_examples: int = -1
global_batch_size: int = 128
data_format: str = 'channels_last'
dtype: str = 'float32'
label_dtype: str = 'int32'
one_hot: bool = True
shuffle_buffer_size: int = 64
cache: bool = False
input_path: Union[str, cfg.base_config.Config] = ''
is_training: bool = True
cycle_length: int = 10
drop_remainder: bool = True
min_image_size: int = 256
zero_centering_image: bool = False
is_multilabel: bool = False
output_audio: bool = False
audio_feature: str = ''
audio_feature_shape: Tuple[int, ...] = (-1,)
aug_min_aspect_ratio: float = 0.5
aug_max_aspect_ratio: float = 2.0
aug_min_area_ratio: float = 0.49
aug_max_area_ratio: float = 1.0
aug_type: Optional[
common.Augmentation] = None # AutoAugment and RandAugment.
mixup_and_cutmix: Optional[common.MixupAndCutmix] = None
image_field_key: str = 'image/encoded'
label_field_key: str = 'clip/label/index'
def kinetics400(is_training):
"""Generated Kinetics 400 dataset configs."""
return DataConfig(
name='kinetics400',
num_classes=400,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=215570 if is_training else 17706,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
def kinetics600(is_training):
"""Generated Kinetics 600 dataset configs."""
return DataConfig(
name='kinetics600',
num_classes=600,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=366016 if is_training else 27780,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
def kinetics700(is_training):
"""Generated Kinetics 600 dataset configs."""
return DataConfig(
name='kinetics700',
num_classes=700,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=522883 if is_training else 33441,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
def kinetics700_2020(is_training):
"""Generated Kinetics 600 dataset configs."""
return DataConfig(
name='kinetics700',
num_classes=700,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=535982 if is_training else 33640,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
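# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows how the dataset factories above are meant to be paired. The same
# factory builds both splits, differing only in `is_training`, which keeps
# `num_classes` and the feature shapes consistent between train and eval.
def example_kinetics400_splits() -> Tuple[DataConfig, DataConfig]:
  """Returns a (train, validation) DataConfig pair for Kinetics 400."""
  return kinetics400(is_training=True), kinetics400(is_training=False)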
@dataclasses.dataclass
class VideoClassificationModel(hyperparams.Config):
"""The model config."""
model_type: str = 'video_classification'
backbone: backbones_3d.Backbone3D = dataclasses.field(
default_factory=lambda: backbones_3d.Backbone3D( # pylint: disable=g-long-lambda
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()
)
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation(use_sync_bn=False)
)
dropout_rate: float = 0.2
aggregate_endpoints: bool = False
require_endpoints: Optional[Tuple[str, ...]] = None
@dataclasses.dataclass
class Losses(hyperparams.Config):
one_hot: bool = True
label_smoothing: float = 0.0
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class Metrics(hyperparams.Config):
use_per_class_recall: bool = False
@dataclasses.dataclass
class VideoClassificationTask(cfg.TaskConfig):
"""The task config."""
model: VideoClassificationModel = dataclasses.field(
default_factory=VideoClassificationModel
)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True, drop_remainder=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
is_training=False, drop_remainder=False
)
)
losses: Losses = dataclasses.field(default_factory=Losses)
metrics: Metrics = dataclasses.field(default_factory=Metrics)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: str = 'all' # all or backbone
freeze_backbone: bool = False
# Spatial Partitioning fields.
train_input_partition_dims: Optional[Tuple[int, ...]] = None
eval_input_partition_dims: Optional[Tuple[int, ...]] = None
def add_trainer(experiment: cfg.ExperimentConfig,
train_batch_size: int,
eval_batch_size: int,
learning_rate: float = 1.6,
train_epochs: int = 44,
warmup_epochs: int = 5):
"""Add and config a trainer to the experiment config."""
if experiment.task.train_data.num_examples <= 0:
raise ValueError('Wrong train dataset size {!r}'.format(
experiment.task.train_data))
if experiment.task.validation_data.num_examples <= 0:
raise ValueError('Wrong validation dataset size {!r}'.format(
experiment.task.validation_data))
experiment.task.train_data.global_batch_size = train_batch_size
experiment.task.validation_data.global_batch_size = eval_batch_size
steps_per_epoch = experiment.task.train_data.num_examples // train_batch_size
experiment.trainer = cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=train_epochs * steps_per_epoch,
validation_steps=experiment.task.validation_data.num_examples //
eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9,
'nesterov': True,
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': learning_rate,
'decay_steps': train_epochs * steps_per_epoch,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': warmup_epochs * steps_per_epoch,
'warmup_learning_rate': 0
}
}
}))
return experiment
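# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows how `add_trainer` is applied. It derives steps-per-epoch and the
# cosine schedule from the dataset sizes configured in the task, so batch
# sizes have to be chosen together with the trainer. The batch sizes below
# are illustrative values, not tuned recommendations.
def example_add_trainer(experiment: cfg.ExperimentConfig) -> cfg.ExperimentConfig:
  """Attaches a trainer with example batch sizes to a prebuilt experiment."""
  return add_trainer(
      experiment, train_batch_size=512, eval_batch_size=32, learning_rate=0.8)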
@exp_factory.register_config_factory('video_classification')
def video_classification() -> cfg.ExperimentConfig:
"""Video classification general."""
return cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=VideoClassificationTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
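# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows how the registered experiment above is retrieved and customized. The
# general experiment leaves dataset-specific fields unset, so callers fill in
# class counts and example counts before training. The values below are
# placeholders, not real dataset statistics.
def example_custom_video_classification() -> cfg.ExperimentConfig:
  """Builds the general experiment and fills in its dataset fields."""
  config = exp_factory.get_exp_config('video_classification')
  config.task.train_data.num_classes = 10
  config.task.validation_data.num_classes = 10
  config.task.train_data.num_examples = 1000
  config.task.validation_data.num_examples = 100
  return config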
@exp_factory.register_config_factory('video_classification_ucf101')
def video_classification_ucf101() -> cfg.ExperimentConfig:
"""Video classification on UCF-101 with resnet."""
train_dataset = DataConfig(
name='ucf101',
num_classes=101,
is_training=True,
split='train',
drop_remainder=True,
num_examples=9537,
temporal_stride=2,
feature_shape=(32, 224, 224, 3))
train_dataset.tfds_name = 'ucf101'
train_dataset.tfds_split = 'train'
validation_dataset = DataConfig(
name='ucf101',
num_classes=101,
is_training=True,
split='test',
drop_remainder=False,
num_examples=3783,
temporal_stride=2,
feature_shape=(32, 224, 224, 3))
validation_dataset.tfds_name = 'ucf101'
validation_dataset.tfds_split = 'test'
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(
config,
train_batch_size=64,
eval_batch_size=16,
learning_rate=0.8,
train_epochs=100)
return config
@exp_factory.register_config_factory('video_classification_kinetics400')
def video_classification_kinetics400() -> cfg.ExperimentConfig:
"""Video classification on Kinetics 400 with resnet."""
train_dataset = kinetics400(is_training=True)
validation_dataset = kinetics400(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
@exp_factory.register_config_factory('video_classification_kinetics600')
def video_classification_kinetics600() -> cfg.ExperimentConfig:
"""Video classification on Kinetics 600 with resnet."""
train_dataset = kinetics600(is_training=True)
validation_dataset = kinetics600(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
@exp_factory.register_config_factory('video_classification_kinetics700')
def video_classification_kinetics700() -> cfg.ExperimentConfig:
"""Video classification on Kinetics 700 with resnet."""
train_dataset = kinetics700(is_training=True)
validation_dataset = kinetics700(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
@exp_factory.register_config_factory('video_classification_kinetics700_2020')
def video_classification_kinetics700_2020() -> cfg.ExperimentConfig:
"""Video classification on Kinetics 700 2020 with resnet."""
train_dataset = kinetics700_2020(is_training=True)
validation_dataset = kinetics700_2020(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
| 14,478 | 36.413437 | 87 | py |
models | models-master/official/vision/configs/retinanet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for retinanet."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.vision.configs import retinanet as exp_cfg
class RetinaNetConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('retinanet_resnetfpn_coco',),
('retinanet_spinenet_coco',),
('retinanet_mobile_coco',),
)
def test_retinanet_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.RetinaNetTask)
self.assertIsInstance(config.task.model, exp_cfg.RetinaNet)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistency between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,689 | 34.957447 | 77 | py |
models | models-master/official/vision/configs/retinanet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet configuration definition."""
import dataclasses
import os
from typing import Optional, List, Sequence, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling.hyperparams import base_config
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import backbones
# pylint: disable=missing-class-docstring
# Keep for backward compatibility.
@dataclasses.dataclass
class TfExampleDecoder(common.TfExampleDecoder):
"""A simple TF Example decoder config."""
# Keep for backward compatibility.
@dataclasses.dataclass
class TfExampleDecoderLabelMap(common.TfExampleDecoderLabelMap):
"""TF Example decoder with label map config."""
# Keep for backward compatibility.
@dataclasses.dataclass
class DataDecoder(common.DataDecoder):
"""Data decoder config."""
@dataclasses.dataclass
class Parser(hyperparams.Config):
num_channels: int = 3
match_threshold: float = 0.5
unmatched_threshold: float = 0.5
aug_rand_hflip: bool = False
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
skip_crowd_during_training: bool = True
max_num_instances: int = 100
# Can choose AutoAugment and RandAugment.
aug_type: Optional[common.Augmentation] = None
# Keep for backward compatibility. Not used.
aug_policy: Optional[str] = None
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training.
Attributes:
weights: Sampling weights for each corresponding input_path. If used, then
input_path must be a config with matching keys.
"""
input_path: Union[Sequence[str], str, base_config.Config] = ''
weights: Optional[base_config.Config] = None
global_batch_size: int = 0
is_training: bool = False
dtype: str = 'bfloat16'
decoder: common.DataDecoder = dataclasses.field(
default_factory=common.DataDecoder
)
parser: Parser = dataclasses.field(default_factory=Parser)
shuffle_buffer_size: int = 10000
file_type: str = 'tfrecord'
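# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows a training DataConfig that reads from several file patterns at once,
# which the `Union[Sequence[str], str, ...]` annotation on `input_path`
# allows. Per-source sampling `weights` are omitted here because, per the
# docstring above, they additionally require `input_path` to be a keyed
# config with matching keys. The paths below are placeholders.
def example_multi_source_train_data() -> DataConfig:
  """Returns a train DataConfig that mixes two TFRecord file patterns."""
  return DataConfig(
      input_path=['/data/coco/train-part0*', '/data/extra/train*'],
      is_training=True,
      global_batch_size=64)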
@dataclasses.dataclass
class Anchor(hyperparams.Config):
num_scales: int = 3
aspect_ratios: List[float] = dataclasses.field(
default_factory=lambda: [0.5, 1.0, 2.0])
anchor_size: float = 4.0
@dataclasses.dataclass
class Losses(hyperparams.Config):
loss_weight: float = 1.0
focal_loss_alpha: float = 0.25
focal_loss_gamma: float = 1.5
huber_loss_delta: float = 0.1
box_loss_weight: int = 50
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class AttributeHead(hyperparams.Config):
name: str = ''
type: str = 'regression'
size: int = 1
  # Attribute heads with the same "prediction_tower_name" share one prediction
  # tower. If unspecified, each attribute head uses its own prediction tower.
prediction_tower_name: str = ''
  # If `num_convs` or `num_filters` is not provided, the head falls back to the
  # parameters of RetinaNetHead. When several attributes share a prediction
  # tower by setting the same `prediction_tower_name`, only `num_convs` and
  # `num_filters` from the first attribute that uses the shared tower are
  # respected.
num_convs: Optional[int] = None
num_filters: Optional[int] = None
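# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows two attribute heads that share one prediction tower by naming the
# same `prediction_tower_name`; per the notes above, `num_convs` and
# `num_filters` are taken from the first head that names the shared tower.
# The attribute names and sizes are hypothetical.
def example_shared_tower_attribute_heads() -> List[AttributeHead]:
  """Returns attribute heads that share a single prediction tower."""
  return [
      AttributeHead(name='depth', type='regression', size=1,
                    prediction_tower_name='shared_tower', num_convs=2),
      AttributeHead(name='visibility', type='regression', size=1,
                    prediction_tower_name='shared_tower'),
  ]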
@dataclasses.dataclass
class RetinaNetHead(hyperparams.Config):
num_convs: int = 4
num_filters: int = 256
use_separable_conv: bool = False
attribute_heads: List[AttributeHead] = dataclasses.field(default_factory=list)
share_classification_heads: bool = False
share_level_convs: Optional[bool] = True
@dataclasses.dataclass
class DetectionGenerator(hyperparams.Config):
apply_nms: bool = True
pre_nms_top_k: int = 5000
pre_nms_score_threshold: float = 0.05
nms_iou_threshold: float = 0.5
max_num_detections: int = 100
nms_version: str = 'v2' # `v2`, `v1`, `batched`, or `tflite`.
use_cpu_nms: bool = False
soft_nms_sigma: Optional[float] = None # Only works when nms_version='v1'.
# When nms_version = `tflite`, values from tflite_post_processing need to be
# specified. They are compatible with the input arguments used by TFLite
# custom NMS op and override above parameters.
tflite_post_processing: common.TFLitePostProcessingConfig = dataclasses.field(
default_factory=common.TFLitePostProcessingConfig
)
# Return decoded boxes/scores even if apply_nms is set `True`.
return_decoded: Optional[bool] = None
# Only works when nms_version='v2'.
use_class_agnostic_nms: Optional[bool] = False
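# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows a detection generator configured for the TFLite custom NMS op. With
# `nms_version='tflite'`, the values in `tflite_post_processing` override the
# generic NMS fields above. The thresholds are illustrative, not tuned.
def example_tflite_detection_generator() -> DetectionGenerator:
  """Returns a DetectionGenerator that delegates NMS to the TFLite op."""
  return DetectionGenerator(
      apply_nms=True,
      nms_version='tflite',
      tflite_post_processing=common.TFLitePostProcessingConfig(
          max_detections=100,
          max_classes_per_detection=1,
          nms_score_threshold=0.2,
          nms_iou_threshold=0.5))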
@dataclasses.dataclass
class RetinaNet(hyperparams.Config):
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 3
max_level: int = 7
anchor: Anchor = dataclasses.field(default_factory=Anchor)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='resnet', resnet=backbones.ResNet()
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder(type='fpn', fpn=decoders.FPN())
)
head: RetinaNetHead = dataclasses.field(default_factory=RetinaNetHead)
detection_generator: DetectionGenerator = dataclasses.field(
default_factory=DetectionGenerator
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=common.NormActivation
)
@dataclasses.dataclass
class ExportConfig(hyperparams.Config):
output_normalized_coordinates: bool = False
cast_num_detections_to_float: bool = False
cast_detection_classes_to_float: bool = False
output_intermediate_features: bool = False
@dataclasses.dataclass
class RetinaNetTask(cfg.TaskConfig):
model: RetinaNet = dataclasses.field(default_factory=RetinaNet)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
annotation_file: Optional[str] = None
per_category_metrics: bool = False
export_config: ExportConfig = dataclasses.field(default_factory=ExportConfig)
# If set, the COCO metrics will be computed.
use_coco_metrics: bool = True
# If set, the Waymo Open Dataset evaluator would be used.
use_wod_metrics: bool = False
# If set, freezes the backbone during training.
# TODO(crisnv) Add paper link when available.
freeze_backbone: bool = False
# Sets maximum number of boxes to be evaluated by coco eval api.
max_num_eval_detections: int = 100
@exp_factory.register_config_factory('retinanet')
def retinanet() -> cfg.ExperimentConfig:
"""RetinaNet general config."""
return cfg.ExperimentConfig(
task=RetinaNetTask(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('retinanet_resnetfpn_coco')
def retinanet_resnetfpn_coco() -> cfg.ExperimentConfig:
"""COCO object detection with RetinaNet."""
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=RetinaNetTask(
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
num_classes=91,
input_size=[640, 640, 3],
norm_activation=common.NormActivation(use_sync_bn=False),
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.8, aug_scale_max=1.2)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=72 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
57 * steps_per_epoch, 67 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('retinanet_spinenet_coco')
def retinanet_spinenet_coco() -> cfg.ExperimentConfig:
"""COCO object detection with RetinaNet using SpineNet backbone."""
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
input_size = 640
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=RetinaNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
backbone=backbones.Backbone(
type='spinenet',
spinenet=backbones.SpineNet(
model_id='49',
stochastic_depth_drop_rate=0.2,
min_level=3,
max_level=7)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[input_size, input_size, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=4e-5),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=500 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
475 * steps_per_epoch, 490 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.model.min_level == task.model.backbone.spinenet.min_level',
'task.model.max_level == task.model.backbone.spinenet.max_level',
])
return config
@exp_factory.register_config_factory('retinanet_mobile_coco')
def retinanet_spinenet_mobile_coco() -> cfg.ExperimentConfig:
"""COCO object detection with mobile RetinaNet."""
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
input_size = 384
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=RetinaNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
backbone=backbones.Backbone(
type='spinenet_mobile',
spinenet_mobile=backbones.SpineNetMobile(
model_id='49',
stochastic_depth_drop_rate=0.2,
min_level=3,
max_level=7,
use_keras_upsampling_2d=False)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
head=RetinaNetHead(num_filters=48, use_separable_conv=True),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[input_size, input_size, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=3e-5),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=600 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
575 * steps_per_epoch, 590 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
])
return config
| 17,493 | 35.598326 | 95 | py |
models | models-master/official/vision/configs/maskrcnn_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for maskrcnn."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.vision.configs import maskrcnn as exp_cfg
class MaskRCNNConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('fasterrcnn_resnetfpn_coco',),
('maskrcnn_resnetfpn_coco',),
('maskrcnn_spinenet_coco',),
('cascadercnn_spinenet_coco',),
)
def test_maskrcnn_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.MaskRCNNTask)
self.assertIsInstance(config.task.model, exp_cfg.MaskRCNN)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistency between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,723 | 34.916667 | 77 | py |
models | models-master/official/vision/configs/semantic_segmentation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for semantic_segmentation."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.vision.configs import semantic_segmentation as exp_cfg
class ImageSegmentationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('seg_deeplabv3_pascal',),
('seg_deeplabv3plus_pascal',),
('mnv2_deeplabv3plus_cityscapes',),
('mnv2_deeplabv3_cityscapes',),
('mnv2_deeplabv3_pascal',),
('seg_resnetfpn_pascal',),
)
def test_semantic_segmentation_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.SemanticSegmentationTask)
self.assertIsInstance(config.task.model,
exp_cfg.SemanticSegmentationModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,857 | 34.730769 | 76 | py |
models | models-master/official/vision/configs/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation configuration definition."""
import dataclasses
import os
from typing import List, Optional, Sequence, Union
import numpy as np
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import backbones
from official.vision.ops import preprocess_ops
@dataclasses.dataclass
class DenseFeatureConfig(hyperparams.Config):
"""Config for dense features, such as RGB pixels, masks, heatmaps.
The dense features are encoded images in TF examples. Thus they are
1-, 3- or 4-channel. For features with another channel number (e.g.
optical flow), they could be encoded in multiple 1-channel features.
The default config is for RGB input, with mean and stddev from ImageNet
datasets. Only supports 8-bit encoded features with the maximum value = 255.
Attributes:
feature_name: The key of the feature in TF examples.
num_channels: An `int` specifying the number of channels of the feature.
mean: A list of floats in the range of [0, 255] representing the mean value
of each channel. The length of the list should match num_channels.
stddev: A list of floats in the range of [0, 255] representing the standard
deviation of each channel. The length should match num_channels.
"""
feature_name: str = 'image/encoded'
num_channels: int = 3
mean: List[float] = dataclasses.field(
default_factory=lambda: preprocess_ops.MEAN_RGB
)
stddev: List[float] = dataclasses.field(
default_factory=lambda: preprocess_ops.STDDEV_RGB
)
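# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows a single-channel dense feature, e.g. an extra encoded mask carried
# alongside the RGB image. Per the class docstring, mean/stddev are given in
# the [0, 255] range and their lengths must match `num_channels`. The feature
# key is hypothetical.
def example_single_channel_feature() -> DenseFeatureConfig:
  """Returns a DenseFeatureConfig for a hypothetical 1-channel input."""
  return DenseFeatureConfig(
      feature_name='image/prior_mask/encoded',
      num_channels=1,
      mean=[127.5],
      stddev=[127.5])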
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
image_feature: DenseFeatureConfig = dataclasses.field(
default_factory=DenseFeatureConfig
)
output_size: List[int] = dataclasses.field(default_factory=list)
  # If crop_size is specified, the image is first resized to output_size and
  # then a crop of size crop_size is taken.
crop_size: List[int] = dataclasses.field(default_factory=list)
input_path: Union[Sequence[str], str, hyperparams.Config] = ''
weights: Optional[hyperparams.Config] = None
global_batch_size: int = 0
is_training: bool = True
dtype: str = 'float32'
shuffle_buffer_size: int = 1000
cycle_length: int = 10
# If resize_eval_groundtruth is set to False, original image sizes are used
# for eval. In that case, groundtruth_padded_size has to be specified too to
# allow for batching the variable input sizes of images.
resize_eval_groundtruth: bool = True
groundtruth_padded_size: List[int] = dataclasses.field(default_factory=list)
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
aug_rand_hflip: bool = True
preserve_aspect_ratio: bool = True
aug_policy: Optional[str] = None
drop_remainder: bool = True
file_type: str = 'tfrecord'
decoder: Optional[common.DataDecoder] = dataclasses.field(
default_factory=common.DataDecoder
)
additional_dense_features: List[DenseFeatureConfig] = dataclasses.field(
default_factory=list)
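# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows a validation DataConfig that evaluates against original-resolution
# groundtruth: when `resize_eval_groundtruth` is False,
# `groundtruth_padded_size` must also be set so variable-sized labels can
# still be batched. The path and sizes are placeholders.
def example_eval_data_with_original_groundtruth() -> DataConfig:
  """Returns an eval DataConfig that keeps groundtruth at original size."""
  return DataConfig(
      input_path='/data/seg/val*',
      output_size=[512, 512],
      is_training=False,
      global_batch_size=8,
      resize_eval_groundtruth=False,
      groundtruth_padded_size=[512, 512],
      drop_remainder=False)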
@dataclasses.dataclass
class SegmentationHead(hyperparams.Config):
"""Segmentation head config."""
level: int = 3
num_convs: int = 2
num_filters: int = 256
use_depthwise_convolution: bool = False
prediction_kernel_size: int = 1
upsample_factor: int = 1
logit_activation: Optional[str] = None # None, 'sigmoid', or 'softmax'.
feature_fusion: Optional[
str] = None # None, deeplabv3plus, panoptic_fpn_fusion or pyramid_fusion
# deeplabv3plus feature fusion params
low_level: Union[int, str] = 2
low_level_num_filters: int = 48
# panoptic_fpn_fusion params
decoder_min_level: Optional[Union[int, str]] = None
decoder_max_level: Optional[Union[int, str]] = None
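# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows a head using the `deeplabv3plus` feature fusion: the head reads the
# decoder output at `level` and fuses it with a low-level backbone feature
# (`low_level`, projected to `low_level_num_filters` channels), matching how
# the DeepLabV3+ experiments later in this file configure it.
def example_deeplabv3plus_head() -> SegmentationHead:
  """Returns a SegmentationHead with DeepLabV3+ style feature fusion."""
  return SegmentationHead(
      level=4,
      num_convs=2,
      feature_fusion='deeplabv3plus',
      low_level=2,
      low_level_num_filters=48)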
@dataclasses.dataclass
class MaskScoringHead(hyperparams.Config):
"""Mask Scoring head config."""
num_convs: int = 4
num_filters: int = 128
fc_input_size: List[int] = dataclasses.field(default_factory=list)
num_fcs: int = 2
fc_dims: int = 1024
use_depthwise_convolution: bool = False
@dataclasses.dataclass
class SemanticSegmentationModel(hyperparams.Config):
"""Semantic segmentation model config."""
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 3
max_level: int = 6
head: SegmentationHead = dataclasses.field(default_factory=SegmentationHead)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='resnet', resnet=backbones.ResNet()
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder(type='identity')
)
mask_scoring_head: Optional[MaskScoringHead] = None
norm_activation: common.NormActivation = dataclasses.field(
default_factory=common.NormActivation
)
@dataclasses.dataclass
class Losses(hyperparams.Config):
"""Loss function config."""
loss_weight: float = 1.0
label_smoothing: float = 0.0
ignore_label: int = 255
gt_is_matting_map: bool = False
class_weights: List[float] = dataclasses.field(default_factory=list)
l2_weight_decay: float = 0.0
use_groundtruth_dimension: bool = True
# If true, use binary cross entropy (sigmoid) in loss, otherwise, use
# categorical cross entropy (softmax).
use_binary_cross_entropy: bool = False
top_k_percent_pixels: float = 1.0
mask_scoring_weight: float = 1.0
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
"""Evaluation config."""
report_per_class_iou: bool = True
report_train_mean_iou: bool = True # Turning this off can speed up training.
@dataclasses.dataclass
class ExportConfig(hyperparams.Config):
"""Model export config."""
# Whether to rescale the predicted mask to the original image size.
rescale_output: bool = False
@dataclasses.dataclass
class SemanticSegmentationTask(cfg.TaskConfig):
"""The model config."""
model: SemanticSegmentationModel = dataclasses.field(
default_factory=SemanticSegmentationModel
)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
evaluation: Evaluation = dataclasses.field(default_factory=Evaluation)
train_input_partition_dims: List[int] = dataclasses.field(
default_factory=list)
eval_input_partition_dims: List[int] = dataclasses.field(default_factory=list)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
export_config: ExportConfig = dataclasses.field(default_factory=ExportConfig)
@exp_factory.register_config_factory('semantic_segmentation')
def semantic_segmentation() -> cfg.ExperimentConfig:
"""Semantic segmentation general."""
return cfg.ExperimentConfig(
task=SemanticSegmentationTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
# PASCAL VOC 2012 Dataset
PASCAL_TRAIN_EXAMPLES = 10582
PASCAL_VAL_EXAMPLES = 1449
PASCAL_INPUT_PATH_BASE = 'gs://**/pascal_voc_seg'
@exp_factory.register_config_factory('seg_deeplabv3_pascal')
def seg_deeplabv3_pascal() -> cfg.ExperimentConfig:
"""Image segmentation on pascal voc with resnet deeplabv3."""
train_batch_size = 16
eval_batch_size = 8
steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
output_stride = 16
aspp_dilation_rates = [12, 24, 36] # [6, 12, 18] if output_stride = 16
multigrid = [1, 2, 4]
stem_type = 'v1'
  level = int(np.log2(output_stride))
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
num_classes=21,
input_size=[None, None, 3],
backbone=backbones.Backbone(
type='dilated_resnet',
dilated_resnet=backbones.DilatedResNet(
model_id=101,
output_stride=output_stride,
multigrid=multigrid,
stem_type=stem_type)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level, dilation_rates=aspp_dilation_rates)),
head=SegmentationHead(level=level, num_convs=0),
norm_activation=common.NormActivation(
activation='swish',
norm_momentum=0.9997,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
# TODO(arashwan): test changing size to 513 to match deeplab.
output_size=[512, 512],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
output_size=[512, 512],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=False,
groundtruth_padded_size=[512, 512],
drop_remainder=False),
# resnet101
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
init_checkpoint_modules='backbone'),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=45 * steps_per_epoch,
validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.007,
'decay_steps': 45 * steps_per_epoch,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('seg_deeplabv3plus_pascal')
def seg_deeplabv3plus_pascal() -> cfg.ExperimentConfig:
"""Image segmentation on pascal voc with resnet deeplabv3+."""
train_batch_size = 16
eval_batch_size = 8
steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
output_stride = 16
aspp_dilation_rates = [6, 12, 18]
multigrid = [1, 2, 4]
stem_type = 'v1'
  level = int(np.log2(output_stride))
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
num_classes=21,
input_size=[None, None, 3],
backbone=backbones.Backbone(
type='dilated_resnet',
dilated_resnet=backbones.DilatedResNet(
model_id=101,
output_stride=output_stride,
stem_type=stem_type,
multigrid=multigrid)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level, dilation_rates=aspp_dilation_rates)),
head=SegmentationHead(
level=level,
num_convs=2,
feature_fusion='deeplabv3plus',
low_level=2,
low_level_num_filters=48),
norm_activation=common.NormActivation(
activation='swish',
norm_momentum=0.9997,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
output_size=[512, 512],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
output_size=[512, 512],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=False,
groundtruth_padded_size=[512, 512],
drop_remainder=False),
# resnet101
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
init_checkpoint_modules='backbone'),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=45 * steps_per_epoch,
validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.007,
'decay_steps': 45 * steps_per_epoch,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('seg_resnetfpn_pascal')
def seg_resnetfpn_pascal() -> cfg.ExperimentConfig:
"""Image segmentation on pascal voc with resnet-fpn."""
train_batch_size = 256
eval_batch_size = 32
steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
num_classes=21,
input_size=[512, 512, 3],
min_level=3,
max_level=7,
backbone=backbones.Backbone(
type='resnet', resnet=backbones.ResNet(model_id=50)),
decoder=decoders.Decoder(type='fpn', fpn=decoders.FPN()),
head=SegmentationHead(level=3, num_convs=3),
norm_activation=common.NormActivation(
activation='swish', use_sync_bn=True)),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.2,
aug_scale_max=1.5),
validation_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=False,
groundtruth_padded_size=[512, 512],
drop_remainder=False),
),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=450 * steps_per_epoch,
validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.007,
'decay_steps': 450 * steps_per_epoch,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('mnv2_deeplabv3_pascal')
def mnv2_deeplabv3_pascal() -> cfg.ExperimentConfig:
"""Image segmentation on pascal with mobilenetv2 deeplabv3."""
train_batch_size = 16
eval_batch_size = 16
steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
output_stride = 16
aspp_dilation_rates = []
  level = int(np.log2(output_stride))
pool_kernel_size = []
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
num_classes=21,
input_size=[None, None, 3],
backbone=backbones.Backbone(
type='mobilenet',
mobilenet=backbones.MobileNet(
model_id='MobileNetV2', output_stride=output_stride)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level,
dilation_rates=aspp_dilation_rates,
pool_kernel_size=pool_kernel_size)),
head=SegmentationHead(level=level, num_convs=0),
norm_activation=common.NormActivation(
activation='relu',
norm_momentum=0.99,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=Losses(l2_weight_decay=4e-5),
train_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
output_size=[512, 512],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
output_size=[512, 512],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=False,
groundtruth_padded_size=[512, 512],
drop_remainder=False),
# mobilenetv2
init_checkpoint='gs://tf_model_garden/cloud/vision-2.0/deeplab/deeplabv3_mobilenetv2_coco/best_ckpt-63',
init_checkpoint_modules=['backbone', 'decoder']),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=30000,
validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
best_checkpoint_eval_metric='mean_iou',
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_metric_comp='higher',
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.007 * train_batch_size / 16,
'decay_steps': 30000,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
# Cityscapes Dataset (Download and process the dataset yourself)
CITYSCAPES_TRAIN_EXAMPLES = 2975
CITYSCAPES_VAL_EXAMPLES = 500
CITYSCAPES_INPUT_PATH_BASE = 'cityscapes'
@exp_factory.register_config_factory('seg_deeplabv3plus_cityscapes')
def seg_deeplabv3plus_cityscapes() -> cfg.ExperimentConfig:
"""Image segmentation on cityscapes with resnet deeplabv3+."""
train_batch_size = 16
eval_batch_size = 16
steps_per_epoch = CITYSCAPES_TRAIN_EXAMPLES // train_batch_size
output_stride = 16
aspp_dilation_rates = [6, 12, 18]
multigrid = [1, 2, 4]
stem_type = 'v1'
  level = int(np.log2(output_stride))
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
# Cityscapes uses only 19 semantic classes for train/evaluation.
# The void (background) class is ignored in train and evaluation.
num_classes=19,
input_size=[None, None, 3],
backbone=backbones.Backbone(
type='dilated_resnet',
dilated_resnet=backbones.DilatedResNet(
model_id=101,
output_stride=output_stride,
stem_type=stem_type,
multigrid=multigrid)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level,
dilation_rates=aspp_dilation_rates,
pool_kernel_size=[512, 1024])),
head=SegmentationHead(
level=level,
num_convs=2,
feature_fusion='deeplabv3plus',
low_level=2,
low_level_num_filters=48),
norm_activation=common.NormActivation(
activation='swish',
norm_momentum=0.99,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE,
'train_fine**'),
crop_size=[512, 1024],
output_size=[1024, 2048],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=DataConfig(
input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE, 'val_fine*'),
output_size=[1024, 2048],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=True,
drop_remainder=False),
# resnet101
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
init_checkpoint_modules='backbone'),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=500 * steps_per_epoch,
validation_steps=CITYSCAPES_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.01,
'decay_steps': 500 * steps_per_epoch,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('mnv2_deeplabv3_cityscapes')
def mnv2_deeplabv3_cityscapes() -> cfg.ExperimentConfig:
"""Image segmentation on cityscapes with mobilenetv2 deeplabv3."""
train_batch_size = 16
eval_batch_size = 16
steps_per_epoch = CITYSCAPES_TRAIN_EXAMPLES // train_batch_size
output_stride = 16
aspp_dilation_rates = []
pool_kernel_size = [512, 1024]
  level = int(np.log2(output_stride))
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
# Cityscapes uses only 19 semantic classes for train/evaluation.
# The void (background) class is ignored in train and evaluation.
num_classes=19,
input_size=[None, None, 3],
backbone=backbones.Backbone(
type='mobilenet',
mobilenet=backbones.MobileNet(
model_id='MobileNetV2', output_stride=output_stride)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level,
dilation_rates=aspp_dilation_rates,
pool_kernel_size=pool_kernel_size)),
head=SegmentationHead(level=level, num_convs=0),
norm_activation=common.NormActivation(
activation='relu',
norm_momentum=0.99,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=Losses(l2_weight_decay=4e-5),
train_data=DataConfig(
input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE,
'train_fine**'),
crop_size=[512, 1024],
output_size=[1024, 2048],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=DataConfig(
input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE, 'val_fine*'),
output_size=[1024, 2048],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=True,
drop_remainder=False),
# Coco pre-trained mobilenetv2 checkpoint
init_checkpoint='gs://tf_model_garden/cloud/vision-2.0/deeplab/deeplabv3_mobilenetv2_coco/best_ckpt-63',
init_checkpoint_modules='backbone'),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=100000,
validation_steps=CITYSCAPES_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
best_checkpoint_eval_metric='mean_iou',
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_metric_comp='higher',
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.01,
'decay_steps': 100000,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('mnv2_deeplabv3plus_cityscapes')
def mnv2_deeplabv3plus_cityscapes() -> cfg.ExperimentConfig:
"""Image segmentation on cityscapes with mobilenetv2 deeplabv3plus."""
config = mnv2_deeplabv3_cityscapes()
config.task.model.head = SegmentationHead(
level=4,
num_convs=2,
feature_fusion='deeplabv3plus',
use_depthwise_convolution=True,
low_level='2/depthwise',
low_level_num_filters=48)
config.task.model.backbone.mobilenet.output_intermediate_endpoints = True
return config
| 30,579 | 37.659924 | 114 | py |
models | models-master/official/vision/configs/common.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common configurations."""
import dataclasses
from typing import List, Optional
# Import libraries
from official.core import config_definitions as cfg
from official.modeling import hyperparams
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
"""A simple TF Example decoder config."""
regenerate_source_id: bool = False
mask_binarize_threshold: Optional[float] = None
attribute_names: List[str] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
"""TF Example decoder with label map config."""
regenerate_source_id: bool = False
mask_binarize_threshold: Optional[float] = None
label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
"""Data decoder config.
Attributes:
    type: 'str', type of data decoder to be used, one of the fields below.
simple_decoder: simple TF Example decoder config.
label_map_decoder: TF Example decoder with label map config.
"""
type: Optional[str] = 'simple_decoder'
simple_decoder: TfExampleDecoder = dataclasses.field(
default_factory=TfExampleDecoder
)
label_map_decoder: TfExampleDecoderLabelMap = dataclasses.field(
default_factory=TfExampleDecoderLabelMap
)
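# Illustrative sketch (hypothetical helper, not used elsewhere in this file):
# shows how the OneOfConfig above selects the label-map decoder: `type` names
# the active field. The label map path is a placeholder.
def example_label_map_decoder() -> DataDecoder:
  """Returns a DataDecoder that activates the `label_map_decoder` field."""
  return DataDecoder(
      type='label_map_decoder',
      label_map_decoder=TfExampleDecoderLabelMap(
          label_map='/data/label_map.pbtxt'))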
@dataclasses.dataclass
class RandAugment(hyperparams.Config):
"""Configuration for RandAugment."""
num_layers: int = 2
magnitude: float = 10
cutout_const: float = 40
translate_const: float = 10
magnitude_std: float = 0.0
prob_to_apply: Optional[float] = None
exclude_ops: List[str] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class AutoAugment(hyperparams.Config):
"""Configuration for AutoAugment."""
augmentation_name: str = 'v0'
cutout_const: float = 100
translate_const: float = 250
@dataclasses.dataclass
class RandomErasing(hyperparams.Config):
"""Configuration for RandomErasing."""
probability: float = 0.25
min_area: float = 0.02
max_area: float = 1 / 3
min_aspect: float = 0.3
  max_aspect: Optional[float] = None
  min_count: int = 1
  max_count: int = 1
  trials: int = 10
@dataclasses.dataclass
class MixupAndCutmix(hyperparams.Config):
"""Configuration for MixupAndCutmix."""
mixup_alpha: float = .8
cutmix_alpha: float = 1.
prob: float = 1.0
switch_prob: float = 0.5
label_smoothing: float = 0.1
@dataclasses.dataclass
class Augmentation(hyperparams.OneOfConfig):
"""Configuration for input data augmentation.
Attributes:
    type: 'str', type of augmentation to be used, one of the fields below.
randaug: RandAugment config.
autoaug: AutoAugment config.
"""
type: Optional[str] = None
randaug: RandAugment = dataclasses.field(default_factory=RandAugment)
autoaug: AutoAugment = dataclasses.field(default_factory=AutoAugment)
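# Illustrative sketch (not part of the original file): choosing RandAugment
# via the one-of `type` field. The magnitude simply mirrors the RandAugment
# default above and is not a tuned value.
def _example_randaug_policy() -> Augmentation:
  return Augmentation(
      type='randaug', randaug=RandAugment(num_layers=2, magnitude=10))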
@dataclasses.dataclass
class NormActivation(hyperparams.Config):
activation: str = 'relu'
use_sync_bn: bool = True
norm_momentum: float = 0.99
norm_epsilon: float = 0.001
@dataclasses.dataclass
class PseudoLabelDataConfig(cfg.DataConfig):
"""Psuedo Label input config for training."""
input_path: str = ''
data_ratio: float = 1.0 # Per-batch ratio of pseudo-labeled to labeled data.
is_training: bool = True
dtype: str = 'float32'
shuffle_buffer_size: int = 10000
cycle_length: int = 10
aug_rand_hflip: bool = True
aug_type: Optional[
Augmentation] = None # Choose from AutoAugment and RandAugment.
file_type: str = 'tfrecord'
# Keep for backward compatibility.
aug_policy: Optional[str] = None # None, 'autoaug', or 'randaug'.
randaug_magnitude: Optional[int] = 10
@dataclasses.dataclass
class TFLitePostProcessingConfig(hyperparams.Config):
"""TFLite Post Processing config for inference."""
max_detections: int = 200
max_classes_per_detection: int = 5
  # Regular NMS runs in a multi-class fashion and is slow. Setting it to False
# uses class-agnostic NMS, which is faster.
use_regular_nms: bool = False
nms_score_threshold: float = 0.1
nms_iou_threshold: float = 0.5
  # Whether to normalize coordinates of anchors to [0, 1]. If set to True,
  # coordinates of output boxes are also normalized, but latency increases.
normalize_anchor_coordinates: Optional[bool] = False
  # Whether to omit the final NMS placeholder op. If set to True, the output
  # will be the (boxes, scores) tuple right before the NMS operation.
omit_nms: Optional[bool] = False
| 5,014 | 30.34375 | 79 | py |
models | models-master/official/vision/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
from official.vision.configs import backbones
from official.vision.configs import backbones_3d
from official.vision.configs import common
from official.vision.configs import image_classification
from official.vision.configs import maskrcnn
from official.vision.configs import retinanet
from official.vision.configs import semantic_segmentation
from official.vision.configs import video_classification
| 1,045 | 40.84 | 74 | py |
models | models-master/official/vision/configs/maskrcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""R-CNN(-RS) configuration definition."""
import dataclasses
import os
from typing import List, Optional, Sequence, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import backbones
# pylint: disable=missing-class-docstring
@dataclasses.dataclass
class Parser(hyperparams.Config):
num_channels: int = 3
match_threshold: float = 0.5
unmatched_threshold: float = 0.5
aug_rand_hflip: bool = False
aug_rand_vflip: bool = False
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
aug_type: Optional[
common.Augmentation] = None # Choose from AutoAugment and RandAugment.
skip_crowd_during_training: bool = True
max_num_instances: int = 100
rpn_match_threshold: float = 0.7
rpn_unmatched_threshold: float = 0.3
rpn_batch_size_per_im: int = 256
rpn_fg_fraction: float = 0.5
mask_crop_size: int = 112
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
input_path: Union[Sequence[str], str, hyperparams.Config] = ''
weights: Optional[hyperparams.Config] = None
global_batch_size: int = 0
is_training: bool = False
dtype: str = 'bfloat16'
decoder: common.DataDecoder = dataclasses.field(
default_factory=common.DataDecoder
)
parser: Parser = dataclasses.field(default_factory=Parser)
shuffle_buffer_size: int = 10000
file_type: str = 'tfrecord'
drop_remainder: bool = True
  # Number of examples in the dataset; it is used to create the annotation file.
num_examples: int = -1
@dataclasses.dataclass
class Anchor(hyperparams.Config):
num_scales: int = 1
aspect_ratios: List[float] = dataclasses.field(
default_factory=lambda: [0.5, 1.0, 2.0])
anchor_size: float = 8.0
@dataclasses.dataclass
class RPNHead(hyperparams.Config):
num_convs: int = 1
num_filters: int = 256
use_separable_conv: bool = False
@dataclasses.dataclass
class DetectionHead(hyperparams.Config):
num_convs: int = 4
num_filters: int = 256
use_separable_conv: bool = False
num_fcs: int = 1
fc_dims: int = 1024
class_agnostic_bbox_pred: bool = False # Has to be True for Cascade RCNN.
# If additional IoUs are passed in 'cascade_iou_thresholds'
# then ensemble the class probabilities from all heads.
cascade_class_ensemble: bool = False
@dataclasses.dataclass
class ROIGenerator(hyperparams.Config):
pre_nms_top_k: int = 2000
pre_nms_score_threshold: float = 0.0
pre_nms_min_size_threshold: float = 0.0
nms_iou_threshold: float = 0.7
num_proposals: int = 1000
test_pre_nms_top_k: int = 1000
test_pre_nms_score_threshold: float = 0.0
test_pre_nms_min_size_threshold: float = 0.0
test_nms_iou_threshold: float = 0.7
test_num_proposals: int = 1000
use_batched_nms: bool = False
@dataclasses.dataclass
class ROISampler(hyperparams.Config):
mix_gt_boxes: bool = True
num_sampled_rois: int = 512
foreground_fraction: float = 0.25
foreground_iou_threshold: float = 0.5
background_iou_high_threshold: float = 0.5
background_iou_low_threshold: float = 0.0
# IoU thresholds for additional FRCNN heads in Cascade mode.
# `foreground_iou_threshold` is the first threshold.
cascade_iou_thresholds: Optional[List[float]] = None
@dataclasses.dataclass
class ROIAligner(hyperparams.Config):
crop_size: int = 7
sample_offset: float = 0.5
@dataclasses.dataclass
class DetectionGenerator(hyperparams.Config):
apply_nms: bool = True
pre_nms_top_k: int = 5000
pre_nms_score_threshold: float = 0.05
nms_iou_threshold: float = 0.5
max_num_detections: int = 100
nms_version: str = 'v2' # `v2`, `v1`, `batched`
use_cpu_nms: bool = False
soft_nms_sigma: Optional[float] = None # Only works when nms_version='v1'.
use_sigmoid_probability: bool = False
@dataclasses.dataclass
class MaskHead(hyperparams.Config):
upsample_factor: int = 2
num_convs: int = 4
num_filters: int = 256
use_separable_conv: bool = False
class_agnostic: bool = False
@dataclasses.dataclass
class MaskSampler(hyperparams.Config):
num_sampled_masks: int = 128
@dataclasses.dataclass
class MaskROIAligner(hyperparams.Config):
crop_size: int = 14
sample_offset: float = 0.5
@dataclasses.dataclass
class MaskRCNN(hyperparams.Config):
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 2
max_level: int = 6
anchor: Anchor = dataclasses.field(default_factory=Anchor)
include_mask: bool = True
outer_boxes_scale: float = 1.0
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone(
type='resnet', resnet=backbones.ResNet()
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder(type='fpn', fpn=decoders.FPN())
)
rpn_head: RPNHead = dataclasses.field(default_factory=RPNHead)
detection_head: DetectionHead = dataclasses.field(
default_factory=DetectionHead
)
roi_generator: ROIGenerator = dataclasses.field(default_factory=ROIGenerator)
roi_sampler: ROISampler = dataclasses.field(default_factory=ROISampler)
roi_aligner: ROIAligner = dataclasses.field(default_factory=ROIAligner)
detection_generator: DetectionGenerator = dataclasses.field(
default_factory=DetectionGenerator
)
mask_head: Optional[MaskHead] = dataclasses.field(default_factory=MaskHead)
mask_sampler: Optional[MaskSampler] = dataclasses.field(
default_factory=MaskSampler
)
mask_roi_aligner: Optional[MaskROIAligner] = dataclasses.field(
default_factory=MaskROIAligner
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
norm_momentum=0.997, norm_epsilon=0.0001, use_sync_bn=True
)
)
@dataclasses.dataclass
class Losses(hyperparams.Config):
loss_weight: float = 1.0
rpn_huber_loss_delta: float = 1. / 9.
frcnn_huber_loss_delta: float = 1.
frcnn_class_use_binary_cross_entropy: bool = False
frcnn_class_loss_top_k_percent: float = 1.
l2_weight_decay: float = 0.0
rpn_score_weight: float = 1.0
rpn_box_weight: float = 1.0
frcnn_class_weight: float = 1.0
frcnn_box_weight: float = 1.0
mask_weight: float = 1.0
@dataclasses.dataclass
class MaskRCNNTask(cfg.TaskConfig):
model: MaskRCNN = dataclasses.field(default_factory=MaskRCNN)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
is_training=False, drop_remainder=False
)
)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
annotation_file: Optional[str] = None
per_category_metrics: bool = False
# If set, we only use masks for the specified class IDs.
allowed_mask_class_ids: Optional[List[int]] = None
# If set, the COCO metrics will be computed.
use_coco_metrics: bool = True
  # If set, the Waymo Open Dataset evaluator will be used.
use_wod_metrics: bool = False
# If set, use instance metrics (AP, mask AP, etc.) computed by an efficient
# approximation algorithm with TPU compatible operations.
use_approx_instance_metrics: bool = False
# If set, freezes the backbone during training.
# TODO(crisnv) Add paper link when available.
freeze_backbone: bool = False
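# Illustrative sketch (not part of the original file): the cascade options
# documented above are enabled together: extra IoU thresholds on the ROI
# sampler plus class-agnostic box regression and class ensembling on the
# detection head. See `cascadercnn_spinenet_coco` below for the full
# experiment these values are taken from.
def _example_cascade_model_overrides() -> MaskRCNN:
  return MaskRCNN(
      num_classes=91,
      input_size=[640, 640, 3],
      roi_sampler=ROISampler(cascade_iou_thresholds=[0.6, 0.7]),
      detection_head=DetectionHead(
          class_agnostic_bbox_pred=True, cascade_class_ensemble=True))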
COCO_INPUT_PATH_BASE = 'coco'
@exp_factory.register_config_factory('fasterrcnn_resnetfpn_coco')
def fasterrcnn_resnetfpn_coco() -> cfg.ExperimentConfig:
"""COCO object detection with Faster R-CNN."""
steps_per_epoch = 500
coco_val_samples = 5000
train_batch_size = 64
eval_batch_size = 8
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=MaskRCNNTask(
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=MaskRCNN(
num_classes=91,
input_size=[1024, 1024, 3],
include_mask=False,
mask_head=None,
mask_sampler=None,
mask_roi_aligner=None),
losses=Losses(l2_weight_decay=0.00004),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.8, aug_scale_max=1.25)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False)),
trainer=cfg.TrainerConfig(
train_steps=22500,
validation_steps=coco_val_samples // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [15000, 20000],
'values': [0.12, 0.012, 0.0012],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('maskrcnn_resnetfpn_coco')
def maskrcnn_resnetfpn_coco() -> cfg.ExperimentConfig:
"""COCO object detection with Mask R-CNN."""
steps_per_epoch = 500
coco_val_samples = 5000
train_batch_size = 64
eval_batch_size = 8
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(
mixed_precision_dtype='bfloat16', enable_xla=True),
task=MaskRCNNTask(
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=MaskRCNN(
num_classes=91, input_size=[1024, 1024, 3], include_mask=True),
losses=Losses(l2_weight_decay=0.00004),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.8, aug_scale_max=1.25)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False)),
trainer=cfg.TrainerConfig(
train_steps=22500,
validation_steps=coco_val_samples // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [15000, 20000],
'values': [0.12, 0.012, 0.0012],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('maskrcnn_spinenet_coco')
def maskrcnn_spinenet_coco() -> cfg.ExperimentConfig:
"""COCO object detection with Mask R-CNN with SpineNet backbone."""
steps_per_epoch = 463
coco_val_samples = 5000
train_batch_size = 256
eval_batch_size = 8
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=MaskRCNNTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=MaskRCNN(
backbone=backbones.Backbone(
type='spinenet',
spinenet=backbones.SpineNet(
model_id='49',
min_level=3,
max_level=7,
)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(use_sync_bn=True),
num_classes=91,
input_size=[640, 640, 3],
min_level=3,
max_level=7,
include_mask=True),
losses=Losses(l2_weight_decay=0.00004),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.5, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False)),
trainer=cfg.TrainerConfig(
train_steps=steps_per_epoch * 350,
validation_steps=coco_val_samples // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
steps_per_epoch * 320, steps_per_epoch * 340
],
'values': [0.32, 0.032, 0.0032],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.model.min_level == task.model.backbone.spinenet.min_level',
'task.model.max_level == task.model.backbone.spinenet.max_level',
])
return config
@exp_factory.register_config_factory('cascadercnn_spinenet_coco')
def cascadercnn_spinenet_coco() -> cfg.ExperimentConfig:
"""COCO object detection with Cascade RCNN-RS with SpineNet backbone."""
steps_per_epoch = 463
coco_val_samples = 5000
train_batch_size = 256
eval_batch_size = 8
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=MaskRCNNTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=MaskRCNN(
backbone=backbones.Backbone(
type='spinenet',
spinenet=backbones.SpineNet(
model_id='49',
min_level=3,
max_level=7,
)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
roi_sampler=ROISampler(cascade_iou_thresholds=[0.6, 0.7]),
detection_head=DetectionHead(
class_agnostic_bbox_pred=True, cascade_class_ensemble=True),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[640, 640, 3],
min_level=3,
max_level=7,
include_mask=True),
losses=Losses(l2_weight_decay=0.00004),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.5)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False)),
trainer=cfg.TrainerConfig(
train_steps=steps_per_epoch * 500,
validation_steps=coco_val_samples // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
steps_per_epoch * 475, steps_per_epoch * 490
],
'values': [0.32, 0.032, 0.0032],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.model.min_level == task.model.backbone.spinenet.min_level',
'task.model.max_level == task.model.backbone.spinenet.max_level',
])
return config
@exp_factory.register_config_factory('maskrcnn_mobilenet_coco')
def maskrcnn_mobilenet_coco() -> cfg.ExperimentConfig:
"""COCO object detection with Mask R-CNN with MobileNet backbone."""
steps_per_epoch = 232
coco_val_samples = 5000
train_batch_size = 512
eval_batch_size = 512
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=MaskRCNNTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=MaskRCNN(
backbone=backbones.Backbone(
type='mobilenet',
mobilenet=backbones.MobileNet(model_id='MobileNetV2')),
decoder=decoders.Decoder(
type='fpn',
fpn=decoders.FPN(num_filters=128, use_separable_conv=True)),
rpn_head=RPNHead(use_separable_conv=True,
num_filters=128), # 1/2 of original channels.
detection_head=DetectionHead(
use_separable_conv=True, num_filters=128,
fc_dims=512), # 1/2 of original channels.
mask_head=MaskHead(use_separable_conv=True,
num_filters=128), # 1/2 of original channels.
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
activation='relu6',
norm_momentum=0.99,
norm_epsilon=0.001,
use_sync_bn=True),
num_classes=91,
input_size=[512, 512, 3],
min_level=3,
max_level=6,
include_mask=True),
losses=Losses(l2_weight_decay=0.00004),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.5, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False)),
trainer=cfg.TrainerConfig(
train_steps=steps_per_epoch * 350,
validation_steps=coco_val_samples // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
steps_per_epoch * 320, steps_per_epoch * 340
],
'values': [0.32, 0.032, 0.0032],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
])
return config
| 23,705 | 35.696594 | 95 | py |
models | models-master/official/vision/configs/backbones.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbones configurations."""
import dataclasses
from typing import List, Optional, Tuple
from official.modeling import hyperparams
@dataclasses.dataclass
class Transformer(hyperparams.Config):
"""Transformer config."""
mlp_dim: int = 1
num_heads: int = 1
num_layers: int = 1
attention_dropout_rate: float = 0.0
dropout_rate: float = 0.0
@dataclasses.dataclass
class VisionTransformer(hyperparams.Config):
"""VisionTransformer config."""
model_name: str = 'vit-b16'
# pylint: disable=line-too-long
pooler: str = 'token' # 'token', 'gap' or 'none'. If set to 'token', an extra classification token is added to sequence.
# pylint: enable=line-too-long
representation_size: int = 0
hidden_size: int = 1
patch_size: int = 16
transformer: Transformer = dataclasses.field(default_factory=Transformer)
init_stochastic_depth_rate: float = 0.0
original_init: bool = True
pos_embed_shape: Optional[Tuple[int, int]] = None
  # Whether to output the encoded token sequence when pooler is `none`.
output_encoded_tokens: bool = True
  # Whether to output the encoded tokens as a 2D feature map.
output_2d_feature_maps: bool = False
# Adding Layerscale to each Encoder block https://arxiv.org/abs/2204.07118
layer_scale_init_value: float = 0.0
# Transformer encoder spatial partition dimensions.
transformer_partition_dims: Optional[Tuple[int, int, int, int]] = None
@dataclasses.dataclass
class ResNet(hyperparams.Config):
"""ResNet config."""
model_id: int = 50
depth_multiplier: float = 1.0
stem_type: str = 'v0'
se_ratio: float = 0.0
stochastic_depth_drop_rate: float = 0.0
scale_stem: bool = True
resnetd_shortcut: bool = False
replace_stem_max_pool: bool = False
bn_trainable: bool = True
@dataclasses.dataclass
class DilatedResNet(hyperparams.Config):
"""DilatedResNet config."""
model_id: int = 50
output_stride: int = 16
multigrid: Optional[List[int]] = None
stem_type: str = 'v0'
last_stage_repeats: int = 1
se_ratio: float = 0.0
stochastic_depth_drop_rate: float = 0.0
resnetd_shortcut: bool = False
replace_stem_max_pool: bool = False
@dataclasses.dataclass
class EfficientNet(hyperparams.Config):
"""EfficientNet config."""
model_id: str = 'b0'
se_ratio: float = 0.0
stochastic_depth_drop_rate: float = 0.0
@dataclasses.dataclass
class MobileNet(hyperparams.Config):
"""Mobilenet config."""
model_id: str = 'MobileNetV2'
filter_size_scale: float = 1.0
stochastic_depth_drop_rate: float = 0.0
output_stride: Optional[int] = None
output_intermediate_endpoints: bool = False
@dataclasses.dataclass
class SpineNet(hyperparams.Config):
"""SpineNet config."""
model_id: str = '49'
stochastic_depth_drop_rate: float = 0.0
min_level: int = 3
max_level: int = 7
@dataclasses.dataclass
class SpineNetMobile(hyperparams.Config):
"""SpineNet config."""
model_id: str = '49'
stochastic_depth_drop_rate: float = 0.0
se_ratio: float = 0.2
expand_ratio: int = 6
min_level: int = 3
max_level: int = 7
  # If use_keras_upsampling_2d is True, the model uses the Keras UpSampling2D
  # layer instead of the optimized custom TF op, which makes the model more
  # Keras-style. Set this flag to True when applying QAT from the model
  # optimization toolkit, which requires the model to use Keras layers.
use_keras_upsampling_2d: bool = False
@dataclasses.dataclass
class RevNet(hyperparams.Config):
"""RevNet config."""
# Specifies the depth of RevNet.
model_id: int = 56
@dataclasses.dataclass
class MobileDet(hyperparams.Config):
"""Mobiledet config."""
model_id: str = 'MobileDetCPU'
filter_size_scale: float = 1.0
@dataclasses.dataclass
class Backbone(hyperparams.OneOfConfig):
"""Configuration for backbones.
Attributes:
    type: 'str', type of backbone to be used, one of the fields below.
resnet: resnet backbone config.
dilated_resnet: dilated resnet backbone for semantic segmentation config.
revnet: revnet backbone config.
efficientnet: efficientnet backbone config.
spinenet: spinenet backbone config.
spinenet_mobile: mobile spinenet backbone config.
mobilenet: mobilenet backbone config.
mobiledet: mobiledet backbone config.
vit: vision transformer backbone config.
"""
type: Optional[str] = None
resnet: ResNet = dataclasses.field(default_factory=ResNet)
dilated_resnet: DilatedResNet = dataclasses.field(
default_factory=DilatedResNet
)
revnet: RevNet = dataclasses.field(default_factory=RevNet)
efficientnet: EfficientNet = dataclasses.field(default_factory=EfficientNet)
spinenet: SpineNet = dataclasses.field(default_factory=SpineNet)
spinenet_mobile: SpineNetMobile = dataclasses.field(
default_factory=SpineNetMobile
)
mobilenet: MobileNet = dataclasses.field(default_factory=MobileNet)
mobiledet: MobileDet = dataclasses.field(default_factory=MobileDet)
vit: VisionTransformer = dataclasses.field(default_factory=VisionTransformer)
| 5,547 | 31.255814 | 123 | py |
models | models-master/official/vision/configs/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_classification."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.vision.configs import image_classification as exp_cfg
class ImageClassificationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('resnet_imagenet',),
('resnet_rs_imagenet',),
('revnet_imagenet',),
('mobilenet_imagenet',),
('deit_imagenet_pretrain',),
('vit_imagenet_pretrain',),
('vit_imagenet_finetune',),
)
def test_image_classification_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.ImageClassificationTask)
self.assertIsInstance(config.task.model,
exp_cfg.ImageClassificationModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,857 | 34.730769 | 78 | py |
models | models-master/official/vision/configs/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification configuration definition."""
import dataclasses
import os
from typing import List, Optional, Tuple, Union, Sequence
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import common
from official.vision.configs import backbones
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
input_path: Union[Sequence[str], str, hyperparams.Config] = ''
weights: Optional[hyperparams.base_config.Config] = None
global_batch_size: int = 0
is_training: bool = True
dtype: str = 'float32'
shuffle_buffer_size: int = 10000
cycle_length: int = 10
is_multilabel: bool = False
aug_rand_hflip: bool = True
aug_crop: Optional[bool] = True
crop_area_range: Optional[Tuple[float, float]] = (0.08, 1.0)
aug_type: Optional[
common.Augmentation] = None # Choose from AutoAugment and RandAugment.
three_augment: bool = False
color_jitter: float = 0.
random_erasing: Optional[common.RandomErasing] = None
file_type: str = 'tfrecord'
image_field_key: str = 'image/encoded'
label_field_key: str = 'image/class/label'
decode_jpeg_only: bool = True
mixup_and_cutmix: Optional[common.MixupAndCutmix] = None
decoder: Optional[common.DataDecoder] = dataclasses.field(
default_factory=common.DataDecoder
)
# Keep for backward compatibility.
aug_policy: Optional[str] = None # None, 'autoaug', or 'randaug'.
randaug_magnitude: Optional[int] = 10
# Determines ratio between the side of the cropped image and the short side of
# the original image.
center_crop_fraction: Optional[float] = 0.875
# Interpolation method for resizing image in Parser for both training and eval
tf_resize_method: str = 'bilinear'
# Repeat augmentation puts multiple augmentations of the same image in a batch
# https://arxiv.org/abs/1902.05509
repeated_augment: Optional[int] = None
@dataclasses.dataclass
class ImageClassificationModel(hyperparams.Config):
"""The model config."""
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='resnet', resnet=backbones.ResNet()
)
)
dropout_rate: float = 0.0
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation(use_sync_bn=False)
)
# Adds a BatchNormalization layer pre-GlobalAveragePooling in classification
add_head_batch_norm: bool = False
kernel_initializer: str = 'random_uniform'
# Whether to output softmax results instead of logits.
output_softmax: bool = False
@dataclasses.dataclass
class Losses(hyperparams.Config):
loss_weight: float = 1.0
one_hot: bool = True
label_smoothing: float = 0.0
l2_weight_decay: float = 0.0
soft_labels: bool = False
# Converts multi-class classification to multi-label classification. Weights
# each object class equally in the loss function, ignoring their size.
use_binary_cross_entropy: bool = False
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
top_k: int = 5
precision_and_recall_thresholds: Optional[List[float]] = None
report_per_class_precision_and_recall: bool = False
@dataclasses.dataclass
class ImageClassificationTask(cfg.TaskConfig):
"""The task config."""
model: ImageClassificationModel = dataclasses.field(
default_factory=ImageClassificationModel
)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
evaluation: Evaluation = dataclasses.field(default_factory=Evaluation)
train_input_partition_dims: Optional[List[int]] = dataclasses.field(
default_factory=list)
eval_input_partition_dims: Optional[List[int]] = dataclasses.field(
default_factory=list)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: str = 'all' # all or backbone
model_output_keys: Optional[List[int]] = dataclasses.field(
default_factory=list)
freeze_backbone: bool = False
@exp_factory.register_config_factory('image_classification')
def image_classification() -> cfg.ExperimentConfig:
"""Image classification general."""
return cfg.ExperimentConfig(
task=ImageClassificationTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
IMAGENET_TRAIN_EXAMPLES = 1281167
IMAGENET_VAL_EXAMPLES = 50000
IMAGENET_INPUT_PATH_BASE = 'imagenet-2012-tfrecord'
@exp_factory.register_config_factory('resnet_imagenet')
def image_classification_imagenet() -> cfg.ExperimentConfig:
"""Image classification on imagenet with resnet."""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=ImageClassificationTask(
model=ImageClassificationModel(
num_classes=1001,
input_size=[224, 224, 3],
backbone=backbones.Backbone(
type='resnet', resnet=backbones.ResNet(model_id=50)),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=90 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
30 * steps_per_epoch, 60 * steps_per_epoch,
80 * steps_per_epoch
],
'values': [
0.1 * train_batch_size / 256,
0.01 * train_batch_size / 256,
0.001 * train_batch_size / 256,
0.0001 * train_batch_size / 256,
]
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
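# Worked example of the linear learning-rate scaling used above (editorial
# note, not part of the original file): the base rates are scaled by
# train_batch_size / 256, so with the global batch size of 4096 the stepwise
# values are 0.1 * 4096 / 256 = 1.6, then 0.16, 0.016 and 0.0016 after the
# 30-, 60- and 80-epoch boundaries.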
@exp_factory.register_config_factory('resnet_rs_imagenet')
def image_classification_imagenet_resnetrs() -> cfg.ExperimentConfig:
"""Image classification on imagenet with resnet-rs."""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=ImageClassificationTask(
model=ImageClassificationModel(
num_classes=1001,
input_size=[160, 160, 3],
backbone=backbones.Backbone(
type='resnet',
resnet=backbones.ResNet(
model_id=50,
stem_type='v1',
resnetd_shortcut=True,
replace_stem_max_pool=True,
se_ratio=0.25,
stochastic_depth_drop_rate=0.0)),
dropout_rate=0.25,
norm_activation=common.NormActivation(
norm_momentum=0.0,
norm_epsilon=1e-5,
use_sync_bn=False,
activation='swish')),
losses=Losses(l2_weight_decay=4e-5, label_smoothing=0.1),
train_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
aug_type=common.Augmentation(
type='randaug', randaug=common.RandAugment(magnitude=10))),
validation_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=350 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'ema': {
'average_decay': 0.9999,
'trainable_weights_only': False,
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 1.6,
'decay_steps': 350 * steps_per_epoch
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('revnet_imagenet')
def image_classification_imagenet_revnet() -> cfg.ExperimentConfig:
"""Returns a revnet config for image classification on imagenet."""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=ImageClassificationTask(
model=ImageClassificationModel(
num_classes=1001,
input_size=[224, 224, 3],
backbone=backbones.Backbone(
type='revnet', revnet=backbones.RevNet(model_id=56)),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False),
add_head_batch_norm=True),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=90 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
30 * steps_per_epoch, 60 * steps_per_epoch,
80 * steps_per_epoch
],
'values': [0.8, 0.08, 0.008, 0.0008]
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('mobilenet_imagenet')
def image_classification_imagenet_mobilenet() -> cfg.ExperimentConfig:
"""Image classification on imagenet with mobilenet."""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=ImageClassificationTask(
model=ImageClassificationModel(
num_classes=1001,
dropout_rate=0.2,
input_size=[224, 224, 3],
backbone=backbones.Backbone(
type='mobilenet',
mobilenet=backbones.MobileNet(
model_id='MobileNetV2', filter_size_scale=1.0)),
norm_activation=common.NormActivation(
norm_momentum=0.997, norm_epsilon=1e-3, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-5, label_smoothing=0.1),
train_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=500 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'rmsprop',
'rmsprop': {
'rho': 0.9,
'momentum': 0.9,
'epsilon': 0.002,
}
},
'learning_rate': {
'type': 'exponential',
'exponential': {
'initial_learning_rate':
0.008 * (train_batch_size // 128),
'decay_steps':
int(2.5 * steps_per_epoch),
'decay_rate':
0.98,
'staircase':
True
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
},
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('deit_imagenet_pretrain')
def image_classification_imagenet_deit_pretrain() -> cfg.ExperimentConfig:
"""Image classification on imagenet with vision transformer."""
train_batch_size = 4096 # originally was 1024 but 4096 better for tpu v3-32
eval_batch_size = 4096 # originally was 1024 but 4096 better for tpu v3-32
label_smoothing = 0.1
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=ImageClassificationTask(
model=ImageClassificationModel(
num_classes=1001,
input_size=[224, 224, 3],
kernel_initializer='zeros',
backbone=backbones.Backbone(
type='vit',
vit=backbones.VisionTransformer(
model_name='vit-b16',
representation_size=768,
init_stochastic_depth_rate=0.1,
original_init=False,
transformer=backbones.Transformer(
dropout_rate=0.0, attention_dropout_rate=0.0)))),
losses=Losses(
l2_weight_decay=0.0,
label_smoothing=label_smoothing,
one_hot=False,
soft_labels=True),
train_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
aug_type=common.Augmentation(
type='randaug',
randaug=common.RandAugment(
magnitude=9, exclude_ops=['Cutout'])),
mixup_and_cutmix=common.MixupAndCutmix(
label_smoothing=label_smoothing)),
validation_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=300 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.05,
'include_in_weight_decay': r'.*(kernel|weight):0$',
'gradient_clip_norm': 0.0
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.0005 * train_batch_size / 512,
'decay_steps': 300 * steps_per_epoch,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('vit_imagenet_pretrain')
def image_classification_imagenet_vit_pretrain() -> cfg.ExperimentConfig:
"""Image classification on imagenet with vision transformer."""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=ImageClassificationTask(
model=ImageClassificationModel(
num_classes=1001,
input_size=[224, 224, 3],
kernel_initializer='zeros',
backbone=backbones.Backbone(
type='vit',
vit=backbones.VisionTransformer(
model_name='vit-b16', representation_size=768))),
losses=Losses(l2_weight_decay=0.0),
train_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=300 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.3,
'include_in_weight_decay': r'.*(kernel|weight):0$',
'gradient_clip_norm': 0.0
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.003 * train_batch_size / 4096,
'decay_steps': 300 * steps_per_epoch,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 10000,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('vit_imagenet_finetune')
def image_classification_imagenet_vit_finetune() -> cfg.ExperimentConfig:
"""Image classification on imagenet with vision transformer."""
train_batch_size = 512
eval_batch_size = 512
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=ImageClassificationTask(
model=ImageClassificationModel(
num_classes=1001,
input_size=[384, 384, 3],
backbone=backbones.Backbone(
type='vit',
vit=backbones.VisionTransformer(model_name='vit-b16'))),
losses=Losses(l2_weight_decay=0.0),
train_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=20000,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9,
'global_clipnorm': 1.0,
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.003,
'decay_steps': 20000,
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
| 24,563 | 37.867089 | 82 | py |
models | models-master/official/vision/configs/decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoders configurations."""
import dataclasses
from typing import List, Optional
# Import libraries
from official.modeling import hyperparams
@dataclasses.dataclass
class Identity(hyperparams.Config):
"""Identity config."""
pass
@dataclasses.dataclass
class FPN(hyperparams.Config):
"""FPN config."""
num_filters: int = 256
fusion_type: str = 'sum'
use_separable_conv: bool = False
use_keras_layer: bool = False
@dataclasses.dataclass
class NASFPN(hyperparams.Config):
"""NASFPN config."""
num_filters: int = 256
num_repeats: int = 5
use_separable_conv: bool = False
@dataclasses.dataclass
class ASPP(hyperparams.Config):
"""ASPP config."""
level: int = 4
dilation_rates: List[int] = dataclasses.field(default_factory=list)
dropout_rate: float = 0.0
num_filters: int = 256
use_depthwise_convolution: bool = False
pool_kernel_size: Optional[List[int]] = None # Use global average pooling.
spp_layer_version: str = 'v1'
output_tensor: bool = False
@dataclasses.dataclass
class Decoder(hyperparams.OneOfConfig):
"""Configuration for decoders.
Attributes:
    type: 'str', type of decoder to be used, one of the fields below.
    fpn: fpn config.
    nasfpn: nasfpn config.
    identity: identity config.
    aspp: aspp config.
"""
type: Optional[str] = None
fpn: FPN = dataclasses.field(default_factory=FPN)
nasfpn: NASFPN = dataclasses.field(default_factory=NASFPN)
identity: Identity = dataclasses.field(default_factory=Identity)
aspp: ASPP = dataclasses.field(default_factory=ASPP)
| 2,080 | 27.506849 | 77 | py |
models | models-master/official/vision/dataloaders/parser.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The generic parser interface."""
import abc
class Parser(metaclass=abc.ABCMeta):
  """Parses data and produces tensors to be consumed by models."""
@abc.abstractmethod
def _parse_train_data(self, decoded_tensors):
"""Generates images and labels that are usable for model training.
Args:
decoded_tensors: a dict of Tensors produced by the decoder.
Returns:
images: the image tensor.
labels: a dict of Tensors that contains labels.
"""
pass
@abc.abstractmethod
def _parse_eval_data(self, decoded_tensors):
"""Generates images and labels that are usable for model evaluation.
Args:
decoded_tensors: a dict of Tensors produced by the decoder.
Returns:
images: the image tensor.
labels: a dict of Tensors that contains labels.
"""
pass
def parse_fn(self, is_training):
"""Returns a parse fn that reads and parses raw tensors from the decoder.
Args:
is_training: a `bool` to indicate whether it is in training mode.
Returns:
parse: a `callable` that takes the serialized example and generate the
images, labels tuple where labels is a dict of Tensors that contains
labels.
"""
def parse(decoded_tensors):
"""Parses the serialized example data."""
if is_training:
return self._parse_train_data(decoded_tensors)
else:
return self._parse_eval_data(decoded_tensors)
return parse
@classmethod
def inference_fn(cls, inputs):
"""Parses inputs for predictions.
Args:
inputs: A Tensor, or dictionary of Tensors.
Returns:
processed_inputs: An input tensor to the model.
"""
pass
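# Illustrative sketch (not part of the original file): a minimal subclass only
# needs the two abstract parse methods. The feature keys ('image', 'label')
# are hypothetical and depend on the decoder that feeds this parser.
class _ExampleIdentityParser(Parser):
  """Passes decoded tensors through unchanged."""
  def _parse_train_data(self, decoded_tensors):
    return decoded_tensors['image'], {'label': decoded_tensors['label']}
  def _parse_eval_data(self, decoded_tensors):
    return decoded_tensors['image'], {'label': decoded_tensors['label']}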
| 2,315 | 27.243902 | 77 | py |
models | models-master/official/vision/dataloaders/tfds_detection_decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFDS detection decoders."""
import tensorflow as tf
from official.vision.dataloaders import decoder
class MSCOCODecoder(decoder.Decoder):
"""A tf.Example decoder for tfds coco datasets."""
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a dictionary example produced by tfds.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- source_id: a string scalar tensor.
- image: a uint8 tensor of shape [None, None, 3].
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
"""
decoded_tensors = {
'source_id': tf.strings.as_string(serialized_example['image/id']),
'image': serialized_example['image'],
'height': tf.cast(tf.shape(serialized_example['image'])[0], tf.int64),
'width': tf.cast(tf.shape(serialized_example['image'])[1], tf.int64),
'groundtruth_classes': serialized_example['objects']['label'],
'groundtruth_is_crowd': serialized_example['objects']['is_crowd'],
'groundtruth_area': tf.cast(
serialized_example['objects']['area'], tf.float32),
'groundtruth_boxes': serialized_example['objects']['bbox'],
}
return decoded_tensors
TFDS_ID_TO_DECODER_MAP = {
'coco/2017': MSCOCODecoder,
'coco/2014': MSCOCODecoder,
'coco': MSCOCODecoder,
'scenic:objects365': MSCOCODecoder,
}
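# Illustrative sketch (not part of the original file): the decoder expects the
# nested dictionary layout produced by TFDS COCO datasets, which is what the
# map above is keyed by. The tiny hand-built record below only demonstrates
# that layout; its values are arbitrary placeholders.
def _example_decode_fake_coco_record():
  fake_example = {
      'image/id': tf.constant(1, tf.int64),
      'image': tf.zeros([4, 4, 3], tf.uint8),
      'objects': {
          'label': tf.constant([1], tf.int64),
          'is_crowd': tf.constant([False]),
          'area': tf.constant([16], tf.int64),
          'bbox': tf.constant([[0.0, 0.0, 1.0, 1.0]], tf.float32),
      },
  }
  return MSCOCODecoder().decode(fake_example)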
| 2,314 | 36.33871 | 78 | py |
models | models-master/official/vision/dataloaders/input_reader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset reader for vision model garden."""
from typing import Any, Callable, Mapping, Optional, Tuple, Union
from absl import logging
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import input_reader
def build_weighted_sampling_combine_fn(
weights: Mapping[Any, Any]) -> Callable[[tf.data.Dataset], tf.data.Dataset]:
"""Builds a combine_fn using weighted sampling."""
def combine_fn(datasets: Mapping[Any, tf.data.Dataset]) -> tf.data.Dataset:
"""Combines multiple datasets using weighted sampling."""
ds = []
ws = []
for k, dataset in datasets.items():
ds.append(dataset)
ws.append(weights[k])
return tf.data.Dataset.sample_from_datasets(
ds, ws, stop_on_empty_dataset=True)
return combine_fn
def create_combine_fn(
params: cfg.DataConfig
) -> Union[None, Callable[[tf.data.Dataset], tf.data.Dataset]]:
"""Creates and returns a combine_fn for dataset mixing."""
if params.is_training and params.weights:
# Combine multiple datasets using weighted sampling.
if (not isinstance(params.input_path, cfg.base_config.Config) or
not isinstance(params.weights, cfg.base_config.Config)):
raise ValueError(
'input_path and weights must both be a Config to use weighted '
'sampling.')
input_paths = params.input_path.as_dict()
weights = params.weights.as_dict()
if len(input_paths) != len(weights):
raise ValueError(
'The number of input_path and weights must be the same, but got %d '
'input_paths and %d weights.' % (len(input_paths), len(weights)))
for k in input_paths.keys():
if k not in weights:
raise ValueError(
'input_path key \'%s\' does not have a corresponding weight.' % k)
return build_weighted_sampling_combine_fn(weights)
return None
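# Illustrative sketch (not part of the original file): the combine_fn produced
# above simply wraps `tf.data.Dataset.sample_from_datasets`. The dataset
# names, contents and weights below are toy placeholders.
def _example_weighted_combination() -> tf.data.Dataset:
  datasets = {
      'labeled': tf.data.Dataset.from_tensor_slices([1, 2, 3]).repeat(),
      'pseudo': tf.data.Dataset.from_tensor_slices([10, 20, 30]).repeat(),
  }
  combine_fn = build_weighted_sampling_combine_fn(
      {'labeled': 0.8, 'pseudo': 0.2})
  return combine_fn(datasets)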
def calculate_batch_sizes(total_batch_size: int,
pseudo_label_ratio: float,
pseudo_label_batch_size: int = 0) -> Tuple[int, int]:
"""Calculates labeled and pseudo-labeled dataset batch sizes.
Returns (labeled_batch_size, pseudo_labeled_batch_size) given a
total batch size and pseudo-label data ratio.
Args:
total_batch_size: The total batch size for all data.
pseudo_label_ratio: A float ratio of pseudo-labeled to labeled data in a
batch. If it is negative, use `pseudo_label_batch_size` instead.
    pseudo_label_batch_size: The batch size of pseudo-labeled data. It is
      ignored if `pseudo_label_ratio` is non-negative. Otherwise it is used
      directly and must be between 0 and the total batch size.
Returns:
(labeled_batch_size, pseudo_labeled_batch_size) as ints.
Raises:
    ValueError: If `total_batch_size` is negative, or if `pseudo_label_ratio`
      is negative while `pseudo_label_batch_size` is negative or larger than
      the total batch size.
"""
if total_batch_size < 0:
raise ValueError('Invalid total_batch_size: {}'.format(total_batch_size))
if pseudo_label_ratio >= 0.0:
ratio_factor = pseudo_label_ratio / (1.0 + pseudo_label_ratio)
pseudo_label_batch_size = int(total_batch_size * ratio_factor)
label_batch_size = total_batch_size - pseudo_label_batch_size
else:
if pseudo_label_batch_size > total_batch_size or pseudo_label_batch_size < 0:
      raise ValueError(
          'The batch size of the pseudo-label dataset must be between 0 and '
          'the total global batch size, but got {}.'.format(
              pseudo_label_batch_size))
logging.info('data_ratio for pseudo-label dataset is less than 0. '
'Use global_batch_size from pseudo_label data config instead.')
label_batch_size = total_batch_size - pseudo_label_batch_size
return label_batch_size, pseudo_label_batch_size
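# Added illustrative sketch (not part of the original module): a worked
# example of the ratio arithmetic above.
def _example_calculate_batch_sizes_sketch():
  """Shows how a total batch of 64 is split for two different ratios."""
  # ratio 1.0 -> ratio_factor 0.5 -> (labeled, pseudo) == (32, 32).
  even_split = calculate_batch_sizes(64, pseudo_label_ratio=1.0)
  # ratio 3.0 -> ratio_factor 0.75 -> (labeled, pseudo) == (16, 48).
  skewed_split = calculate_batch_sizes(64, pseudo_label_ratio=3.0)
  return even_split, skewed_split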
class CombinationDatasetInputReader(input_reader.InputReader):
"""Combination dataset input reader."""
def __init__(self,
params: cfg.DataConfig,
dataset_fn=tf.data.TFRecordDataset,
pseudo_label_dataset_fn=tf.data.TFRecordDataset,
decoder_fn: Optional[Callable[..., Any]] = None,
combine_fn: Optional[Callable[..., Any]] = None,
sample_fn: Optional[Callable[..., Any]] = None,
parser_fn: Optional[Callable[..., Any]] = None,
transform_and_batch_fn: Optional[Callable[
[tf.data.Dataset, Optional[tf.distribute.InputContext]],
tf.data.Dataset]] = None,
postprocess_fn: Optional[Callable[..., Any]] = None):
"""Initializes an CombinationDatasetInputReader instance.
This class mixes a labeled and pseudo-labeled dataset. The params
must contain "pseudo_label_data.input_path" to specify the
pseudo-label dataset files and "pseudo_label_data.data_ratio"
to specify a per-batch mixing ratio of pseudo-label examples to
labeled dataset examples.
Args:
params: A config_definitions.DataConfig object.
dataset_fn: A `tf.data.Dataset` that consumes the input files. For
example, it can be `tf.data.TFRecordDataset`.
pseudo_label_dataset_fn: A `tf.data.Dataset` that consumes the input
files. For example, it can be `tf.data.TFRecordDataset`.
decoder_fn: An optional `callable` that takes the serialized data string
and decodes them into the raw tensor dictionary.
      combine_fn: An optional `callable` that takes a dictionary of
`tf.data.Dataset` objects as input and outputs a combined dataset. It
will be executed after the decoder_fn and before the sample_fn.
sample_fn: An optional `callable` that takes a `tf.data.Dataset` object as
input and outputs the transformed dataset. It performs sampling on the
decoded raw tensors dict before the parser_fn.
parser_fn: An optional `callable` that takes the decoded raw tensors dict
and parse them into a dictionary of tensors that can be consumed by the
model. It will be executed after decoder_fn.
transform_and_batch_fn: An optional `callable` that takes a
`tf.data.Dataset` object and an optional `tf.distribute.InputContext` as
input, and returns a `tf.data.Dataset` object. It will be executed after
`parser_fn` to transform and batch the dataset; if None, after
`parser_fn` is executed, the dataset will be batched into per-replica
batch size.
      postprocess_fn: An optional `callable` that processes batched tensors. It
will be executed after batching.
Raises:
ValueError: If drop_remainder is False.
"""
super().__init__(
params=params,
dataset_fn=dataset_fn,
decoder_fn=decoder_fn,
combine_fn=combine_fn,
sample_fn=sample_fn,
parser_fn=parser_fn,
transform_and_batch_fn=transform_and_batch_fn,
postprocess_fn=postprocess_fn)
self._pseudo_label_file_pattern = params.pseudo_label_data.input_path
self._pseudo_label_dataset_fn = pseudo_label_dataset_fn
self._pseudo_label_data_ratio = params.pseudo_label_data.data_ratio
self._pseudo_label_batch_size = params.pseudo_label_data.global_batch_size
self._pseudo_label_matched_files = input_reader.match_files(
self._pseudo_label_file_pattern)
if not self._drop_remainder:
raise ValueError(
'Must use drop_remainder=True with CombinationDatasetInputReader')
def read(
self,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Generates a tf.data.Dataset object."""
labeled_batch_size, pl_batch_size = calculate_batch_sizes(
self._global_batch_size, self._pseudo_label_data_ratio,
self._pseudo_label_batch_size)
if not labeled_batch_size and pl_batch_size:
raise ValueError(
'Invalid batch_size: {} and pseudo_label_data_ratio: {}, '
'resulting in a 0 batch size for one of the datasets.'.format(
self._global_batch_size, self._pseudo_label_data_ratio))
def _read_decode_and_parse_dataset(matched_files, dataset_fn, batch_size,
input_context):
dataset = self._read_data_source(matched_files, dataset_fn, input_context)
return self._decode_and_parse_dataset(dataset, batch_size, input_context)
labeled_dataset = _read_decode_and_parse_dataset(
matched_files=self._matched_files,
dataset_fn=self._dataset_fn,
batch_size=labeled_batch_size,
input_context=input_context)
pseudo_labeled_dataset = _read_decode_and_parse_dataset(
matched_files=self._pseudo_label_matched_files,
dataset_fn=self._pseudo_label_dataset_fn,
batch_size=pl_batch_size,
input_context=input_context)
def concat_fn(d1, d2):
return tf.nest.map_structure(
lambda x1, x2: tf.concat([x1, x2], axis=0), d1, d2)
dataset_concat = tf.data.Dataset.zip(
(labeled_dataset, pseudo_labeled_dataset))
dataset_concat = dataset_concat.map(
concat_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def maybe_map_fn(dataset, fn):
return dataset if fn is None else dataset.map(
fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset_concat = maybe_map_fn(dataset_concat, self._postprocess_fn)
dataset_concat = self._maybe_apply_data_service(dataset_concat,
input_context)
if self._deterministic is not None:
options = tf.data.Options()
options.experimental_deterministic = self._deterministic
dataset_concat = dataset_concat.with_options(options)
return dataset_concat.prefetch(tf.data.experimental.AUTOTUNE)
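# Added illustrative sketch (not part of the original module): a minimal
# usage outline for CombinationDatasetInputReader. It assumes `params` is a
# cfg.DataConfig with `drop_remainder=True` and a `pseudo_label_data`
# sub-config providing `input_path`, `data_ratio` and `global_batch_size`;
# decoder/parser callables are omitted for brevity.
def _example_combination_reader_sketch(params: cfg.DataConfig):
  """Builds the mixed labeled/pseudo-labeled dataset for the given config."""
  reader = CombinationDatasetInputReader(
      params,
      dataset_fn=tf.data.TFRecordDataset,
      pseudo_label_dataset_fn=tf.data.TFRecordDataset)
  return reader.read()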
| 10,410 | 42.379167 | 81 | py |
models | models-master/official/vision/dataloaders/tf_example_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow as tf
from official.vision.dataloaders import decoder
def _generate_source_id(image_bytes):
# Hashing using 22 bits since float32 has only 23 mantissa bits.
return tf.strings.as_string(
tf.strings.to_hash_bucket_fast(image_bytes, 2 ** 22 - 1))
class TfExampleDecoder(decoder.Decoder):
"""Tensorflow Example proto decoder."""
def __init__(
self,
include_mask=False,
regenerate_source_id=False,
mask_binarize_threshold=None,
attribute_names=None,
):
self._include_mask = include_mask
self._regenerate_source_id = regenerate_source_id
self._keys_to_features = {
'image/encoded': tf.io.FixedLenFeature((), tf.string),
'image/height': tf.io.FixedLenFeature((), tf.int64, -1),
'image/width': tf.io.FixedLenFeature((), tf.int64, -1),
'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
'image/object/class/label': tf.io.VarLenFeature(tf.int64),
'image/object/area': tf.io.VarLenFeature(tf.float32),
'image/object/is_crowd': tf.io.VarLenFeature(tf.int64),
}
attribute_names = attribute_names or []
for attr_name in attribute_names:
self._keys_to_features[f'image/object/attribute/{attr_name}'] = (
tf.io.VarLenFeature(tf.int64)
)
self._attribute_names = attribute_names
self._mask_binarize_threshold = mask_binarize_threshold
if include_mask:
self._keys_to_features.update({
'image/object/mask': tf.io.VarLenFeature(tf.string),
})
if not regenerate_source_id:
self._keys_to_features.update({
'image/source_id': tf.io.FixedLenFeature((), tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_classes(self, parsed_tensors):
return parsed_tensors['image/object/class/label']
def _decode_attributes(self, parsed_tensors):
attribute_dict = dict()
for attr_name in self._attribute_names:
attr_array = parsed_tensors[f'image/object/attribute/{attr_name}']
# TODO(b/269654135): Support decoding of fully 2D attributes.
attribute_dict[attr_name] = tf.expand_dims(attr_array, -1)
return attribute_dict
def _decode_areas(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
height = tf.cast(parsed_tensors['image/height'], dtype=tf.float32)
width = tf.cast(parsed_tensors['image/width'], dtype=tf.float32)
return tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
lambda: parsed_tensors['image/object/area'],
lambda: (xmax - xmin) * (ymax - ymin) * height * width)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
pred=tf.greater(tf.size(input=masks), 0),
true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- source_id: a string scalar tensor.
- image: a uint8 tensor of shape [None, None, 3].
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
parsed_tensors = tf.io.parse_single_example(
serialized=serialized_example, features=self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=0)
if self._regenerate_source_id:
source_id = _generate_source_id(parsed_tensors['image/encoded'])
else:
source_id = tf.cond(
tf.greater(tf.strings.length(parsed_tensors['image/source_id']), 0),
lambda: parsed_tensors['image/source_id'],
lambda: _generate_source_id(parsed_tensors['image/encoded']))
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
classes = self._decode_classes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
attributes = self._decode_attributes(parsed_tensors)
decode_image_shape = tf.logical_or(
tf.equal(parsed_tensors['image/height'], -1),
tf.equal(parsed_tensors['image/width'], -1))
image_shape = tf.cast(tf.shape(image), dtype=tf.int64)
parsed_tensors['image/height'] = tf.where(decode_image_shape,
image_shape[0],
parsed_tensors['image/height'])
parsed_tensors['image/width'] = tf.where(decode_image_shape, image_shape[1],
parsed_tensors['image/width'])
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(classes, dtype=tf.bool))
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
if self._mask_binarize_threshold is not None:
masks = tf.cast(masks > self._mask_binarize_threshold, tf.float32)
decoded_tensors = {
'source_id': source_id,
'image': image,
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': classes,
'groundtruth_is_crowd': is_crowds,
'groundtruth_area': areas,
'groundtruth_boxes': boxes,
}
if self._attribute_names:
decoded_tensors.update({'groundtruth_attributes': attributes})
if self._include_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
})
return decoded_tensors
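# Added illustrative sketch (not part of the original module): decodes one
# serialized detection tf.Example. It assumes `serialized_example` is a
# scalar tf.string tensor containing the fields listed in `decode` above.
def _example_decode_sketch(serialized_example: tf.Tensor):
  """Returns the decoded image and normalized groundtruth boxes."""
  example_decoder = TfExampleDecoder(
      include_mask=False, regenerate_source_id=True)
  decoded = example_decoder.decode(serialized_example)
  # `image` is uint8 [height, width, 3]; boxes are [ymin, xmin, ymax, xmax].
  return decoded['image'], decoded['groundtruth_boxes']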
| 8,637 | 39.938389 | 80 | py |
models | models-master/official/vision/dataloaders/input_reader_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for getting TF-Vision input readers."""
from official.common import dataset_fn as dataset_fn_util
from official.core import config_definitions as cfg
from official.core import input_reader as core_input_reader
from official.vision.dataloaders import input_reader as vision_input_reader
def input_reader_generator(params: cfg.DataConfig,
**kwargs) -> core_input_reader.InputReader:
"""Instantiates an input reader class according to the params.
Args:
params: A config_definitions.DataConfig object.
**kwargs: Additional arguments passed to input reader initialization.
Returns:
An InputReader object.
"""
if params.is_training and params.get('pseudo_label_data', False):
return vision_input_reader.CombinationDatasetInputReader(
params,
pseudo_label_dataset_fn=dataset_fn_util.pick_dataset_fn(
params.pseudo_label_data.file_type),
**kwargs)
else:
return core_input_reader.InputReader(params, **kwargs)
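# Added illustrative sketch (not part of the original module): shows the
# factory picking a reader. It assumes `params` is a valid training
# cfg.DataConfig pointing at TFRecord files; decoder/parser callables are
# omitted for brevity.
def _example_reader_selection_sketch(params: cfg.DataConfig):
  """Returns a combination reader iff `pseudo_label_data` is configured."""
  return input_reader_generator(
      params, dataset_fn=dataset_fn_util.pick_dataset_fn('tfrecord'))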
| 1,623 | 35.909091 | 75 | py |
models | models-master/official/vision/dataloaders/tf_example_label_map_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import csv
# Import libraries
import tensorflow as tf
from official.vision.dataloaders import tf_example_decoder
class TfExampleDecoderLabelMap(tf_example_decoder.TfExampleDecoder):
"""Tensorflow Example proto decoder."""
def __init__(self, label_map, include_mask=False, regenerate_source_id=False,
mask_binarize_threshold=None):
super(TfExampleDecoderLabelMap, self).__init__(
include_mask=include_mask, regenerate_source_id=regenerate_source_id,
mask_binarize_threshold=mask_binarize_threshold)
self._keys_to_features.update({
'image/object/class/text': tf.io.VarLenFeature(tf.string),
})
name_to_id = self._process_label_map(label_map)
self._name_to_id_table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
keys=tf.constant(list(name_to_id.keys()), dtype=tf.string),
values=tf.constant(list(name_to_id.values()), dtype=tf.int64)),
default_value=-1)
def _process_label_map(self, label_map):
if label_map.endswith('.csv'):
name_to_id = self._process_csv(label_map)
else:
      raise ValueError(
          'Unsupported label map file format; only .csv files are supported.')
return name_to_id
def _process_csv(self, label_map):
name_to_id = {}
with tf.io.gfile.GFile(label_map, 'r') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
if len(row) != 2:
raise ValueError('Each row of the csv label map file must be in '
'`id,name` format. length = {}'.format(len(row)))
id_index = int(row[0])
name = row[1]
name_to_id[name] = id_index
return name_to_id
def _decode_classes(self, parsed_tensors):
return self._name_to_id_table.lookup(
parsed_tensors['image/object/class/text'])
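# Added illustrative sketch (not part of the original module): shows how the
# label-map decoder maps class-name strings to integer ids. The csv path and
# example tensor are hypothetical; the csv rows must be in `id,name` order,
# e.g. `0,person` and `1,car`.
def _example_label_map_decode_sketch(label_map_csv_path: str,
                                     serialized_example: tf.Tensor):
  """Decodes classes via the csv label map; unknown names map to -1."""
  label_map_decoder = TfExampleDecoderLabelMap(
      label_map_csv_path, include_mask=False)
  decoded = label_map_decoder.decode(serialized_example)
  return decoded['groundtruth_classes']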
| 2,588 | 37.073529 | 79 | py |
models | models-master/official/vision/dataloaders/tf_example_label_map_decoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_example_label_map_decoder.py."""
import os
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.dataloaders import tfexample_utils
LABEL_MAP_CSV_CONTENT = '0,class_0\n1,class_1\n2,class_2'
class TfExampleDecoderLabelMapTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(100, 100, 0),
(100, 100, 1),
(100, 100, 2),
(100, 100, 0),
(100, 100, 1),
(100, 100, 2),
)
def test_result_shape(self, image_height, image_width, num_instances):
label_map_dir = self.get_temp_dir()
label_map_name = 'label_map.csv'
label_map_path = os.path.join(label_map_dir, label_map_name)
with open(label_map_path, 'w') as f:
f.write(LABEL_MAP_CSV_CONTENT)
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map_path, include_mask=True)
serialized_example = tfexample_utils.create_detection_test_example(
image_height=image_height,
image_width=image_width,
image_channel=3,
num_instances=num_instances).SerializeToString()
decoded_tensors = decoder.decode(
tf.convert_to_tensor(value=serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertAllEqual(
(image_height, image_width, 3), results['image'].shape)
self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
self.assertEqual(image_height, results['height'])
self.assertEqual(image_width, results['width'])
self.assertAllEqual(
(num_instances,), results['groundtruth_classes'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_is_crowd'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_area'].shape)
self.assertAllEqual(
(num_instances, 4), results['groundtruth_boxes'].shape)
self.assertAllEqual(
(num_instances, image_height, image_width),
results['groundtruth_instance_masks'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_instance_masks_png'].shape)
def test_result_content(self):
label_map_dir = self.get_temp_dir()
label_map_name = 'label_map.csv'
label_map_path = os.path.join(label_map_dir, label_map_name)
with open(label_map_path, 'w') as f:
f.write(LABEL_MAP_CSV_CONTENT)
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map_path, include_mask=True)
image_content = [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
[[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]
image = tfexample_utils.encode_image(np.uint8(image_content), fmt='PNG')
image_height = 4
image_width = 4
num_instances = 2
xmins = [0, 0.25]
xmaxs = [0.5, 1.0]
ymins = [0, 0]
ymaxs = [0.5, 1.0]
labels = [b'class_2', b'class_0']
areas = [
0.25 * image_height * image_width, 0.75 * image_height * image_width
]
is_crowds = [1, 0]
mask_content = [[[255, 255, 0, 0],
[255, 255, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 255, 255, 255],
[0, 255, 255, 255],
[0, 255, 255, 255],
[0, 255, 255, 255]]]
masks = [
tfexample_utils.encode_image(np.uint8(m), fmt='PNG')
for m in list(mask_content)
]
serialized_example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': (tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image]))),
'image/source_id': (tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tfexample_utils.DUMP_SOURCE_ID]))),
'image/height': (tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_height]))),
'image/width': (tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_width]))),
'image/object/bbox/xmin': (tf.train.Feature(
float_list=tf.train.FloatList(value=xmins))),
'image/object/bbox/xmax': (tf.train.Feature(
float_list=tf.train.FloatList(value=xmaxs))),
'image/object/bbox/ymin': (tf.train.Feature(
float_list=tf.train.FloatList(value=ymins))),
'image/object/bbox/ymax': (tf.train.Feature(
float_list=tf.train.FloatList(value=ymaxs))),
'image/object/class/text': (tf.train.Feature(
bytes_list=tf.train.BytesList(value=labels))),
'image/object/is_crowd': (tf.train.Feature(
int64_list=tf.train.Int64List(value=is_crowds))),
'image/object/area': (tf.train.Feature(
float_list=tf.train.FloatList(value=areas))),
'image/object/mask': (tf.train.Feature(
bytes_list=tf.train.BytesList(value=masks))),
})).SerializeToString()
decoded_tensors = decoder.decode(
tf.convert_to_tensor(value=serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertAllEqual(
(image_height, image_width, 3), results['image'].shape)
self.assertAllEqual(image_content, results['image'])
self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
self.assertEqual(image_height, results['height'])
self.assertEqual(image_width, results['width'])
self.assertAllEqual(
(num_instances,), results['groundtruth_classes'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_is_crowd'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_area'].shape)
self.assertAllEqual(
(num_instances, 4), results['groundtruth_boxes'].shape)
self.assertAllEqual(
(num_instances, image_height, image_width),
results['groundtruth_instance_masks'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_instance_masks_png'].shape)
self.assertAllEqual(
[2, 0], results['groundtruth_classes'])
self.assertAllEqual(
[True, False], results['groundtruth_is_crowd'])
self.assertNDArrayNear(
[0.25 * image_height * image_width, 0.75 * image_height * image_width],
results['groundtruth_area'], 1e-4)
self.assertNDArrayNear(
[[0, 0, 0.5, 0.5], [0, 0.25, 1.0, 1.0]],
results['groundtruth_boxes'], 1e-4)
self.assertNDArrayNear(
mask_content, results['groundtruth_instance_masks'], 1e-4)
self.assertAllEqual(
masks, results['groundtruth_instance_masks_png'])
if __name__ == '__main__':
tf.test.main()
| 7,746 | 39.989418 | 79 | py |
models | models-master/official/vision/dataloaders/tfexample_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to create tf.Example and tf.SequnceExample for test.
Example:video classification end-to-end test
i.e. from reading input file to train and eval.
```python
class FooTrainTest(tf.test.TestCase):
def setUp(self):
super(TrainTest, self).setUp()
# Write the fake tf.train.SequenceExample to file for test.
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self._data_path = os.path.join(data_dir, 'data.tfrecord')
examples = [
tfexample_utils.make_video_test_example(
image_shape=(36, 36, 3),
audio_shape=(20, 128),
label=random.randint(0, 100)) for _ in range(2)
]
tfexample_utils.dump_to_tfrecord(self._data_path, tf_examples=examples)
def test_foo(self):
dataset = tf.data.TFRecordDataset(self._data_path)
...
```
"""
from typing import Mapping, Optional, Sequence, Union
import numpy as np
import tensorflow as tf
from official.core import file_writers
from official.vision.data import fake_feature_generator
from official.vision.data import image_utils
from official.vision.data import tf_example_builder
IMAGE_KEY = 'image/encoded'
CLASSIFICATION_LABEL_KEY = 'image/class/label'
DISTILLATION_LABEL_KEY = 'image/class/soft_labels'
LABEL_KEY = 'clip/label/index'
AUDIO_KEY = 'features/audio'
DUMP_SOURCE_ID = b'7435790'
def encode_image(image_array: np.ndarray, fmt: str) -> bytes:
return image_utils.encode_image(image_array, fmt)
def make_image_bytes(shape: Sequence[int], fmt: str = 'JPEG') -> bytes:
"""Generates image and return bytes in specified format."""
image = fake_feature_generator.generate_image_np(*shape)
return encode_image(image, fmt=fmt)
def put_int64_to_context(seq_example: tf.train.SequenceExample,
label: int = 0,
key: str = LABEL_KEY):
"""Puts int64 to SequenceExample context with key."""
seq_example.context.feature[key].int64_list.value[:] = [label]
def put_bytes_list_to_feature(seq_example: tf.train.SequenceExample,
raw_image_bytes: bytes,
key: str = IMAGE_KEY,
repeat_num: int = 2):
"""Puts bytes list to SequenceExample context with key."""
for _ in range(repeat_num):
seq_example.feature_lists.feature_list.get_or_create(
key).feature.add().bytes_list.value[:] = [raw_image_bytes]
def put_float_list_to_feature(seq_example: tf.train.SequenceExample,
value: Sequence[Sequence[float]], key: str):
"""Puts float list to SequenceExample context with key."""
for s in value:
seq_example.feature_lists.feature_list.get_or_create(
key).feature.add().float_list.value[:] = s
def make_video_test_example(image_shape: Sequence[int] = (263, 320, 3),
audio_shape: Sequence[int] = (10, 256),
label: int = 42):
"""Generates data for testing video models (inc. RGB, audio, & label)."""
raw_image_bytes = make_image_bytes(shape=image_shape)
random_audio = np.random.normal(size=audio_shape).tolist()
seq_example = tf.train.SequenceExample()
put_int64_to_context(seq_example, label=label, key=LABEL_KEY)
put_bytes_list_to_feature(
seq_example, raw_image_bytes, key=IMAGE_KEY, repeat_num=4)
put_float_list_to_feature(seq_example, value=random_audio, key=AUDIO_KEY)
return seq_example
def dump_to_tfrecord(record_file: str,
tf_examples: Sequence[Union[tf.train.Example,
tf.train.SequenceExample]]):
"""Writes serialized Example to TFRecord file with path.
  Note that the examples are expected to be proto objects, not already
  serialized strings.
Args:
record_file: The name of the output file.
tf_examples: A list of examples to be stored.
"""
file_writers.write_small_dataset(tf_examples, record_file, 'tfrecord')
def create_classification_example(
image_height: int,
image_width: int,
image_format: str = 'JPEG',
is_multilabel: bool = False,
output_serialized_example: bool = True) -> tf.train.Example:
"""Creates image and labels for image classification input pipeline.
Args:
image_height: The height of test image.
image_width: The width of test image.
image_format: The format of test image.
    is_multilabel: A boolean flag indicating whether the test image can have
      multiple labels.
    output_serialized_example: A boolean flag indicating whether to return a
      serialized example.
Returns:
A tf.train.Example for testing.
"""
image = fake_feature_generator.generate_image_np(image_height, image_width)
labels = fake_feature_generator.generate_classes_np(2,
int(is_multilabel) +
1).tolist()
builder = tf_example_builder.TfExampleBuilder()
example = builder.add_image_matrix_feature(image, image_format,
DUMP_SOURCE_ID).add_ints_feature(
CLASSIFICATION_LABEL_KEY,
labels).example
if output_serialized_example:
return example.SerializeToString()
return example
def create_distillation_example(
image_height: int,
image_width: int,
num_labels: int,
image_format: str = 'JPEG',
output_serialized_example: bool = True) -> tf.train.Example:
"""Creates image and labels for image classification with distillation.
Args:
image_height: The height of test image.
image_width: The width of test image.
num_labels: The number of labels used in test image.
image_format: The format of test image.
    output_serialized_example: A boolean flag indicating whether to return a
      serialized example.
Returns:
A tf.train.Example for testing.
"""
image = fake_feature_generator.generate_image_np(image_height, image_width)
labels = fake_feature_generator.generate_classes_np(2, 1).tolist()
soft_labels = (fake_feature_generator.generate_classes_np(1, num_labels) +
0.6).tolist()
builder = tf_example_builder.TfExampleBuilder()
example = builder.add_image_matrix_feature(image, image_format,
DUMP_SOURCE_ID).add_ints_feature(
CLASSIFICATION_LABEL_KEY,
labels).add_floats_feature(
DISTILLATION_LABEL_KEY,
soft_labels).example
if output_serialized_example:
return example.SerializeToString()
return example
def create_3d_image_test_example(
image_height: int,
image_width: int,
image_volume: int,
image_channel: int,
output_serialized_example: bool = False) -> tf.train.Example:
"""Creates 3D image and label.
Args:
image_height: The height of test 3D image.
image_width: The width of test 3D image.
image_volume: The volume of test 3D image.
image_channel: The channel of test 3D image.
    output_serialized_example: A boolean flag indicating whether to return a
      serialized example.
Returns:
A tf.train.Example for testing.
"""
image = fake_feature_generator.generate_image_np(image_height, image_width,
image_channel)
images = image[:, :, np.newaxis, :]
images = np.tile(images, [1, 1, image_volume, 1]).astype(np.float32)
shape = [image_height, image_width, image_volume, image_channel]
labels = fake_feature_generator.generate_classes_np(
2, np.prod(shape)).reshape(shape).astype(np.float32)
builder = tf_example_builder.TfExampleBuilder()
example = builder.add_bytes_feature(IMAGE_KEY,
images.tobytes()).add_bytes_feature(
CLASSIFICATION_LABEL_KEY,
labels.tobytes()).example
if output_serialized_example:
return example.SerializeToString()
return example
def create_detection_test_example(
image_height: int,
image_width: int,
image_channel: int,
num_instances: int,
fill_image_size: bool = True,
output_serialized_example: bool = False) -> tf.train.Example:
"""Creates and returns a test example containing box and mask annotations.
Args:
image_height: The height of test image.
image_width: The width of test image.
image_channel: The channel of test image.
num_instances: The number of object instances per image.
    fill_image_size: Whether the image height and width are added to the
      example.
    output_serialized_example: A boolean flag indicating whether to return a
      serialized example.
Returns:
A tf.train.Example for testing.
"""
image = fake_feature_generator.generate_image_np(image_height, image_width,
image_channel)
boxes = fake_feature_generator.generate_normalized_boxes_np(num_instances)
ymins, xmins, ymaxs, xmaxs = boxes.T.tolist()
is_crowds = [0] * num_instances
labels = fake_feature_generator.generate_classes_np(
2, size=num_instances).tolist()
labels_text = [b'class_1'] * num_instances
masks = fake_feature_generator.generate_instance_masks_np(
image_height, image_width, boxes)
builder = tf_example_builder.TfExampleBuilder()
example = builder.add_image_matrix_feature(
image, image_source_id=DUMP_SOURCE_ID).add_boxes_feature(
xmins, xmaxs, ymins, ymaxs,
labels).add_instance_mask_matrices_feature(masks).add_ints_feature(
'image/object/is_crowd',
is_crowds).add_bytes_feature('image/object/class/text',
labels_text).example
if not fill_image_size:
del example.features.feature['image/height']
del example.features.feature['image/width']
if output_serialized_example:
return example.SerializeToString()
return example
def create_segmentation_test_example(
image_height: int,
image_width: int,
image_channel: int,
output_serialized_example: bool = False,
dense_features: Optional[Mapping[str, int]] = None) -> tf.train.Example:
"""Creates and returns a test example containing mask annotations.
Args:
image_height: The height of test image.
image_width: The width of test image.
image_channel: The channel of test image.
    output_serialized_example: A boolean flag indicating whether to return a
      serialized example.
dense_features: An optional dictionary of additional dense features, where
the key is the prefix of the feature key in tf.Example and the value is
the number of the channels of this feature.
Returns:
A tf.train.Example for testing.
"""
image = fake_feature_generator.generate_image_np(image_height, image_width,
image_channel)
mask = fake_feature_generator.generate_semantic_mask_np(
image_height, image_width, 3)
builder = tf_example_builder.TfExampleBuilder()
builder.add_image_matrix_feature(
image,
image_source_id=DUMP_SOURCE_ID).add_semantic_mask_matrix_feature(mask)
if dense_features:
for prefix, channel in dense_features.items():
dense_feature = fake_feature_generator.generate_semantic_mask_np(
image_height, image_width, channel)
builder.add_semantic_mask_matrix_feature(
dense_feature, feature_prefix=prefix)
example = builder.example
if output_serialized_example:
return example.SerializeToString()
return example
| 12,384 | 36.990798 | 78 | py |
models | models-master/official/vision/dataloaders/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader utils."""
from typing import Dict
# Import libraries
import tensorflow as tf
from official.vision.ops import preprocess_ops
def process_source_id(source_id: tf.Tensor) -> tf.Tensor:
"""Processes source_id to the right format.
Args:
source_id: A `tf.Tensor` that contains the source ID. It can be empty.
Returns:
A formatted source ID.
"""
if source_id.dtype == tf.string:
source_id = tf.strings.to_number(source_id, tf.int64)
with tf.control_dependencies([source_id]):
source_id = tf.cond(
pred=tf.equal(tf.size(input=source_id), 0),
true_fn=lambda: tf.cast(tf.constant(-1), tf.int64),
false_fn=lambda: tf.identity(source_id))
return source_id
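# Added illustrative sketch (not part of the original module).
def _example_process_source_id_sketch() -> tf.Tensor:
  """Shows how a numeric string id is converted to an int64 scalar."""
  # An empty source_id tensor would instead be replaced by the constant -1.
  return process_source_id(tf.constant('123456'))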
def pad_groundtruths_to_fixed_size(groundtruths: Dict[str, tf.Tensor],
size: int) -> Dict[str, tf.Tensor]:
"""Pads the first dimension of groundtruths labels to the fixed size.
Args:
groundtruths: A dictionary of {`str`: `tf.Tensor`} that contains groundtruth
annotations of `boxes`, `is_crowds`, `areas` and `classes`.
size: An `int` that specifies the expected size of the first dimension of
padded tensors.
Returns:
A dictionary of the same keys as input and padded tensors as values.
"""
groundtruths['boxes'] = preprocess_ops.clip_or_pad_to_fixed_size(
groundtruths['boxes'], size, -1)
groundtruths['is_crowds'] = preprocess_ops.clip_or_pad_to_fixed_size(
groundtruths['is_crowds'], size, 0)
groundtruths['areas'] = preprocess_ops.clip_or_pad_to_fixed_size(
groundtruths['areas'], size, -1)
groundtruths['classes'] = preprocess_ops.clip_or_pad_to_fixed_size(
groundtruths['classes'], size, -1)
if 'attributes' in groundtruths:
for k, v in groundtruths['attributes'].items():
groundtruths['attributes'][k] = preprocess_ops.clip_or_pad_to_fixed_size(
v, size, -1)
return groundtruths
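# Added illustrative sketch (not part of the original module): pads two
# groundtruth instances up to a fixed size of 4. Padded rows are filled with
# -1 (0 for `is_crowds`). The tensor values are hypothetical.
def _example_pad_groundtruths_sketch() -> Dict[str, tf.Tensor]:
  """Returns groundtruths whose first dimension is padded to 4."""
  groundtruths = {
      'boxes': tf.constant([[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]]),
      'is_crowds': tf.constant([0, 1]),
      'areas': tf.constant([0.16, 0.49]),
      'classes': tf.constant([1, 3]),
  }
  return pad_groundtruths_to_fixed_size(groundtruths, size=4)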
def binarize_matting_map(matting_map: tf.Tensor,
threshold: float = 0.5) -> tf.Tensor:
"""Binarizes a matting map.
  If a matting_map value is above the threshold, it is set to 1, otherwise 0.
  The binarization is applied element-wise over the matting_map.
Args:
matting_map: The groundtruth in the matting map format.
threshold: The threshold used to binarize the matting map.
Returns:
The binarized labels (0 for BG, 1 for FG) as tf.float32.
"""
return tf.cast(tf.greater(matting_map, threshold), tf.float32)
| 3,103 | 34.678161 | 80 | py |
models | models-master/official/vision/dataloaders/video_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for video and label datasets."""
from typing import Dict, Optional, Tuple, Union
from absl import logging
import tensorflow as tf
from official.vision.configs import video_classification as exp_cfg
from official.vision.dataloaders import decoder
from official.vision.dataloaders import parser
from official.vision.ops import augment
from official.vision.ops import preprocess_ops_3d
IMAGE_KEY = 'image/encoded'
LABEL_KEY = 'clip/label/index'
def process_image(image: tf.Tensor,
is_training: bool = True,
num_frames: int = 32,
stride: int = 1,
random_stride_range: int = 0,
num_test_clips: int = 1,
min_resize: int = 256,
crop_size: Union[int, Tuple[int, int]] = 224,
num_channels: int = 3,
num_crops: int = 1,
zero_centering_image: bool = False,
min_aspect_ratio: float = 0.5,
max_aspect_ratio: float = 2,
min_area_ratio: float = 0.49,
max_area_ratio: float = 1.0,
augmenter: Optional[augment.ImageAugment] = None,
seed: Optional[int] = None) -> tf.Tensor:
"""Processes a serialized image tensor.
Args:
image: Input Tensor of shape [time-steps] and type tf.string of serialized
frames.
is_training: Whether or not in training mode. If True, random sample, crop
and left right flip is used.
num_frames: Number of frames per sub clip.
stride: Temporal stride to sample frames.
random_stride_range: An int indicating the min and max bounds to uniformly
sample different strides from the video. E.g., a value of 1 with stride=2
will uniformly sample a stride in {1, 2, 3} for each video in a batch.
      Only used during training, for the purposes of frame-rate augmentation.
Defaults to 0, which disables random sampling.
num_test_clips: Number of test clips (1 by default). If more than 1, this
will sample multiple linearly spaced clips within each video at test time.
If 1, then a single clip in the middle of the video is sampled. The clips
are aggregated in the batch dimension.
min_resize: Frames are resized so that min(height, width) is min_resize.
crop_size: Final size of the frame after cropping the resized frames.
Optionally, specify a tuple of (crop_height, crop_width) if
crop_height != crop_width.
num_channels: Number of channels of the clip.
num_crops: Number of crops to perform on the resized frames.
zero_centering_image: If True, frames are normalized to values in [-1, 1].
If False, values in [0, 1].
min_aspect_ratio: The minimum aspect range for cropping.
max_aspect_ratio: The maximum aspect range for cropping.
min_area_ratio: The minimum area range for cropping.
max_area_ratio: The maximum area range for cropping.
augmenter: Image augmenter to distort each image.
seed: A deterministic seed to use when sampling.
Returns:
Processed frames. Tensor of shape
[num_frames * num_test_clips, crop_height, crop_width, num_channels].
"""
# Validate parameters.
if is_training and num_test_clips != 1:
logging.warning(
'`num_test_clips` %d is ignored since `is_training` is `True`.',
num_test_clips)
if random_stride_range < 0:
raise ValueError('Random stride range should be >= 0, got {}'.format(
random_stride_range))
if isinstance(crop_size, int):
crop_size = (crop_size, crop_size)
crop_height, crop_width = crop_size
# Temporal sampler.
if is_training:
if random_stride_range > 0:
# Uniformly sample different frame-rates
stride = tf.random.uniform(
[],
tf.maximum(stride - random_stride_range, 1),
stride + random_stride_range,
dtype=tf.int32)
# Sample random clip.
image = preprocess_ops_3d.sample_sequence(image, num_frames, True, stride,
seed)
elif num_test_clips > 1:
# Sample linspace clips.
image = preprocess_ops_3d.sample_linspace_sequence(image, num_test_clips,
num_frames, stride)
else:
# Sample middle clip.
image = preprocess_ops_3d.sample_sequence(image, num_frames, False, stride)
# Decode JPEG string to tf.uint8.
if image.dtype == tf.string:
image = preprocess_ops_3d.decode_jpeg(image, num_channels)
if is_training:
# Standard image data augmentation: random resized crop and random flip.
image = preprocess_ops_3d.random_crop_resize(
image, crop_height, crop_width, num_frames, num_channels,
(min_aspect_ratio, max_aspect_ratio),
(min_area_ratio, max_area_ratio))
image = preprocess_ops_3d.random_flip_left_right(image, seed)
if augmenter is not None:
image = augmenter.distort(image)
else:
# Resize images (resize happens only if necessary to save compute).
image = preprocess_ops_3d.resize_smallest(image, min_resize)
# Crop of the frames.
image = preprocess_ops_3d.crop_image(image, crop_height, crop_width, False,
num_crops)
# Cast the frames in float32, normalizing according to zero_centering_image.
return preprocess_ops_3d.normalize_image(image, zero_centering_image)
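# Added illustrative sketch (not part of the original module): a typical
# evaluation-time call. It assumes `frames` is a [time_steps] tf.string
# tensor of JPEG-encoded frames.
def _example_process_image_sketch(frames: tf.Tensor) -> tf.Tensor:
  """Returns a clip tensor of shape [32 * 2, 224, 224, 3] in [0, 1]."""
  return process_image(
      frames,
      is_training=False,
      num_frames=32,
      stride=2,
      num_test_clips=2,
      min_resize=256,
      crop_size=224)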
def postprocess_image(image: tf.Tensor,
is_training: bool = True,
num_frames: int = 32,
num_test_clips: int = 1,
num_test_crops: int = 1) -> tf.Tensor:
"""Processes a batched Tensor of frames.
The same parameters used in process should be used here.
Args:
image: Input Tensor of shape [batch, time-steps, height, width, 3].
is_training: Whether or not in training mode. If True, random sample, crop
and left right flip is used.
num_frames: Number of frames per sub clip.
num_test_clips: Number of test clips (1 by default). If more than 1, this
will sample multiple linearly spaced clips within each video at test time.
If 1, then a single clip in the middle of the video is sampled. The clips
are aggregated in the batch dimension.
num_test_crops: Number of test crops (1 by default). If more than 1, there
are multiple crops for each clip at test time. If 1, there is a single
central crop. The crops are aggregated in the batch dimension.
Returns:
Processed frames. Tensor of shape
[batch * num_test_clips * num_test_crops, num_frames, height, width, 3].
"""
num_views = num_test_clips * num_test_crops
if num_views > 1 and not is_training:
# In this case, multiple views are merged together in batch dimension which
# will be batch * num_views.
image = tf.reshape(image, [-1, num_frames] + image.shape[2:].as_list())
return image
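# Added illustrative sketch (not part of the original module): folds the test
# clips back into the batch dimension after batching. Input is assumed to be
# [batch, 2 * 32, height, width, 3] at evaluation time.
def _example_postprocess_image_sketch(batched_frames: tf.Tensor) -> tf.Tensor:
  """Returns frames reshaped to [batch * 2, 32, height, width, 3]."""
  return postprocess_image(
      batched_frames,
      is_training=False,
      num_frames=32,
      num_test_clips=2,
      num_test_crops=1)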
def process_label(label: tf.Tensor,
one_hot_label: bool = True,
num_classes: Optional[int] = None,
label_dtype: tf.DType = tf.int32) -> tf.Tensor:
"""Processes label Tensor."""
# Validate parameters.
if one_hot_label and not num_classes:
raise ValueError(
'`num_classes` should be given when requesting one hot label.')
# Cast to label_dtype (default = tf.int32).
label = tf.cast(label, dtype=label_dtype)
if one_hot_label:
# Replace label index by one hot representation.
label = tf.one_hot(label, num_classes)
if len(label.shape.as_list()) > 1:
label = tf.reduce_sum(label, axis=0)
if num_classes == 1:
# The trick for single label.
label = 1 - label
return label
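# Added illustrative sketch (not part of the original module): shows the
# one-hot and multi-hot label handling described above.
def _example_process_label_sketch():
  """Returns ([0, 0, 0, 1, 0], [0, 1, 0, 1, 0]) as float label vectors."""
  one_hot = process_label(tf.constant([3]), one_hot_label=True, num_classes=5)
  multi_hot = process_label(
      tf.constant([1, 3]), one_hot_label=True, num_classes=5)
  return one_hot, multi_hot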
class Decoder(decoder.Decoder):
"""A tf.Example decoder for classification task."""
def __init__(self, image_key: str = IMAGE_KEY, label_key: str = LABEL_KEY):
self._context_description = {
# One integer stored in context.
label_key: tf.io.VarLenFeature(tf.int64),
}
self._sequence_description = {
# Each image is a string encoding JPEG.
image_key: tf.io.FixedLenSequenceFeature((), tf.string),
}
def add_feature(self, feature_name: str,
feature_type: Union[tf.io.VarLenFeature,
tf.io.FixedLenFeature,
tf.io.FixedLenSequenceFeature]):
self._sequence_description[feature_name] = feature_type
def add_context(self, feature_name: str,
feature_type: Union[tf.io.VarLenFeature,
tf.io.FixedLenFeature,
tf.io.FixedLenSequenceFeature]):
self._context_description[feature_name] = feature_type
def decode(self, serialized_example):
"""Parses a single tf.Example into image and label tensors."""
result = {}
context, sequences = tf.io.parse_single_sequence_example(
serialized_example, self._context_description,
self._sequence_description)
result.update(context)
result.update(sequences)
for key, value in result.items():
if isinstance(value, tf.SparseTensor):
result[key] = tf.sparse.to_dense(value)
return result
class VideoTfdsDecoder(decoder.Decoder):
"""A tf.SequenceExample decoder for tfds video classification datasets."""
def __init__(self, image_key: str = IMAGE_KEY, label_key: str = LABEL_KEY):
self._image_key = image_key
self._label_key = label_key
def decode(self, features):
"""Decode the TFDS FeatureDict.
Args:
features: features from TFDS video dataset.
See https://www.tensorflow.org/datasets/catalog/ucf101 for example.
Returns:
Dict of tensors.
"""
sample_dict = {
self._image_key: features['video'],
self._label_key: features['label'],
}
return sample_dict
class Parser(parser.Parser):
"""Parses a video and label dataset."""
def __init__(self,
input_params: exp_cfg.DataConfig,
image_key: str = IMAGE_KEY,
label_key: str = LABEL_KEY):
self._num_frames = input_params.feature_shape[0]
self._stride = input_params.temporal_stride
self._random_stride_range = input_params.random_stride_range
self._num_test_clips = input_params.num_test_clips
self._min_resize = input_params.min_image_size
crop_height = input_params.feature_shape[1]
crop_width = input_params.feature_shape[2]
self._crop_size = crop_height if crop_height == crop_width else (
crop_height, crop_width)
self._num_channels = input_params.feature_shape[3]
self._num_crops = input_params.num_test_crops
self._zero_centering_image = input_params.zero_centering_image
self._one_hot_label = input_params.one_hot
self._num_classes = input_params.num_classes
self._image_key = image_key
self._label_key = label_key
self._dtype = tf.dtypes.as_dtype(input_params.dtype)
self._label_dtype = tf.dtypes.as_dtype(input_params.label_dtype)
self._output_audio = input_params.output_audio
self._min_aspect_ratio = input_params.aug_min_aspect_ratio
self._max_aspect_ratio = input_params.aug_max_aspect_ratio
self._min_area_ratio = input_params.aug_min_area_ratio
self._max_area_ratio = input_params.aug_max_area_ratio
if self._output_audio:
self._audio_feature = input_params.audio_feature
self._audio_shape = input_params.audio_feature_shape
aug_type = input_params.aug_type
if aug_type is not None:
if aug_type.type == 'autoaug':
logging.info('Using AutoAugment.')
self._augmenter = augment.AutoAugment(
augmentation_name=aug_type.autoaug.augmentation_name,
cutout_const=aug_type.autoaug.cutout_const,
translate_const=aug_type.autoaug.translate_const)
elif aug_type.type == 'randaug':
logging.info('Using RandAugment.')
self._augmenter = augment.RandAugment(
num_layers=aug_type.randaug.num_layers,
magnitude=aug_type.randaug.magnitude,
cutout_const=aug_type.randaug.cutout_const,
translate_const=aug_type.randaug.translate_const,
prob_to_apply=aug_type.randaug.prob_to_apply,
exclude_ops=aug_type.randaug.exclude_ops)
else:
raise ValueError(
'Augmentation policy {} not supported.'.format(aug_type.type))
else:
self._augmenter = None
def _parse_train_data(
self, decoded_tensors: Dict[str, tf.Tensor]
) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
"""Parses data for training."""
# Process image and label.
image = decoded_tensors[self._image_key]
image = process_image(
image=image,
is_training=True,
num_frames=self._num_frames,
stride=self._stride,
random_stride_range=self._random_stride_range,
num_test_clips=self._num_test_clips,
min_resize=self._min_resize,
crop_size=self._crop_size,
num_channels=self._num_channels,
min_aspect_ratio=self._min_aspect_ratio,
max_aspect_ratio=self._max_aspect_ratio,
min_area_ratio=self._min_area_ratio,
max_area_ratio=self._max_area_ratio,
augmenter=self._augmenter,
zero_centering_image=self._zero_centering_image)
image = tf.cast(image, dtype=self._dtype)
features = {'image': image}
label = decoded_tensors[self._label_key]
label = process_label(label, self._one_hot_label, self._num_classes,
self._label_dtype)
if self._output_audio:
audio = decoded_tensors[self._audio_feature]
audio = tf.cast(audio, dtype=self._dtype)
# TODO(yeqing): synchronize audio/video sampling. Especially randomness.
audio = preprocess_ops_3d.sample_sequence(
audio, self._audio_shape[0], random=False, stride=1)
audio = tf.ensure_shape(audio, self._audio_shape)
features['audio'] = audio
return features, label
def _parse_eval_data(
self, decoded_tensors: Dict[str, tf.Tensor]
) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
"""Parses data for evaluation."""
image = decoded_tensors[self._image_key]
image = process_image(
image=image,
is_training=False,
num_frames=self._num_frames,
stride=self._stride,
num_test_clips=self._num_test_clips,
min_resize=self._min_resize,
crop_size=self._crop_size,
num_channels=self._num_channels,
num_crops=self._num_crops,
zero_centering_image=self._zero_centering_image)
image = tf.cast(image, dtype=self._dtype)
features = {'image': image}
label = decoded_tensors[self._label_key]
label = process_label(label, self._one_hot_label, self._num_classes,
self._label_dtype)
if self._output_audio:
audio = decoded_tensors[self._audio_feature]
audio = tf.cast(audio, dtype=self._dtype)
audio = preprocess_ops_3d.sample_sequence(
audio, self._audio_shape[0], random=False, stride=1)
audio = tf.ensure_shape(audio, self._audio_shape)
features['audio'] = audio
return features, label
class PostBatchProcessor(object):
"""Processes a video and label dataset which is batched."""
def __init__(self, input_params: exp_cfg.DataConfig):
self._is_training = input_params.is_training
self._num_frames = input_params.feature_shape[0]
self._num_test_clips = input_params.num_test_clips
self._num_test_crops = input_params.num_test_crops
def __call__(self, features: Dict[str, tf.Tensor],
label: tf.Tensor) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
"""Parses a single tf.Example into image and label tensors."""
for key in ['image']:
if key in features:
features[key] = postprocess_image(
image=features[key],
is_training=self._is_training,
num_frames=self._num_frames,
num_test_clips=self._num_test_clips,
num_test_crops=self._num_test_crops)
return features, label
| 16,753 | 38.607565 | 80 | py |
models | models-master/official/vision/dataloaders/tf_example_decoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_example_decoder.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfexample_utils
class TfExampleDecoderTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(100, 100, 0, True, True),
(100, 100, 1, True, True),
(100, 100, 2, True, True),
(100, 100, 0, False, True),
(100, 100, 1, False, True),
(100, 100, 2, False, True),
(100, 100, 0, True, False),
(100, 100, 1, True, False),
(100, 100, 2, True, False),
(100, 100, 0, False, False),
(100, 100, 1, False, False),
(100, 100, 2, False, False),
)
def test_result_shape(self, image_height, image_width, num_instances,
regenerate_source_id, fill_image_size):
decoder = tf_example_decoder.TfExampleDecoder(
include_mask=True, regenerate_source_id=regenerate_source_id)
serialized_example = tfexample_utils.create_detection_test_example(
image_height=image_height,
image_width=image_width,
image_channel=3,
num_instances=num_instances,
fill_image_size=fill_image_size,
).SerializeToString()
decoded_tensors = decoder.decode(
tf.convert_to_tensor(value=serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertAllEqual(
(image_height, image_width, 3), results['image'].shape)
if not regenerate_source_id:
self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
self.assertEqual(image_height, results['height'])
self.assertEqual(image_width, results['width'])
self.assertAllEqual(
(num_instances,), results['groundtruth_classes'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_is_crowd'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_area'].shape)
self.assertAllEqual(
(num_instances, 4), results['groundtruth_boxes'].shape)
self.assertAllEqual(
(num_instances, image_height, image_width),
results['groundtruth_instance_masks'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_instance_masks_png'].shape)
def test_result_content(self):
decoder = tf_example_decoder.TfExampleDecoder(
include_mask=True, attribute_names=['attr1', 'attr2']
)
image_content = [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
[[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]
image = tfexample_utils.encode_image(np.uint8(image_content), fmt='PNG')
image_height = 4
image_width = 4
num_instances = 2
xmins = [0, 0.25]
xmaxs = [0.5, 1.0]
ymins = [0, 0]
ymaxs = [0.5, 1.0]
labels = [3, 1]
attr1 = np.array([[0], [2]])
attr2 = np.array([[1], [3]])
areas = [
0.25 * image_height * image_width, 0.75 * image_height * image_width
]
is_crowds = [1, 0]
mask_content = [[[255, 255, 0, 0],
[255, 255, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 255, 255, 255],
[0, 255, 255, 255],
[0, 255, 255, 255],
[0, 255, 255, 255]]]
masks = [
tfexample_utils.encode_image(np.uint8(m), fmt='PNG')
for m in list(mask_content)
]
serialized_example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image])
),
'image/source_id': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tfexample_utils.DUMP_SOURCE_ID]
)
),
'image/height': tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_height])
),
'image/width': tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_width])
),
'image/object/bbox/xmin': tf.train.Feature(
float_list=tf.train.FloatList(value=xmins)
),
'image/object/bbox/xmax': tf.train.Feature(
float_list=tf.train.FloatList(value=xmaxs)
),
'image/object/bbox/ymin': tf.train.Feature(
float_list=tf.train.FloatList(value=ymins)
),
'image/object/bbox/ymax': tf.train.Feature(
float_list=tf.train.FloatList(value=ymaxs)
),
'image/object/class/label': tf.train.Feature(
int64_list=tf.train.Int64List(value=labels)
),
'image/object/is_crowd': tf.train.Feature(
int64_list=tf.train.Int64List(value=is_crowds)
),
'image/object/area': tf.train.Feature(
float_list=tf.train.FloatList(value=areas)
),
'image/object/mask': tf.train.Feature(
bytes_list=tf.train.BytesList(value=masks)
),
'image/object/attribute/attr1': tf.train.Feature(
int64_list=tf.train.Int64List(value=attr1.flatten())
),
'image/object/attribute/attr2': tf.train.Feature(
int64_list=tf.train.Int64List(value=attr2.flatten())
),
}
)
).SerializeToString()
decoded_tensors = decoder.decode(
tf.convert_to_tensor(value=serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertAllEqual(
(image_height, image_width, 3), results['image'].shape)
self.assertAllEqual(image_content, results['image'])
self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
self.assertEqual(image_height, results['height'])
self.assertEqual(image_width, results['width'])
self.assertAllEqual(
(num_instances,), results['groundtruth_classes'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_is_crowd'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_area'].shape)
self.assertAllEqual(
(num_instances, 4), results['groundtruth_boxes'].shape)
self.assertAllEqual(
(num_instances, image_height, image_width),
results['groundtruth_instance_masks'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_instance_masks_png'].shape)
self.assertAllEqual(
[3, 1], results['groundtruth_classes'])
np.testing.assert_array_equal(
attr1, results['groundtruth_attributes']['attr1']
)
np.testing.assert_array_equal(
attr2, results['groundtruth_attributes']['attr2']
)
self.assertAllEqual([True, False], results['groundtruth_is_crowd'])
self.assertNDArrayNear(
[0.25 * image_height * image_width, 0.75 * image_height * image_width],
results['groundtruth_area'], 1e-4)
self.assertNDArrayNear(
[[0, 0, 0.5, 0.5], [0, 0.25, 1.0, 1.0]],
results['groundtruth_boxes'], 1e-4)
self.assertNDArrayNear(
mask_content, results['groundtruth_instance_masks'], 1e-4)
self.assertAllEqual(
masks, results['groundtruth_instance_masks_png'])
def test_handling_missing_fields(self):
decoder = tf_example_decoder.TfExampleDecoder(include_mask=True)
image_content = [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
[[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]
image = tfexample_utils.encode_image(np.uint8(image_content), fmt='PNG')
image_height = 4
image_width = 4
num_instances = 2
xmins = [0, 0.25]
xmaxs = [0.5, 1.0]
ymins = [0, 0]
ymaxs = [0.5, 1.0]
labels = [3, 1]
mask_content = [[[255, 255, 0, 0],
[255, 255, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 255, 255, 255],
[0, 255, 255, 255],
[0, 255, 255, 255],
[0, 255, 255, 255]]]
masks = [
tfexample_utils.encode_image(np.uint8(m), fmt='PNG')
for m in list(mask_content)
]
serialized_example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': (tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image]))),
'image/source_id': (tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tfexample_utils.DUMP_SOURCE_ID]))),
'image/height': (tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_height]))),
'image/width': (tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_width]))),
'image/object/bbox/xmin': (tf.train.Feature(
float_list=tf.train.FloatList(value=xmins))),
'image/object/bbox/xmax': (tf.train.Feature(
float_list=tf.train.FloatList(value=xmaxs))),
'image/object/bbox/ymin': (tf.train.Feature(
float_list=tf.train.FloatList(value=ymins))),
'image/object/bbox/ymax': (tf.train.Feature(
float_list=tf.train.FloatList(value=ymaxs))),
'image/object/class/label': (tf.train.Feature(
int64_list=tf.train.Int64List(value=labels))),
'image/object/mask': (tf.train.Feature(
bytes_list=tf.train.BytesList(value=masks))),
})).SerializeToString()
decoded_tensors = decoder.decode(
tf.convert_to_tensor(serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertAllEqual(
(image_height, image_width, 3), results['image'].shape)
self.assertAllEqual(image_content, results['image'])
self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
self.assertEqual(image_height, results['height'])
self.assertEqual(image_width, results['width'])
self.assertAllEqual(
(num_instances,), results['groundtruth_classes'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_is_crowd'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_area'].shape)
self.assertAllEqual(
(num_instances, 4), results['groundtruth_boxes'].shape)
self.assertAllEqual(
(num_instances, image_height, image_width),
results['groundtruth_instance_masks'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_instance_masks_png'].shape)
self.assertAllEqual(
[3, 1], results['groundtruth_classes'])
self.assertAllEqual(
[False, False], results['groundtruth_is_crowd'])
self.assertNDArrayNear(
[0.25 * image_height * image_width, 0.75 * image_height * image_width],
results['groundtruth_area'], 1e-4)
self.assertNDArrayNear(
[[0, 0, 0.5, 0.5], [0, 0.25, 1.0, 1.0]],
results['groundtruth_boxes'], 1e-4)
self.assertNDArrayNear(
mask_content, results['groundtruth_instance_masks'], 1e-4)
self.assertAllEqual(
masks, results['groundtruth_instance_masks_png'])
if __name__ == '__main__':
tf.test.main()
| 12,619 | 40.650165 | 79 | py |
models | models-master/official/vision/dataloaders/video_input_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
# Import libraries
import numpy as np
from PIL import Image
import tensorflow as tf
import tensorflow_datasets as tfds
from official.vision.configs import common
from official.vision.configs import video_classification as exp_cfg
from official.vision.dataloaders import video_input
AUDIO_KEY = 'features/audio'
def fake_seq_example():
# Create fake data.
random_image = np.random.randint(0, 256, size=(263, 320, 3), dtype=np.uint8)
random_image = Image.fromarray(random_image)
label = 42
with io.BytesIO() as buffer:
random_image.save(buffer, format='JPEG')
raw_image_bytes = buffer.getvalue()
seq_example = tf.train.SequenceExample()
seq_example.feature_lists.feature_list.get_or_create(
video_input.IMAGE_KEY).feature.add().bytes_list.value[:] = [
raw_image_bytes
]
seq_example.feature_lists.feature_list.get_or_create(
video_input.IMAGE_KEY).feature.add().bytes_list.value[:] = [
raw_image_bytes
]
seq_example.context.feature[video_input.LABEL_KEY].int64_list.value[:] = [
label
]
random_audio = np.random.normal(size=(10, 256)).tolist()
for s in random_audio:
seq_example.feature_lists.feature_list.get_or_create(
AUDIO_KEY).feature.add().float_list.value[:] = s
return seq_example, label
class DecoderTest(tf.test.TestCase):
"""A tf.SequenceExample decoder for the video classification task."""
def test_decoder(self):
decoder = video_input.Decoder()
seq_example, label = fake_seq_example()
serialized_example = seq_example.SerializeToString()
decoded_tensors = decoder.decode(tf.convert_to_tensor(serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertCountEqual([video_input.IMAGE_KEY, video_input.LABEL_KEY],
results.keys())
self.assertEqual(label, results[video_input.LABEL_KEY])
def test_decode_audio(self):
decoder = video_input.Decoder()
decoder.add_feature(AUDIO_KEY, tf.io.VarLenFeature(dtype=tf.float32))
seq_example, label = fake_seq_example()
serialized_example = seq_example.SerializeToString()
decoded_tensors = decoder.decode(tf.convert_to_tensor(serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertCountEqual(
[video_input.IMAGE_KEY, video_input.LABEL_KEY, AUDIO_KEY],
results.keys())
self.assertEqual(label, results[video_input.LABEL_KEY])
self.assertEqual(results[AUDIO_KEY].shape, (10, 256))
def test_tfds_decode(self):
with tfds.testing.mock_data(num_examples=1):
dataset = tfds.load('ucf101', split='train').take(1)
data = next(iter(dataset))
decoder = video_input.VideoTfdsDecoder()
decoded_tensors = decoder.decode(data)
self.assertContainsSubset([video_input.LABEL_KEY, video_input.IMAGE_KEY],
decoded_tensors.keys())
class VideoAndLabelParserTest(tf.test.TestCase):
def test_video_input(self):
params = exp_cfg.kinetics600(is_training=True)
params.feature_shape = (2, 224, 224, 3)
params.min_image_size = 224
decoder = video_input.Decoder()
parser = video_input.Parser(params).parse_fn(params.is_training)
seq_example, label = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image_features, label = output_tensor
image = image_features['image']
self.assertAllEqual(image.shape, (2, 224, 224, 3))
self.assertAllEqual(label.shape, (600,))
def test_video_audio_input(self):
params = exp_cfg.kinetics600(is_training=True)
params.feature_shape = (2, 224, 224, 3)
params.min_image_size = 224
params.output_audio = True
params.audio_feature = AUDIO_KEY
params.audio_feature_shape = (15, 256)
decoder = video_input.Decoder()
decoder.add_feature(params.audio_feature,
tf.io.VarLenFeature(dtype=tf.float32))
parser = video_input.Parser(params).parse_fn(params.is_training)
seq_example, label = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
features, label = output_tensor
image = features['image']
audio = features['audio']
self.assertAllEqual(image.shape, (2, 224, 224, 3))
self.assertAllEqual(label.shape, (600,))
self.assertEqual(audio.shape, (15, 256))
def test_video_input_random_stride(self):
params = exp_cfg.kinetics600(is_training=True)
params.feature_shape = (2, 224, 224, 3)
params.min_image_size = 224
params.temporal_stride = 2
params.random_stride_range = 1
decoder = video_input.Decoder()
parser = video_input.Parser(params).parse_fn(params.is_training)
seq_example, label = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image_features, label = output_tensor
image = image_features['image']
self.assertAllEqual(image.shape, (2, 224, 224, 3))
self.assertAllEqual(label.shape, (600,))
def test_video_input_augmentation_returns_shape(self):
params = exp_cfg.kinetics600(is_training=True)
params.feature_shape = (2, 224, 224, 3)
params.min_image_size = 224
params.temporal_stride = 2
params.aug_type = common.Augmentation(
type='autoaug', autoaug=common.AutoAugment())
decoder = video_input.Decoder()
parser = video_input.Parser(params).parse_fn(params.is_training)
seq_example, label = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image_features, label = output_tensor
image = image_features['image']
self.assertAllEqual(image.shape, (2, 224, 224, 3))
self.assertAllEqual(label.shape, (600,))
def test_video_input_image_shape_label_type(self):
params = exp_cfg.kinetics600(is_training=True)
params.feature_shape = (2, 168, 224, 1)
params.min_image_size = 168
params.label_dtype = 'float32'
params.one_hot = False
decoder = video_input.Decoder()
parser = video_input.Parser(params).parse_fn(params.is_training)
seq_example, label = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image_features, label = output_tensor
image = image_features['image']
self.assertAllEqual(image.shape, (2, 168, 224, 1))
self.assertAllEqual(label.shape, (1,))
self.assertDTypeEqual(label, tf.float32)
if __name__ == '__main__':
tf.test.main()
| 7,541 | 33.438356 | 78 | py |
models | models-master/official/vision/dataloaders/decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The generic decoder interface."""
import abc
class Decoder(metaclass=abc.ABCMeta):
"""Decodes the raw data into tensors."""
@abc.abstractmethod
def decode(self, serialized_example):
"""Decodes the serialized example into tensors.
Args:
serialized_example: a serialized string tensor that encodes the data.
Returns:
decoded_tensors: a dict of Tensors.
"""
pass
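# A minimal, hedged sketch (not part of the original module) of a concrete
# Decoder. It assumes a tf.Example that stores one JPEG- or PNG-encoded image
# under the illustrative key 'image/encoded'; the class name and key are made
# up for demonstration only.
import tensorflow as tf
class _ExampleImageDecoder(Decoder):
  """Illustrative decoder that returns a single decoded image tensor."""
  def decode(self, serialized_example):
    # Parse the single string feature and decode it into an HxWx3 uint8 image.
    features = tf.io.parse_single_example(
        serialized_example,
        {'image/encoded': tf.io.FixedLenFeature((), tf.string, '')})
    image = tf.io.decode_image(features['image/encoded'], channels=3)
    return {'image': image}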
| 1,016 | 28.911765 | 75 | py |
models | models-master/official/vision/dataloaders/tfds_segmentation_decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFDS Semantic Segmentation decoders."""
import tensorflow as tf
from official.vision.dataloaders import decoder
class CityScapesDecorder(decoder.Decoder):
"""A tf.Example decoder for tfds cityscapes datasets."""
def __init__(self):
# Original labels to trainable labels map, 255 is the ignore class.
self._label_map = {
-1: 255,
0: 255,
1: 255,
2: 255,
3: 255,
4: 255,
5: 255,
6: 255,
7: 0,
8: 1,
9: 255,
10: 255,
11: 2,
12: 3,
13: 4,
14: 255,
15: 255,
16: 255,
17: 5,
18: 255,
19: 6,
20: 7,
21: 8,
22: 9,
23: 10,
24: 11,
25: 12,
26: 13,
27: 14,
28: 15,
29: 255,
30: 255,
31: 16,
32: 17,
33: 18,
}
def decode(self, serialized_example):
    # Convert labels according to self._label_map.
label = serialized_example['segmentation_label']
for original_label in self._label_map:
label = tf.where(label == original_label,
self._label_map[original_label] * tf.ones_like(label),
label)
sample_dict = {
'image/encoded':
tf.io.encode_jpeg(serialized_example['image_left'], quality=100),
'image/height': serialized_example['image_left'].shape[0],
'image/width': serialized_example['image_left'].shape[1],
'image/segmentation/class/encoded':
tf.io.encode_png(label),
}
return sample_dict
TFDS_ID_TO_DECODER_MAP = {
'cityscapes': CityScapesDecorder,
'cityscapes/semantic_segmentation': CityScapesDecorder,
'cityscapes/semantic_segmentation_extra': CityScapesDecorder,
}
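# Hedged sketch (not part of the original module): the per-id tf.where loop in
# CityScapesDecorder.decode can equivalently be written as a dense lookup
# table indexed with tf.gather. The table mirrors the label map above
# (255 = ignore); the helper name is made up, and the special -1 id would need
# separate handling before the gather.
def _remap_cityscapes_labels_with_table(label: tf.Tensor) -> tf.Tensor:
  """Maps original Cityscapes ids (0..33) to trainable ids."""
  # Position i of the table holds the trainable id for original id i.
  table = tf.constant(
      [255, 255, 255, 255, 255, 255, 255, 0, 1, 255, 255, 2, 3, 4, 255, 255,
       255, 5, 255, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 255, 255, 16, 17, 18],
      dtype=tf.int32)
  return tf.gather(table, tf.cast(label, tf.int32))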
| 2,418 | 26.804598 | 77 | py |
models | models-master/official/vision/dataloaders/tfds_classification_decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFDS Classification decoders."""
import tensorflow as tf
from official.vision.dataloaders import decoder
class ClassificationDecorder(decoder.Decoder):
"""A tf.Example decoder for tfds classification datasets."""
def decode(self, serialized_example):
sample_dict = {
'image/encoded':
tf.io.encode_jpeg(serialized_example['image'], quality=100),
'image/class/label':
serialized_example['label'],
}
return sample_dict
TFDS_ID_TO_DECODER_MAP = {
'cifar10': ClassificationDecorder,
'cifar100': ClassificationDecorder,
'imagenet2012': ClassificationDecorder,
'imagenet2012_fewshot/10shot': ClassificationDecorder,
}
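# Hedged usage sketch (not part of the original module): decoding one fake
# tfds-style example in eager mode. The zero image and the label value are
# made-up test data; the helper name is illustrative.
def _classification_decoder_demo():
  decoder = ClassificationDecorder()
  fake_example = {
      'image': tf.zeros([32, 32, 3], dtype=tf.uint8),
      'label': tf.constant(3, dtype=tf.int64),
  }
  decoded = decoder.decode(fake_example)
  # decoded['image/encoded'] holds a JPEG-encoded scalar string tensor, while
  # decoded['image/class/label'] passes the label through unchanged.
  return decoded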
| 1,301 | 31.55 | 74 | py |
models | models-master/official/vision/dataloaders/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/vision/dataloaders/tfds_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFDS factory functions."""
from official.vision.dataloaders import decoder as base_decoder
from official.vision.dataloaders import tfds_detection_decoders
from official.vision.dataloaders import tfds_segmentation_decoders
from official.vision.dataloaders import tfds_classification_decoders
def get_classification_decoder(tfds_name: str) -> base_decoder.Decoder:
"""Gets classification decoder.
Args:
tfds_name: `str`, name of the tfds classification decoder.
Returns:
`base_decoder.Decoder` instance.
Raises:
ValueError if the tfds_name doesn't exist in the available decoders.
"""
if tfds_name in tfds_classification_decoders.TFDS_ID_TO_DECODER_MAP:
decoder = tfds_classification_decoders.TFDS_ID_TO_DECODER_MAP[tfds_name]()
else:
raise ValueError(
f'TFDS Classification {tfds_name} is not supported')
return decoder
def get_detection_decoder(tfds_name: str) -> base_decoder.Decoder:
"""Gets detection decoder.
Args:
tfds_name: `str`, name of the tfds detection decoder.
Returns:
`base_decoder.Decoder` instance.
Raises:
ValueError if the tfds_name doesn't exist in the available decoders.
"""
if tfds_name in tfds_detection_decoders.TFDS_ID_TO_DECODER_MAP:
decoder = tfds_detection_decoders.TFDS_ID_TO_DECODER_MAP[tfds_name]()
else:
raise ValueError(f'TFDS Detection {tfds_name} is not supported')
return decoder
def get_segmentation_decoder(tfds_name: str) -> base_decoder.Decoder:
"""Gets segmentation decoder.
Args:
tfds_name: `str`, name of the tfds segmentation decoder.
Returns:
`base_decoder.Decoder` instance.
Raises:
ValueError if the tfds_name doesn't exist in the available decoders.
"""
if tfds_name in tfds_segmentation_decoders.TFDS_ID_TO_DECODER_MAP:
decoder = tfds_segmentation_decoders.TFDS_ID_TO_DECODER_MAP[tfds_name]()
else:
raise ValueError(f'TFDS Segmentation {tfds_name} is not supported')
return decoder
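# Hedged usage sketch (not part of the original module): the factory functions
# are keyed on TFDS dataset names, so a typical call site looks like the
# following. The names used here appear in the decoder maps referenced above;
# an unsupported name raises ValueError.
def _example_decoder_lookup():
  classification_decoder = get_classification_decoder('imagenet2012')
  detection_decoder = get_detection_decoder('coco/2017')
  segmentation_decoder = get_segmentation_decoder('cityscapes')
  return classification_decoder, detection_decoder, segmentation_decoder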
| 2,568 | 34.680556 | 78 | py |
models | models-master/official/vision/dataloaders/utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataloader utils functions."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.dataloaders import utils
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
def test_process_empty_source_id(self):
source_id = tf.constant([], dtype=tf.int64)
source_id = tf.strings.as_string(source_id)
self.assertEqual(-1, utils.process_source_id(source_id=source_id))
@parameterized.parameters(
([128, 256], [128, 256]),
([128, 32, 16], [128, 32, 16]),
)
def test_process_source_id(self, source_id, expected_result):
source_id = tf.constant(source_id, dtype=tf.int64)
source_id = tf.strings.as_string(source_id)
self.assertSequenceAlmostEqual(expected_result,
utils.process_source_id(source_id=source_id))
@parameterized.parameters(
([[10, 20, 30, 40]], [[100]], [[0]], 10, None),
([[0.1, 0.2, 0.5, 0.6]], [[0.5]], [[1]], 2, [[1.0, 2.0]]),
)
def test_pad_groundtruths_to_fixed_size(self, boxes, area, classes, size,
attributes):
groundtruths = {}
groundtruths['boxes'] = tf.constant(boxes)
groundtruths['is_crowds'] = tf.constant([[0]])
groundtruths['areas'] = tf.constant(area)
groundtruths['classes'] = tf.constant(classes)
if attributes:
groundtruths['attributes'] = {'depth': tf.constant(attributes)}
actual_result = utils.pad_groundtruths_to_fixed_size(
groundtruths=groundtruths, size=size)
# Check that the first dimension is padded to the expected size.
for key in actual_result:
if key == 'attributes':
for _, v in actual_result[key].items():
pad_shape = v.shape[0]
self.assertEqual(size, pad_shape)
else:
pad_shape = actual_result[key].shape[0]
self.assertEqual(size, pad_shape)
if __name__ == '__main__':
tf.test.main()
| 2,557 | 34.527778 | 80 | py |
models | models-master/official/vision/dataloaders/tfds_factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfds factory functions."""
from absl.testing import parameterized
import tensorflow as tf
from official.vision.dataloaders import decoder as base_decoder
from official.vision.dataloaders import tfds_factory
class TFDSFactoryTest(tf.test.TestCase, parameterized.TestCase):
def _create_test_example(self):
serialized_example = {
'image': tf.ones(shape=(100, 100, 3), dtype=tf.uint8),
'label': 1,
'image/id': 0,
'objects': {
'label': 1,
'is_crowd': 0,
'area': 0.5,
'bbox': [0.1, 0.2, 0.3, 0.4]
},
'segmentation_label': tf.ones((100, 100, 1), dtype=tf.uint8),
'image_left': tf.ones(shape=(100, 100, 3), dtype=tf.uint8)
}
return serialized_example
@parameterized.parameters(
('imagenet2012'),
('cifar10'),
('cifar100'),
)
def test_classification_decoder(self, tfds_name):
decoder = tfds_factory.get_classification_decoder(tfds_name)
self.assertIsInstance(decoder, base_decoder.Decoder)
decoded_tensor = decoder.decode(self._create_test_example())
self.assertLen(decoded_tensor, 2)
self.assertIn('image/encoded', decoded_tensor)
self.assertIn('image/class/label', decoded_tensor)
@parameterized.parameters(
('flowers'),
('coco'),
)
def test_doesnt_exit_classification_decoder(self, tfds_name):
with self.assertRaises(ValueError):
_ = tfds_factory.get_classification_decoder(tfds_name)
@parameterized.parameters(
('coco'),
('coco/2014'),
('coco/2017'),
)
def test_detection_decoder(self, tfds_name):
decoder = tfds_factory.get_detection_decoder(tfds_name)
self.assertIsInstance(decoder, base_decoder.Decoder)
decoded_tensor = decoder.decode(self._create_test_example())
self.assertLen(decoded_tensor, 8)
self.assertIn('image', decoded_tensor)
self.assertIn('source_id', decoded_tensor)
self.assertIn('height', decoded_tensor)
self.assertIn('width', decoded_tensor)
self.assertIn('groundtruth_classes', decoded_tensor)
self.assertIn('groundtruth_is_crowd', decoded_tensor)
self.assertIn('groundtruth_area', decoded_tensor)
self.assertIn('groundtruth_boxes', decoded_tensor)
@parameterized.parameters(
('pascal'),
('cityscapes'),
)
def test_doesnt_exit_detection_decoder(self, tfds_name):
with self.assertRaises(ValueError):
_ = tfds_factory.get_detection_decoder(tfds_name)
@parameterized.parameters(
('cityscapes'),
('cityscapes/semantic_segmentation'),
('cityscapes/semantic_segmentation_extra'),
)
def test_segmentation_decoder(self, tfds_name):
decoder = tfds_factory.get_segmentation_decoder(tfds_name)
self.assertIsInstance(decoder, base_decoder.Decoder)
decoded_tensor = decoder.decode(self._create_test_example())
self.assertLen(decoded_tensor, 4)
self.assertIn('image/encoded', decoded_tensor)
self.assertIn('image/segmentation/class/encoded', decoded_tensor)
self.assertIn('image/height', decoded_tensor)
self.assertIn('image/width', decoded_tensor)
@parameterized.parameters(
('coco'),
('imagenet'),
)
def test_doesnt_exit_segmentation_decoder(self, tfds_name):
with self.assertRaises(ValueError):
_ = tfds_factory.get_segmentation_decoder(tfds_name)
if __name__ == '__main__':
tf.test.main()
| 4,010 | 33.878261 | 74 | py |
models | models-master/official/vision/dataloaders/retinanet_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser and processing for RetinaNet.
Parses images and ground-truths in a dataset into training targets and packages
them into (image, labels) tuples for RetinaNet.
"""
from typing import Optional
# Import libraries
from absl import logging
import tensorflow as tf
from official.vision.dataloaders import parser
from official.vision.dataloaders import utils
from official.vision.ops import anchor
from official.vision.ops import augment
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
class Parser(parser.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
match_threshold=0.5,
unmatched_threshold=0.5,
aug_type=None,
aug_rand_hflip=False,
aug_scale_min=1.0,
aug_scale_max=1.0,
use_autoaugment=False,
autoaugment_policy_name='v0',
skip_crowd_during_training=True,
max_num_instances=100,
dtype='bfloat16',
resize_first: Optional[bool] = None,
mode=None):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of output image. The
        output_size should be divisible by the largest feature stride
        2^max_level.
min_level: `int` number of minimum level of the output feature pyramid.
max_level: `int` number of maximum level of the output feature pyramid.
      num_scales: `int` number representing intermediate scales added on each
        level. For instance, num_scales=2 adds one additional intermediate
        anchor scale, i.e. scales [2^0, 2^0.5] on each level.
aspect_ratios: `list` of float numbers representing the aspect ratio
anchors added on each level. The number indicates the ratio of width to
        height. For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors
on each scale level.
anchor_size: `float` number representing the scale of size of the base
anchor to the feature stride 2^level.
match_threshold: `float` number between 0 and 1 representing the
lower-bound threshold to assign positive labels for anchors. An anchor
with a score over the threshold is labeled positive.
unmatched_threshold: `float` number between 0 and 1 representing the
upper-bound threshold to assign negative labels for anchors. An anchor
with a score below the threshold is labeled negative.
aug_type: An optional Augmentation object to choose from AutoAugment and
RandAugment.
aug_rand_hflip: `bool`, if True, augment training with random horizontal
flip.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
use_autoaugment: `bool`, if True, use the AutoAugment augmentation policy
during training.
autoaugment_policy_name: `string` that specifies the name of the
AutoAugment policy that will be used during training.
      skip_crowd_during_training: `bool`, if True, skip annotations whose
        `is_crowd` field equals 1.
      max_num_instances: `int`, maximum number of instances in an
        image. The groundtruth data will be padded to `max_num_instances`.
dtype: `str`, data type. One of {`bfloat16`, `float32`, `float16`}.
resize_first: Optional `bool`, if True, resize the image before the
augmentations; computationally more efficient.
mode: a ModeKeys. Specifies if this is training, evaluation, prediction or
prediction with ground-truths in the outputs.
"""
self._mode = mode
self._max_num_instances = max_num_instances
self._skip_crowd_during_training = skip_crowd_during_training
# Anchor.
self._output_size = output_size
self._min_level = min_level
self._max_level = max_level
self._num_scales = num_scales
self._aspect_ratios = aspect_ratios
self._anchor_size = anchor_size
self._match_threshold = match_threshold
self._unmatched_threshold = unmatched_threshold
# Data augmentation.
self._aug_rand_hflip = aug_rand_hflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
# Data augmentation with AutoAugment or RandAugment.
self._augmenter = None
if aug_type is not None:
if aug_type.type == 'autoaug':
logging.info('Using AutoAugment.')
self._augmenter = augment.AutoAugment(
augmentation_name=aug_type.autoaug.augmentation_name,
cutout_const=aug_type.autoaug.cutout_const,
translate_const=aug_type.autoaug.translate_const)
elif aug_type.type == 'randaug':
logging.info('Using RandAugment.')
self._augmenter = augment.RandAugment.build_for_detection(
num_layers=aug_type.randaug.num_layers,
magnitude=aug_type.randaug.magnitude,
cutout_const=aug_type.randaug.cutout_const,
translate_const=aug_type.randaug.translate_const,
prob_to_apply=aug_type.randaug.prob_to_apply,
exclude_ops=aug_type.randaug.exclude_ops)
else:
raise ValueError(f'Augmentation policy {aug_type.type} not supported.')
# Deprecated. Data Augmentation with AutoAugment.
self._use_autoaugment = use_autoaugment
self._autoaugment_policy_name = autoaugment_policy_name
# Data type.
self._dtype = dtype
# Input pipeline optimization.
self._resize_first = resize_first
def _resize_and_crop_image_and_boxes(self, image, boxes, pad=True):
"""Resizes and crops image and boxes, optionally with padding."""
# Resizes and crops image.
padded_size = None
if pad:
padded_size = preprocess_ops.compute_padded_size(self._output_size,
2**self._max_level)
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._output_size,
padded_size=padded_size,
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max)
# Resizes and crops boxes.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = preprocess_ops.resize_and_crop_boxes(boxes, image_scale,
image_info[1, :], offset)
return image, boxes, image_info
def _parse_train_data(self, data, anchor_labeler=None):
"""Parses data for training and evaluation."""
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
# If not empty, `attributes` is a dict of (name, ground_truth) pairs.
# `ground_truth` of attributes is assumed in shape [N, attribute_size].
attributes = data.get('groundtruth_attributes', {})
is_crowds = data['groundtruth_is_crowd']
# Skips annotations with `is_crowd` = True.
if self._skip_crowd_during_training:
num_groundtruths = tf.shape(input=classes)[0]
with tf.control_dependencies([num_groundtruths, is_crowds]):
indices = tf.cond(
pred=tf.greater(tf.size(input=is_crowds), 0),
true_fn=lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
false_fn=lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
classes = tf.gather(classes, indices)
boxes = tf.gather(boxes, indices)
for k, v in attributes.items():
attributes[k] = tf.gather(v, indices)
# Gets original image.
image = data['image']
image_size = tf.cast(tf.shape(image)[0:2], tf.float32)
less_output_pixels = (
self._output_size[0] * self._output_size[1]
) < image_size[0] * image_size[1]
# Resizing first can reduce augmentation computation if the original image
# has more pixels than the desired output image.
# There might be a smarter threshold to compute less_output_pixels as
# we keep the padding to the very end, i.e., a resized image likely has less
# pixels than self._output_size[0] * self._output_size[1].
resize_first = self._resize_first and less_output_pixels
if resize_first:
image, boxes, image_info = self._resize_and_crop_image_and_boxes(
image, boxes, pad=False)
# Apply autoaug or randaug.
if self._augmenter is not None:
image, boxes = self._augmenter.distort_with_boxes(image, boxes)
image_shape = tf.shape(input=image)[0:2]
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(image)
# Flips image randomly during training.
if self._aug_rand_hflip:
image, boxes, _ = preprocess_ops.random_horizontal_flip(image, boxes)
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_ops.denormalize_boxes(boxes, image_shape)
if not resize_first:
image, boxes, image_info = self._resize_and_crop_image_and_boxes(
image, boxes, pad=True)
else:
padded_size = preprocess_ops.compute_padded_size(self._output_size,
2**self._max_level)
image = tf.image.pad_to_bounding_box(
image, 0, 0, padded_size[0], padded_size[1])
image_height, image_width, _ = image.get_shape().as_list()
# Filters out ground-truth boxes that are all zeros.
indices = box_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
for k, v in attributes.items():
attributes[k] = tf.gather(v, indices)
# Assigns anchors.
input_anchor = anchor.build_anchor_generator(
min_level=self._min_level,
max_level=self._max_level,
num_scales=self._num_scales,
aspect_ratios=self._aspect_ratios,
anchor_size=self._anchor_size)
anchor_boxes = input_anchor(image_size=(image_height, image_width))
if anchor_labeler is None:
anchor_labeler = anchor.AnchorLabeler(
self._match_threshold, self._unmatched_threshold
)
(cls_targets, box_targets, att_targets, cls_weights,
box_weights) = anchor_labeler.label_anchors(
anchor_boxes, boxes, tf.expand_dims(classes, axis=1), attributes)
# Casts input image to desired data type.
image = tf.cast(image, dtype=self._dtype)
# Packs labels for model_fn outputs.
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'anchor_boxes': anchor_boxes,
'cls_weights': cls_weights,
'box_weights': box_weights,
'image_info': image_info,
}
if att_targets:
labels['attribute_targets'] = att_targets
return image, labels
def _parse_eval_data(self, data, anchor_labeler=None):
"""Parses data for training and evaluation."""
groundtruths = {}
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
# If not empty, `attributes` is a dict of (name, ground_truth) pairs.
# `ground_truth` of attributes is assumed in shape [N, attribute_size].
attributes = data.get('groundtruth_attributes', {})
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(input=image)[0:2]
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(image)
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_ops.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._output_size,
padded_size=preprocess_ops.compute_padded_size(self._output_size,
2**self._max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
image_height, image_width, _ = image.get_shape().as_list()
# Resizes and crops boxes.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = preprocess_ops.resize_and_crop_boxes(boxes, image_scale,
image_info[1, :], offset)
# Filters out ground-truth boxes that are all zeros.
indices = box_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
for k, v in attributes.items():
attributes[k] = tf.gather(v, indices)
# Assigns anchors.
input_anchor = anchor.build_anchor_generator(
min_level=self._min_level,
max_level=self._max_level,
num_scales=self._num_scales,
aspect_ratios=self._aspect_ratios,
anchor_size=self._anchor_size)
anchor_boxes = input_anchor(image_size=(image_height, image_width))
if anchor_labeler is None:
anchor_labeler = anchor.AnchorLabeler(
self._match_threshold, self._unmatched_threshold
)
(cls_targets, box_targets, att_targets, cls_weights,
box_weights) = anchor_labeler.label_anchors(
anchor_boxes, boxes, tf.expand_dims(classes, axis=1), attributes)
# Casts input image to desired data type.
image = tf.cast(image, dtype=self._dtype)
# Sets up ground-truth data for evaluation.
groundtruths = {
'source_id': data['source_id'],
'height': data['height'],
'width': data['width'],
'num_detections': tf.shape(data['groundtruth_classes']),
'image_info': image_info,
'boxes': box_ops.denormalize_boxes(
data['groundtruth_boxes'], image_shape),
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
if 'groundtruth_attributes' in data:
groundtruths['attributes'] = data['groundtruth_attributes']
groundtruths['source_id'] = utils.process_source_id(
groundtruths['source_id'])
groundtruths = utils.pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
# Packs labels for model_fn outputs.
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'anchor_boxes': anchor_boxes,
'cls_weights': cls_weights,
'box_weights': box_weights,
'image_info': image_info,
'groundtruths': groundtruths,
}
if att_targets:
labels['attribute_targets'] = att_targets
return image, labels
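# Hedged usage sketch (not part of the original module): constructing the
# parser with a typical RetinaNet anchor configuration and turning it into a
# parse function for tf.data. Every numeric value below is an illustrative
# setting, not a default mandated by this module.
def _build_example_retinanet_parse_fn(is_training: bool = True):
  retinanet_parser = Parser(
      output_size=[640, 640],
      min_level=3,
      max_level=7,
      num_scales=3,
      aspect_ratios=[0.5, 1.0, 2.0],
      anchor_size=4.0,
      aug_rand_hflip=True,
      aug_scale_min=0.8,
      aug_scale_max=1.2,
      max_num_instances=100,
      dtype='float32')
  # The returned callable maps decoded tensors (e.g. from a TfExampleDecoder)
  # to an (image, labels) tuple with anchor and box regression targets.
  return retinanet_parser.parse_fn(is_training)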
| 15,290 | 40.215633 | 82 | py |
models | models-master/official/vision/dataloaders/classification_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification decoder and parser."""
from typing import Any, Dict, List, Optional, Tuple
# Import libraries
import tensorflow as tf
from official.vision.configs import common
from official.vision.dataloaders import decoder
from official.vision.dataloaders import parser
from official.vision.ops import augment
from official.vision.ops import preprocess_ops
DEFAULT_IMAGE_FIELD_KEY = 'image/encoded'
DEFAULT_LABEL_FIELD_KEY = 'image/class/label'
class Decoder(decoder.Decoder):
"""A tf.Example decoder for classification task."""
def __init__(self,
image_field_key: str = DEFAULT_IMAGE_FIELD_KEY,
label_field_key: str = DEFAULT_LABEL_FIELD_KEY,
is_multilabel: bool = False,
keys_to_features: Optional[Dict[str, Any]] = None):
if not keys_to_features:
keys_to_features = {
image_field_key:
tf.io.FixedLenFeature((), tf.string, default_value=''),
}
if is_multilabel:
keys_to_features.update(
{label_field_key: tf.io.VarLenFeature(dtype=tf.int64)})
else:
keys_to_features.update({
label_field_key:
tf.io.FixedLenFeature((), tf.int64, default_value=-1)
})
self._keys_to_features = keys_to_features
def decode(self, serialized_example):
return tf.io.parse_single_example(serialized_example,
self._keys_to_features)
class Parser(parser.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size: List[int],
num_classes: float,
image_field_key: str = DEFAULT_IMAGE_FIELD_KEY,
label_field_key: str = DEFAULT_LABEL_FIELD_KEY,
decode_jpeg_only: bool = True,
aug_rand_hflip: bool = True,
aug_crop: Optional[bool] = True,
aug_type: Optional[common.Augmentation] = None,
color_jitter: float = 0.,
random_erasing: Optional[common.RandomErasing] = None,
is_multilabel: bool = False,
dtype: str = 'float32',
crop_area_range: Optional[Tuple[float, float]] = (0.08, 1.0),
center_crop_fraction: Optional[
float] = preprocess_ops.CENTER_CROP_FRACTION,
tf_resize_method: str = 'bilinear',
three_augment: bool = False):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of output image. The
        output_size should be divisible by the largest feature stride
        2^max_level.
num_classes: `float`, number of classes.
image_field_key: `str`, the key name to encoded image or decoded image
matrix in tf.Example.
label_field_key: `str`, the key name to label in tf.Example.
decode_jpeg_only: `bool`, if True, only JPEG format is decoded, this is
faster than decoding other types. Default is True.
aug_rand_hflip: `bool`, if True, augment training with random horizontal
flip.
aug_crop: `bool`, if True, perform random cropping during training and
center crop during validation.
aug_type: An optional Augmentation object to choose from AutoAugment and
RandAugment.
color_jitter: Magnitude of color jitter. If > 0, the value is used to
generate random scale factor for brightness, contrast and saturation.
See `preprocess_ops.color_jitter` for more details.
random_erasing: if not None, augment input image by random erasing. See
`augment.RandomErasing` for more details.
is_multilabel: A `bool`, whether or not each example has multiple labels.
dtype: `str`, cast output image in dtype. It can be 'float32', 'float16',
or 'bfloat16'.
      crop_area_range: An optional `tuple` of (min_area, max_area) used by the
        random crop function to constrain the crop operation. The cropped area
        of the image must cover a fraction of the input image within this
        range. The default area range is (0.08, 1.0).
https://arxiv.org/abs/2204.07118.
      center_crop_fraction: A `float`, the fraction of the image kept by the
        central crop during evaluation.
tf_resize_method: A `str`, interpolation method for resizing image.
      three_augment: A `bool`, whether to apply the DeiT III 3-Augment policy.
"""
self._output_size = output_size
self._aug_rand_hflip = aug_rand_hflip
self._aug_crop = aug_crop
self._num_classes = num_classes
self._image_field_key = image_field_key
if dtype == 'float32':
self._dtype = tf.float32
elif dtype == 'float16':
self._dtype = tf.float16
elif dtype == 'bfloat16':
self._dtype = tf.bfloat16
else:
raise ValueError('dtype {!r} is not supported!'.format(dtype))
if aug_type:
if aug_type.type == 'autoaug':
self._augmenter = augment.AutoAugment(
augmentation_name=aug_type.autoaug.augmentation_name,
cutout_const=aug_type.autoaug.cutout_const,
translate_const=aug_type.autoaug.translate_const)
elif aug_type.type == 'randaug':
self._augmenter = augment.RandAugment(
num_layers=aug_type.randaug.num_layers,
magnitude=aug_type.randaug.magnitude,
cutout_const=aug_type.randaug.cutout_const,
translate_const=aug_type.randaug.translate_const,
prob_to_apply=aug_type.randaug.prob_to_apply,
exclude_ops=aug_type.randaug.exclude_ops)
else:
raise ValueError('Augmentation policy {} not supported.'.format(
aug_type.type))
else:
self._augmenter = None
self._label_field_key = label_field_key
self._color_jitter = color_jitter
if random_erasing:
self._random_erasing = augment.RandomErasing(
probability=random_erasing.probability,
min_area=random_erasing.min_area,
max_area=random_erasing.max_area,
min_aspect=random_erasing.min_aspect,
max_aspect=random_erasing.max_aspect,
min_count=random_erasing.min_count,
max_count=random_erasing.max_count,
trials=random_erasing.trials)
else:
self._random_erasing = None
self._is_multilabel = is_multilabel
self._decode_jpeg_only = decode_jpeg_only
self._crop_area_range = crop_area_range
self._center_crop_fraction = center_crop_fraction
self._tf_resize_method = tf_resize_method
self._three_augment = three_augment
def _parse_train_data(self, decoded_tensors):
"""Parses data for training."""
image = self._parse_train_image(decoded_tensors)
label = tf.cast(decoded_tensors[self._label_field_key], dtype=tf.int32)
if self._is_multilabel:
if isinstance(label, tf.sparse.SparseTensor):
label = tf.sparse.to_dense(label)
label = tf.reduce_sum(tf.one_hot(label, self._num_classes), axis=0)
return image, label
def _parse_eval_data(self, decoded_tensors):
"""Parses data for evaluation."""
image = self._parse_eval_image(decoded_tensors)
label = tf.cast(decoded_tensors[self._label_field_key], dtype=tf.int32)
if self._is_multilabel:
if isinstance(label, tf.sparse.SparseTensor):
label = tf.sparse.to_dense(label)
label = tf.reduce_sum(tf.one_hot(label, self._num_classes), axis=0)
return image, label
def _parse_train_image(self, decoded_tensors):
"""Parses image data for training."""
image_bytes = decoded_tensors[self._image_field_key]
require_decoding = (
not tf.is_tensor(image_bytes) or image_bytes.dtype == tf.dtypes.string
)
if (
require_decoding
and self._decode_jpeg_only
and self._aug_crop
):
image_shape = tf.image.extract_jpeg_shape(image_bytes)
# Crops image.
cropped_image = preprocess_ops.random_crop_image_v2(
image_bytes, image_shape, area_range=self._crop_area_range)
image = tf.cond(
tf.reduce_all(tf.equal(tf.shape(cropped_image), image_shape)),
lambda: preprocess_ops.center_crop_image_v2(image_bytes, image_shape),
lambda: cropped_image)
else:
if require_decoding:
# Decodes image.
image = tf.io.decode_image(image_bytes, channels=3)
image.set_shape([None, None, 3])
else:
# Already decoded image matrix
image = image_bytes
# Crops image.
if self._aug_crop:
cropped_image = preprocess_ops.random_crop_image(
image, area_range=self._crop_area_range)
image = tf.cond(
tf.reduce_all(tf.equal(tf.shape(cropped_image), tf.shape(image))),
lambda: preprocess_ops.center_crop_image(image),
lambda: cropped_image)
if self._aug_rand_hflip:
image = tf.image.random_flip_left_right(image)
# Color jitter.
if self._color_jitter > 0:
image = preprocess_ops.color_jitter(image, self._color_jitter,
self._color_jitter,
self._color_jitter)
# Resizes image.
image = tf.image.resize(
image, self._output_size, method=self._tf_resize_method)
image.set_shape([self._output_size[0], self._output_size[1], 3])
# Apply autoaug or randaug.
if self._augmenter is not None:
image = self._augmenter.distort(image)
# Three augmentation
if self._three_augment:
image = augment.AutoAugment(
augmentation_name='deit3_three_augment',
translate_const=20,
).distort(image)
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
# Random erasing after the image has been normalized
if self._random_erasing is not None:
image = self._random_erasing.distort(image)
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
return image
def _parse_eval_image(self, decoded_tensors):
"""Parses image data for evaluation."""
image_bytes = decoded_tensors[self._image_field_key]
require_decoding = (
not tf.is_tensor(image_bytes) or image_bytes.dtype == tf.dtypes.string
)
if (
require_decoding
and self._decode_jpeg_only
and self._aug_crop
):
image_shape = tf.image.extract_jpeg_shape(image_bytes)
# Center crops.
image = preprocess_ops.center_crop_image_v2(
image_bytes, image_shape, self._center_crop_fraction)
else:
if require_decoding:
# Decodes image.
image = tf.io.decode_image(image_bytes, channels=3)
image.set_shape([None, None, 3])
else:
# Already decoded image matrix
image = image_bytes
# Center crops.
if self._aug_crop:
image = preprocess_ops.center_crop_image(
image, self._center_crop_fraction)
image = tf.image.resize(
image, self._output_size, method=self._tf_resize_method)
image.set_shape([self._output_size[0], self._output_size[1], 3])
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
return image
def parse_train_image(self, decoded_tensors: Dict[str,
tf.Tensor]) -> tf.Tensor:
"""Public interface for parsing image data for training."""
return self._parse_train_image(decoded_tensors)
@classmethod
def inference_fn(cls,
image: tf.Tensor,
input_image_size: List[int],
num_channels: int = 3) -> tf.Tensor:
"""Builds image model inputs for serving."""
image = tf.cast(image, dtype=tf.float32)
image = preprocess_ops.center_crop_image(image)
image = tf.image.resize(
image, input_image_size, method=tf.image.ResizeMethod.BILINEAR)
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
image.set_shape(input_image_size + [num_channels])
return image
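# Hedged usage sketch (not part of the original module): wiring the Decoder
# and Parser above into a simple tf.data pipeline. The file pattern, image
# size, class count and batch size are all made-up values.
def _build_example_classification_dataset(
    file_pattern: str = '/tmp/train-*.tfrecord',
    batch_size: int = 8) -> tf.data.Dataset:
  example_decoder = Decoder()
  example_parser = Parser(output_size=[224, 224], num_classes=1000)
  dataset = tf.data.TFRecordDataset(tf.io.gfile.glob(file_pattern))
  # Decode serialized tf.Examples, then parse them into (image, label) pairs.
  dataset = dataset.map(
      example_decoder.decode, num_parallel_calls=tf.data.AUTOTUNE)
  dataset = dataset.map(
      example_parser.parse_fn(is_training=True),
      num_parallel_calls=tf.data.AUTOTUNE)
  return dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE)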
| 13,029 | 38.484848 | 82 | py |
models | models-master/official/vision/dataloaders/maskrcnn_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser and processing for Mask R-CNN."""
from typing import Optional
# Import libraries
import tensorflow as tf
from official.vision.configs import common
from official.vision.dataloaders import parser
from official.vision.dataloaders import utils
from official.vision.ops import anchor
from official.vision.ops import augment
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
class Parser(parser.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
rpn_match_threshold=0.7,
rpn_unmatched_threshold=0.3,
rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5,
aug_rand_hflip=False,
aug_rand_vflip=False,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_type: Optional[common.Augmentation] = None,
skip_crowd_during_training=True,
max_num_instances=100,
include_mask=False,
outer_boxes_scale=1.0,
mask_crop_size=112,
dtype='float32'):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of output image. The
        output_size should be divisible by the largest feature stride
        2^max_level.
min_level: `int` number of minimum level of the output feature pyramid.
max_level: `int` number of maximum level of the output feature pyramid.
      num_scales: `int` number representing intermediate scales added
        on each level. For instance, num_scales=2 adds one additional
        intermediate anchor scale, i.e. scales [2^0, 2^0.5] on each level.
aspect_ratios: `list` of float numbers representing the aspect ratio
anchors added on each level. The number indicates the ratio of width to
        height. For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors
on each scale level.
anchor_size: `float` number representing the scale of size of the base
anchor to the feature stride 2^level.
      rpn_match_threshold: `float`, IoU threshold at or above which an anchor
        is assigned a positive RPN label during target assignment.
      rpn_unmatched_threshold: `float`, IoU threshold below which an anchor is
        assigned a negative RPN label.
      rpn_batch_size_per_im: `int`, number of anchors sampled per image for
        computing the RPN loss.
      rpn_fg_fraction: `float`, desired fraction of foreground (positive)
        anchors among the sampled RPN anchors.
aug_rand_hflip: `bool`, if True, augment training with random horizontal
flip.
aug_rand_vflip: `bool`, if True, augment training with random vertical
flip.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
aug_type: An optional Augmentation object with params for AutoAugment.
The AutoAug policy should not use rotation/translation/shear.
Only in-place augmentations can be used.
      skip_crowd_during_training: `bool`, if True, skip annotations whose
        `is_crowd` field equals 1.
      max_num_instances: `int`, maximum number of instances in an
        image. The ground-truth data will be padded to `max_num_instances`.
include_mask: a bool to indicate whether parse mask ground-truth.
outer_boxes_scale: a float to scale up the bounding boxes to generate
more inclusive masks. The scale is expected to be >=1.0.
mask_crop_size: the size which ground-truth mask is cropped to.
dtype: `str`, data type. One of {`bfloat16`, `float32`, `float16`}.
"""
self._max_num_instances = max_num_instances
self._skip_crowd_during_training = skip_crowd_during_training
# Anchor.
self._output_size = output_size
self._min_level = min_level
self._max_level = max_level
self._num_scales = num_scales
self._aspect_ratios = aspect_ratios
self._anchor_size = anchor_size
# Target assigning.
self._rpn_match_threshold = rpn_match_threshold
self._rpn_unmatched_threshold = rpn_unmatched_threshold
self._rpn_batch_size_per_im = rpn_batch_size_per_im
self._rpn_fg_fraction = rpn_fg_fraction
# Data augmentation.
self._aug_rand_hflip = aug_rand_hflip
self._aug_rand_vflip = aug_rand_vflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
if aug_type and aug_type.type:
if aug_type.type == 'autoaug':
self._augmenter = augment.AutoAugment(
augmentation_name=aug_type.autoaug.augmentation_name,
cutout_const=aug_type.autoaug.cutout_const,
translate_const=aug_type.autoaug.translate_const)
elif aug_type.type == 'randaug':
self._augmenter = augment.RandAugment(
num_layers=aug_type.randaug.num_layers,
magnitude=aug_type.randaug.magnitude,
cutout_const=aug_type.randaug.cutout_const,
translate_const=aug_type.randaug.translate_const,
prob_to_apply=aug_type.randaug.prob_to_apply,
exclude_ops=aug_type.randaug.exclude_ops)
else:
raise ValueError('Augmentation policy {} not supported.'.format(
aug_type.type))
else:
self._augmenter = None
# Mask.
self._include_mask = include_mask
self._outer_boxes_scale = outer_boxes_scale
self._mask_crop_size = mask_crop_size
# Image output dtype.
self._dtype = dtype
def _parse_train_data(self, data):
"""Parses data for training.
Args:
data: the decoded tensor dictionary from TfExampleDecoder.
Returns:
      image: image tensor that is preprocessed to have normalized value and
        dimension [output_size[0], output_size[1], 3]
labels: a dictionary of tensors used for training. The following describes
{key: value} pairs in the dictionary.
image_info: a 2D `Tensor` that encodes the information of the image and
the applied preprocessing. It is in the format of
          [[original_height, original_width], [scaled_height, scaled_width],
          [y_scale, x_scale], [y_offset, x_offset]].
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each level.
rpn_score_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location]. The height_l and
width_l represent the dimension of class logits at l-th level.
rpn_box_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
        gt_boxes: Ground-truth bounding box annotations. The box is represented
          in [y1, x1, y2, x2] format. The coordinates are w.r.t. the scaled
          image that is fed to the network. The tensor is padded with -1 to
          the fixed dimension [self._max_num_instances, 4].
        gt_classes: Ground-truth class annotations. The tensor is padded
          with -1 to the fixed dimension [self._max_num_instances].
        gt_masks: Ground-truth masks cropped by the bounding box and
          resized to a fixed size determined by mask_crop_size.
"""
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
if self._include_mask:
masks = data['groundtruth_instance_masks']
is_crowds = data['groundtruth_is_crowd']
# Skips annotations with `is_crowd` = True.
if self._skip_crowd_during_training:
num_groundtruths = tf.shape(classes)[0]
with tf.control_dependencies([num_groundtruths, is_crowds]):
indices = tf.cond(
tf.greater(tf.size(is_crowds), 0),
lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
classes = tf.gather(classes, indices)
boxes = tf.gather(boxes, indices)
if self._include_mask:
masks = tf.gather(masks, indices)
# Gets original image and its size.
image = data['image']
if self._augmenter is not None:
image = self._augmenter.distort(image)
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(image)
# Flips image randomly during training.
image, boxes, masks = preprocess_ops.random_horizontal_flip(
image,
boxes,
masks=None if not self._include_mask else masks,
prob=tf.where(self._aug_rand_hflip, 0.5, 0.0),
)
image, boxes, masks = preprocess_ops.random_vertical_flip(
image,
boxes,
masks=None if not self._include_mask else masks,
prob=tf.where(self._aug_rand_vflip, 0.5, 0.0),
)
# Converts boxes from normalized coordinates to pixel coordinates.
# Now the coordinates of boxes are w.r.t. the original image.
boxes = box_ops.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._output_size,
padded_size=preprocess_ops.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max)
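    # Illustrative example (hypothetical sizes, not from any config): a 600x800
    # input resized to output_size [1024, 1024] with no scale jittering yields
    # image_info of roughly [[600, 800], [768, 1024], [1.28, 1.28], [0, 0]].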
image_height, image_width, _ = image.get_shape().as_list()
# Resizes and crops boxes.
# Now the coordinates of boxes are w.r.t the scaled image.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = preprocess_ops.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground-truth boxes that are all zeros.
indices = box_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
if self._include_mask:
outer_boxes = box_ops.compute_outer_boxes(boxes, image_info[1, :],
self._outer_boxes_scale)
masks = tf.gather(masks, indices)
# Transfer boxes to the original image space and do normalization.
cropped_boxes = outer_boxes + tf.tile(
tf.expand_dims(offset, axis=0), [1, 2])
cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
cropped_boxes = box_ops.normalize_boxes(cropped_boxes, image_shape)
num_masks = tf.shape(masks)[0]
masks = tf.image.crop_and_resize(
tf.expand_dims(masks, axis=-1),
cropped_boxes,
box_indices=tf.range(num_masks, dtype=tf.int32),
crop_size=[self._mask_crop_size, self._mask_crop_size],
method='bilinear')
masks = tf.squeeze(masks, axis=-1)
# Assigns anchor targets.
# Note that after the target assignment, box targets are absolute pixel
# offsets w.r.t. the scaled image.
input_anchor = anchor.build_anchor_generator(
min_level=self._min_level,
max_level=self._max_level,
num_scales=self._num_scales,
aspect_ratios=self._aspect_ratios,
anchor_size=self._anchor_size)
anchor_boxes = input_anchor(image_size=(image_height, image_width))
anchor_labeler = anchor.RpnAnchorLabeler(
self._rpn_match_threshold,
self._rpn_unmatched_threshold,
self._rpn_batch_size_per_im,
self._rpn_fg_fraction)
rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors(
anchor_boxes, boxes,
tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32))
# Casts input image to self._dtype
image = tf.cast(image, dtype=self._dtype)
boxes = preprocess_ops.clip_or_pad_to_fixed_size(
boxes, self._max_num_instances, -1)
classes = preprocess_ops.clip_or_pad_to_fixed_size(
classes, self._max_num_instances, -1)
# Packs labels for model_fn outputs.
labels = {
'anchor_boxes': anchor_boxes,
'image_info': image_info,
'rpn_score_targets': rpn_score_targets,
'rpn_box_targets': rpn_box_targets,
'gt_boxes': boxes,
'gt_classes': classes,
}
if self._include_mask:
outer_boxes = preprocess_ops.clip_or_pad_to_fixed_size(
outer_boxes, self._max_num_instances, -1)
masks = preprocess_ops.clip_or_pad_to_fixed_size(
masks, self._max_num_instances, -1)
labels.update({
'gt_outer_boxes': outer_boxes,
'gt_masks': masks,
})
return image, labels
def _parse_eval_data(self, data):
"""Parses data for evaluation.
Args:
data: the decoded tensor dictionary from TfExampleDecoder.
Returns:
      A tuple of (image, labels) where
        image: image tensor that is preprocessed to have normalized values and
          dimension [output_size[0], output_size[1], 3]
        labels: a dictionary of tensors used for evaluation. The following
describes {key: value} pairs in the dictionary.
source_ids: Source image id. Default value -1 if the source id is
empty in the ground-truth annotation.
image_info: a 2D `Tensor` that encodes the information of the image
and the applied preprocessing. It is in the format of
            [[original_height, original_width], [scaled_height, scaled_width],
            [y_scale, x_scale], [y_offset, x_offset]].
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each
level.
"""
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(image)
# Resizes and crops image.
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._output_size,
padded_size=preprocess_ops.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
image_height, image_width, _ = image.get_shape().as_list()
# Casts input image to self._dtype
image = tf.cast(image, dtype=self._dtype)
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_ops.denormalize_boxes(data['groundtruth_boxes'], image_shape)
# Compute Anchor boxes.
input_anchor = anchor.build_anchor_generator(
min_level=self._min_level,
max_level=self._max_level,
num_scales=self._num_scales,
aspect_ratios=self._aspect_ratios,
anchor_size=self._anchor_size)
anchor_boxes = input_anchor(image_size=(image_height, image_width))
labels = {
'image_info': image_info,
'anchor_boxes': anchor_boxes,
}
groundtruths = {
'source_id': data['source_id'],
'height': data['height'],
'width': data['width'],
'num_detections': tf.shape(data['groundtruth_classes'])[0],
'boxes': boxes,
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
groundtruths['source_id'] = utils.process_source_id(
groundtruths['source_id'])
groundtruths = utils.pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
if self._include_mask:
masks = data['groundtruth_instance_masks']
masks = tf.image.crop_and_resize(
tf.expand_dims(masks, axis=-1),
boxes=data['groundtruth_boxes'],
box_indices=tf.range(tf.shape(masks)[0], dtype=tf.int32),
crop_size=[self._mask_crop_size, self._mask_crop_size],
method='bilinear',
)
masks = tf.squeeze(masks, axis=-1)
groundtruths['masks'] = preprocess_ops.clip_or_pad_to_fixed_size(
masks, self._max_num_instances, -1
)
labels['groundtruths'] = groundtruths
return image, labels
| 16,837 | 40.472906 | 82 | py |
models | models-master/official/vision/dataloaders/segmentation_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser and processing for segmentation datasets."""
import tensorflow as tf
from official.vision.configs import semantic_segmentation as config_lib
from official.vision.dataloaders import decoder
from official.vision.dataloaders import parser
from official.vision.dataloaders import utils
from official.vision.ops import preprocess_ops
class Decoder(decoder.Decoder):
"""A tf.Example decoder for segmentation task."""
def __init__(self,
image_feature=config_lib.DenseFeatureConfig(),
additional_dense_features=None):
self._keys_to_features = {
'image/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'image/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'image/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'image/segmentation/class/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
image_feature.feature_name:
tf.io.FixedLenFeature((), tf.string, default_value='')
}
if additional_dense_features:
for feature in additional_dense_features:
self._keys_to_features[feature.feature_name] = tf.io.FixedLenFeature(
(), tf.string, default_value='')
def decode(self, serialized_example):
return tf.io.parse_single_example(serialized_example,
self._keys_to_features)
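# Example usage (an illustrative sketch; `serialized_example` is a hypothetical
# serialized tf.Example from a segmentation TFRecord):
#
#   decoder = Decoder()
#   decoded = decoder.decode(serialized_example)
#   encoded_mask = decoded['image/segmentation/class/encoded']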
class Parser(parser.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size,
crop_size=None,
resize_eval_groundtruth=True,
gt_is_matting_map=False,
groundtruth_padded_size=None,
ignore_label=255,
aug_rand_hflip=False,
preserve_aspect_ratio=True,
aug_scale_min=1.0,
aug_scale_max=1.0,
dtype='float32',
image_feature=config_lib.DenseFeatureConfig(),
additional_dense_features=None):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of output image. The
        output_size should be divisible by the largest feature stride 2^max_level.
crop_size: `Tensor` or `list` for [height, width] of the crop. If
        specified, a training crop of size crop_size is returned. This is useful
for cropping original images during training while evaluating on
original image sizes.
resize_eval_groundtruth: `bool`, if True, eval ground-truth masks are
resized to output_size.
gt_is_matting_map: `bool`, if True, the expected mask is in the range
between 0 and 255. The parser will normalize the value of the mask into
the range between 0 and 1.
groundtruth_padded_size: `Tensor` or `list` for [height, width]. When
resize_eval_groundtruth is set to False, the ground-truth masks are
padded to this size.
      ignore_label: `int`, pixels with this label are not used for training
        and evaluation.
aug_rand_hflip: `bool`, if True, augment training with random horizontal
flip.
preserve_aspect_ratio: `bool`, if True, the aspect ratio is preserved,
otherwise, the image is resized to output_size.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
dtype: `str`, data type. One of {`bfloat16`, `float32`, `float16`}.
image_feature: the config for the image input (usually RGB). Defaults to
the config for a 3-channel image with key = `image/encoded` and ImageNet
dataset mean/stddev.
additional_dense_features: `list` of DenseFeatureConfig for additional
dense features.
"""
self._output_size = output_size
self._crop_size = crop_size
self._resize_eval_groundtruth = resize_eval_groundtruth
if (not resize_eval_groundtruth) and (groundtruth_padded_size is None):
      raise ValueError('groundtruth_padded_size ([height, width]) needs to be '
'specified when resize_eval_groundtruth is False.')
self._gt_is_matting_map = gt_is_matting_map
self._groundtruth_padded_size = groundtruth_padded_size
self._ignore_label = ignore_label
self._preserve_aspect_ratio = preserve_aspect_ratio
# Data augmentation.
self._aug_rand_hflip = aug_rand_hflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
# dtype.
self._dtype = dtype
self._image_feature = image_feature
self._additional_dense_features = additional_dense_features
def _prepare_image_and_label(self, data):
"""Prepare normalized image and label."""
height = data['image/height']
width = data['image/width']
label = tf.io.decode_image(
data['image/segmentation/class/encoded'], channels=1)
label = tf.reshape(label, (1, height, width))
label = tf.cast(label, tf.float32)
image = tf.io.decode_image(
data[self._image_feature.feature_name],
channels=self._image_feature.num_channels,
dtype=tf.uint8)
image = tf.reshape(image, (height, width, self._image_feature.num_channels))
    # Normalizes the image feature with mean and std values, which are divided
    # by 255 because a uint8 image is rescaled to [0, 1] automatically. Images
    # of types other than uint8 will be normalized incorrectly.
image = preprocess_ops.normalize_image(
image, [mean / 255.0 for mean in self._image_feature.mean],
[stddev / 255.0 for stddev in self._image_feature.stddev])
if self._additional_dense_features:
input_list = [image]
for feature_cfg in self._additional_dense_features:
feature = tf.io.decode_image(
data[feature_cfg.feature_name],
channels=feature_cfg.num_channels,
dtype=tf.uint8)
feature = tf.reshape(feature, (height, width, feature_cfg.num_channels))
feature = preprocess_ops.normalize_image(
feature, [mean / 255.0 for mean in feature_cfg.mean],
[stddev / 255.0 for stddev in feature_cfg.stddev])
input_list.append(feature)
concat_input = tf.concat(input_list, axis=2)
else:
concat_input = image
if not self._preserve_aspect_ratio:
label = tf.reshape(label, [data['image/height'], data['image/width'], 1])
concat_input = tf.image.resize(
concat_input, self._output_size, method='bilinear')
label = tf.image.resize(label, self._output_size, method='nearest')
label = tf.reshape(label[:, :, -1], [1] + self._output_size)
return concat_input, label
def _parse_train_data(self, data):
"""Parses data for training and evaluation."""
image, label = self._prepare_image_and_label(data)
# Normalize the label into the range of 0 and 1 for matting ground-truth.
# Note that the input ground-truth labels must be 0 to 255, and do not
# contain ignore_label. For gt_is_matting_map case, ignore_label is only
# used for padding the labels.
if self._gt_is_matting_map:
scale = tf.constant(255.0, dtype=tf.float32)
scale = tf.expand_dims(scale, axis=0)
scale = tf.expand_dims(scale, axis=0)
label = tf.cast(label, tf.float32) / scale
if self._crop_size:
label = tf.reshape(label, [data['image/height'], data['image/width'], 1])
# If output_size is specified, resize image, and label to desired
# output_size.
if self._output_size:
image = tf.image.resize(image, self._output_size, method='bilinear')
label = tf.image.resize(label, self._output_size, method='nearest')
image_mask = tf.concat([image, label], axis=2)
image_mask_crop = tf.image.random_crop(
image_mask, self._crop_size + [tf.shape(image_mask)[-1]])
image = image_mask_crop[:, :, :-1]
label = tf.reshape(image_mask_crop[:, :, -1], [1] + self._crop_size)
# Flips image randomly during training.
if self._aug_rand_hflip:
image, _, label = preprocess_ops.random_horizontal_flip(
image, masks=label)
train_image_size = self._crop_size if self._crop_size else self._output_size
# Resizes and crops image.
image, image_info = preprocess_ops.resize_and_crop_image(
image,
train_image_size,
train_image_size,
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max)
    # Gets image scale and offset from the resize op to transform the label.
image_scale = image_info[2, :]
offset = image_info[3, :]
    # Pad the label and make sure the padded region is assigned the ignore label.
# The label is first offset by +1 and then padded with 0.
label += 1
label = tf.expand_dims(label, axis=3)
label = preprocess_ops.resize_and_crop_masks(label, image_scale,
train_image_size, offset)
label -= 1
label = tf.where(
tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label)
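    # Illustrative example: with ignore_label=255, a real pixel labeled 3 goes
    # 3 -> 4 -> (resize/pad) -> 4 -> 3 and survives, while a padded pixel goes
    # 0 -> -1 -> 255 and is excluded by `valid_mask` below.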
label = tf.squeeze(label, axis=0)
valid_mask = tf.not_equal(label, self._ignore_label)
labels = {
'masks': label,
'valid_masks': valid_mask,
'image_info': image_info,
}
# Cast image as self._dtype
image = tf.cast(image, dtype=self._dtype)
return image, labels
def _parse_eval_data(self, data):
"""Parses data for training and evaluation."""
image, label = self._prepare_image_and_label(data)
# Binarize mask if ground-truth is a matting map
if self._gt_is_matting_map:
label = tf.divide(tf.cast(label, dtype=tf.float32), 255.0)
label = utils.binarize_matting_map(label)
# The label is first offset by +1 and then padded with 0.
label += 1
label = tf.expand_dims(label, axis=3)
# Resizes and crops image.
image, image_info = preprocess_ops.resize_and_crop_image(
image, self._output_size, self._output_size)
if self._resize_eval_groundtruth:
# Resizes eval masks to match input image sizes. In that case, mean IoU
# is computed on output_size not the original size of the images.
image_scale = image_info[2, :]
offset = image_info[3, :]
label = preprocess_ops.resize_and_crop_masks(label, image_scale,
self._output_size, offset)
else:
label = tf.image.pad_to_bounding_box(label, 0, 0,
self._groundtruth_padded_size[0],
self._groundtruth_padded_size[1])
label -= 1
label = tf.where(
tf.equal(label, -1), self._ignore_label * tf.ones_like(label), label)
label = tf.squeeze(label, axis=0)
valid_mask = tf.not_equal(label, self._ignore_label)
labels = {
'masks': label,
'valid_masks': valid_mask,
'image_info': image_info
}
# Cast image as self._dtype
image = tf.cast(image, dtype=self._dtype)
return image, labels
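# Example usage (an illustrative sketch; the argument values are hypothetical
# and `parse_fn` is assumed to be the entry point inherited from the
# `parser.Parser` base class):
#
#   train_parser = Parser(output_size=[512, 512], aug_rand_hflip=True)
#   image, labels = train_parser.parse_fn(is_training=True)(decoded_example)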
| 11,828 | 40.072917 | 82 | py |
models | models-master/official/vision/utils/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/vision/utils/summary_manager.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom summary manager utilities."""
import os
from typing import Any, Callable, Dict, Optional
import orbit
import tensorflow as tf
from official.core import config_definitions
class ImageScalarSummaryManager(orbit.utils.SummaryManager):
"""Class of custom summary manager that creates scalar and image summary."""
def __init__(
self,
summary_dir: str,
scalar_summary_fn: Callable[..., Any],
image_summary_fn: Optional[Callable[..., Any]],
max_outputs: int = 20,
global_step=None,
):
"""Initializes the `ImageScalarSummaryManager` instance."""
self._enabled = summary_dir is not None
self._summary_dir = summary_dir
self._scalar_summary_fn = scalar_summary_fn
self._image_summary_fn = image_summary_fn
self._summary_writers = {}
self._max_outputs = max_outputs
if global_step is None:
self._global_step = tf.summary.experimental.get_step()
else:
self._global_step = global_step
def _write_summaries(
self, summary_dict: Dict[str, Any], relative_path: str = ''
):
for name, value in summary_dict.items():
if isinstance(value, dict):
self._write_summaries(
value, relative_path=os.path.join(relative_path, name)
)
else:
with self.summary_writer(relative_path).as_default():
if name.startswith('image/'):
self._image_summary_fn(
name, value, self._global_step, max_outputs=self._max_outputs
)
else:
self._scalar_summary_fn(name, value, self._global_step)
def maybe_build_eval_summary_manager(
params: config_definitions.ExperimentConfig, model_dir: str
) -> Optional[orbit.utils.SummaryManager]:
"""Maybe creates a SummaryManager."""
if (
hasattr(params.task, 'allow_image_summary')
and params.task.allow_image_summary
):
eval_summary_dir = os.path.join(
model_dir, params.trainer.validation_summary_subdir
)
return ImageScalarSummaryManager(
eval_summary_dir,
scalar_summary_fn=tf.summary.scalar,
image_summary_fn=tf.summary.image,
)
return None
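# Example usage (an illustrative sketch; `eval_summary_dir` and `metrics` are
# hypothetical, and `write_summaries` is assumed to be inherited from
# orbit.utils.SummaryManager):
#
#   manager = ImageScalarSummaryManager(
#       eval_summary_dir,
#       scalar_summary_fn=tf.summary.scalar,
#       image_summary_fn=tf.summary.image)
#   # Keys prefixed with 'image/' become image summaries; all other keys
#   # become scalar summaries.
#   manager.write_summaries(metrics)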
| 2,763 | 31.517647 | 78 | py |
models | models-master/official/vision/utils/object_detection/argmax_matcher.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify matched_threshold to prevent columns
from matching to rows (generally resulting in a negative training example) and
unmatched_threshold to ignore the match (generally resulting in neither a
positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow as tf
from official.vision.utils.object_detection import matcher
from official.vision.utils.object_detection import shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
matched_threshold (upper threshold) and unmatched_threshold (lower threshold)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative or Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
      raise ValueError('Need to also define matched_threshold when '
                       'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
        raise ValueError('unmatched_threshold needs to be smaller or equal '
                         'to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
raise ValueError('When negatives are in between matched and '
'unmatched thresholds, these cannot be of equal '
                         'value. matched: %s, unmatched: %s' %
                         (self._matched_threshold, self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non-empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column.
matches = tf.argmax(input=similarity_matrix, axis=0, output_type=tf.int32)
# Deal with matched and unmatched threshold.
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(input_tensor=similarity_matrix, axis=0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold,
matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(
input=similarity_matrix, axis=1, output_type=tf.int32)
force_match_column_indicators = tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1])
force_match_row_ids = tf.argmax(
input=force_match_column_indicators, axis=0, output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(input_tensor=force_match_column_indicators, axis=0),
tf.bool)
final_matches = tf.where(force_match_column_mask, force_match_row_ids,
matches)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
if similarity_matrix.shape.dims[0].value == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
pred=tf.greater(tf.shape(input=similarity_matrix)[0], 0),
true_fn=_match_when_rows_are_non_empty,
false_fn=_match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
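    Example (illustrative): for x = [5, 7, 9], indicator = [True, False, True]
      and val = -1, the result is [-1, 7, -1].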
"""
indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
| 9,013 | 43.623762 | 80 | py |
models | models-master/official/vision/utils/object_detection/visualization_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of functions that are used for visualization.
These functions often receive an image and perform some visualization on it.
The functions do not return a value; instead, they modify the image itself.
"""
import collections
import functools
from typing import Any, Dict, Optional, List, Union
from absl import logging
# Set headless-friendly backend.
import matplotlib
matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
import six
import tensorflow as tf
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
from official.vision.utils.object_detection import shape_utils
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.io.gfile.GFile(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def visualize_images_with_bounding_boxes(images, box_outputs, step,
summary_writer):
"""Records subset of evaluation images with bounding boxes."""
if not isinstance(images, list):
logging.warning(
'visualize_images_with_bounding_boxes expects list of '
'images but received type: %s and value: %s', type(images), images)
return
image_shape = tf.shape(images[0])
image_height = tf.cast(image_shape[0], tf.float32)
image_width = tf.cast(image_shape[1], tf.float32)
normalized_boxes = box_ops.normalize_boxes(box_outputs,
[image_height, image_width])
bounding_box_color = tf.constant([[1.0, 1.0, 0.0, 1.0]])
image_summary = tf.image.draw_bounding_boxes(
tf.cast(images, tf.float32), normalized_boxes, bounding_box_color)
with summary_writer.as_default():
tf.summary.image('bounding_box_summary', image_summary, step=step)
summary_writer.flush()
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
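# Example usage (an illustrative sketch with made-up values):
#
#   image = np.zeros((100, 200, 3), dtype=np.uint8)
#   draw_bounding_box_on_image_array(
#       image, 0.2, 0.1, 0.8, 0.9, color='blue', thickness=2,
#       display_str_list=['cat: 87%'])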
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
if hasattr(font, 'getsize'):
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
else:
display_str_heights = [font.getbbox(ds)[3] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
try:
if hasattr(font, 'getsize'):
text_width, text_height = font.getsize(display_str)
else:
text_width, text_height = font.getbbox(display_str)[2:4]
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[
(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom),
],
fill=color,
)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font,
)
except ValueError:
pass
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image, boxes, classes, scores, category_index=category_index, **kwargs)
def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
**kwargs)
def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
keypoints=keypoints,
**kwargs)
def _visualize_boxes_and_masks_and_keypoints(image, boxes, classes, scores,
masks, keypoints, category_index,
**kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
**kwargs)
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize(
image, image_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def visualize_outputs(
logs,
task_config,
original_image_spatial_shape=None,
true_image_shape=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=False,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
key: str = 'image/validation_outputs',
) -> Dict[str, Any]:
"""Visualizes the detection outputs.
It extracts images and predictions from logs and draws visualization on input
images. By default, it requires `detection_boxes`, `detection_classes` and
`detection_scores` in the prediction, and optionally accepts
`detection_keypoints` and `detection_masks`.
Args:
    logs: A dictionary of logs that contains images and predictions.
task_config: A task config.
original_image_spatial_shape: A [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: A [N, 3] tensor containing the spatial size of unpadded
original_image.
max_boxes_to_draw: The maximum number of boxes to draw on an image. Default
20.
min_score_thresh: The minimum score threshold for visualization. Default
0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates). Default is
False.
image_mean: An optional float or list of floats used as the mean pixel value
to normalize images.
image_std: An optional float or list of floats used as the std to normalize
images.
key: A string specifying the key of the returned dictionary.
Returns:
A dictionary of images with visualization drawn on it. Each key corresponds
to a 4D tensor with predictions (boxes, segments and/or keypoints) drawn
on each image.
"""
images = logs['image']
boxes = logs['detection_boxes']
classes = tf.cast(logs['detection_classes'], dtype=tf.int32)
scores = logs['detection_scores']
num_classes = task_config.model.num_classes
keypoints = (
logs['detection_keypoints'] if 'detection_keypoints' in logs else None
)
instance_masks = (
logs['detection_masks'] if 'detection_masks' in logs else None
)
category_index = {}
for i in range(1, num_classes + 1):
category_index[i] = {'id': i, 'name': str(i)}
def _denormalize_images(images: tf.Tensor) -> tf.Tensor:
if image_mean is None and image_std is None:
images *= tf.constant(
preprocess_ops.STDDEV_RGB, shape=[1, 1, 3], dtype=images.dtype
)
images += tf.constant(
preprocess_ops.MEAN_RGB, shape=[1, 1, 3], dtype=images.dtype
)
elif image_mean is not None and image_std is not None:
if isinstance(image_mean, float) and isinstance(image_std, float):
images = images * image_std + image_mean
elif isinstance(image_mean, list) and isinstance(image_std, list):
images *= tf.constant(image_std, shape=[1, 1, 3], dtype=images.dtype)
images += tf.constant(image_mean, shape=[1, 1, 3], dtype=images.dtype)
else:
raise ValueError(
'`image_mean` and `image_std` should be the same type.'
)
else:
raise ValueError(
'Both `image_mean` and `image_std` should be set or None at the same '
'time.'
)
return tf.cast(images, dtype=tf.uint8)
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
_denormalize_images,
elems=images,
fn_output_signature=tf.TensorSpec(
shape=images.shape.as_list()[1:], dtype=tf.uint8
),
parallel_iterations=32,
),
)
images_with_boxes = draw_bounding_boxes_on_image_tensors(
images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape,
true_image_shape,
instance_masks,
keypoints,
max_boxes_to_draw,
min_score_thresh,
use_normalized_coordinates,
)
outputs = {}
for i, image in enumerate(images_with_boxes):
outputs[key + f'/{i}'] = image[None, ...]
return outputs
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
      {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates). Default is
True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
if instance_masks is not None and keypoints is None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks,
category_index=category_index,
**visualization_keyword_args)
elems = [
true_shapes, original_shapes, images, boxes, classes, scores,
instance_masks
]
elif instance_masks is None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [
true_shapes, original_shapes, images, boxes, classes, scores, keypoints
]
elif instance_masks is not None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [
true_shapes, original_shapes, images, boxes, classes, scores,
instance_masks, keypoints
]
else:
visualize_boxes_fn = functools.partial(
_visualize_boxes,
category_index=category_index,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.compat.v1.py_func(visualize_boxes_fn,
image_and_detections[2:], tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color,
fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with values
      of either 0 or 1.
    color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
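# Example usage (an illustrative sketch with made-up values):
#
#   image = np.zeros((32, 32, 3), dtype=np.uint8)
#   mask = np.zeros((32, 32), dtype=np.uint8)
#   mask[8:24, 8:24] = 1
#   draw_mask_on_image_array(image, mask, color='blue', alpha=0.5)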
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
  This function groups boxes that correspond to the same location, creates a
  display string for each detection, and overlays these on the image. Note that
  this function modifies the image in place and returns that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then this
function assumes that the boxes to be plotted are groundtruth boxes and
      plots all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can be None
    use_normalized_coordinates: whether boxes are to be interpreted as normalized
coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw all
boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
else:
box_to_color_map[box] = STANDARD_COLORS[classes[i] %
len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image, box_to_instance_masks_map[box], color=color)
if instance_boundaries is not None:
draw_mask_on_image_array(
image, box_to_instance_boundaries_map[box], color='red', alpha=1.0)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates)
return image
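# Example usage (an illustrative sketch with made-up detections):
#
#   image = np.zeros((480, 640, 3), dtype=np.uint8)
#   boxes = np.array([[0.1, 0.1, 0.6, 0.7]])  # [ymin, xmin, ymax, xmax]
#   classes = np.array([1])
#   scores = np.array([0.9])
#   category_index = {1: {'id': 1, 'name': 'person'}}
#   visualize_boxes_and_labels_on_image_array(
#       image, boxes, classes, scores, category_index,
#       use_normalized_coordinates=True)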
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (
np.arange(cumulative_values.size, dtype=np.float32) /
cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
    image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
cdf_plot = tf.compat.v1.py_func(cdf_plot, [values], tf.uint8)
tf.compat.v1.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
    image = np.frombuffer(
        fig.canvas.tostring_rgb(),
        dtype='uint8').reshape(1, int(height), int(width), 3)
return image
hist_plot = tf.compat.v1.py_func(hist_plot, [values, bins], tf.uint8)
tf.compat.v1.summary.image(name, hist_plot)
def update_detection_state(step_outputs=None) -> Dict[str, Any]:
"""Updates detection state to optionally add input image and predictions."""
state = {}
if step_outputs:
state['image'] = tf.concat(step_outputs['visualization'][0], axis=0)
state['detection_boxes'] = tf.concat(
step_outputs['visualization'][1]['detection_boxes'], axis=0
)
state['detection_classes'] = tf.concat(
step_outputs['visualization'][1]['detection_classes'], axis=0
)
state['detection_scores'] = tf.concat(
step_outputs['visualization'][1]['detection_scores'], axis=0
)
if 'detection_kpts' in step_outputs['visualization'][1]:
detection_keypoints = step_outputs['visualization'][1]['detection_kpts']
elif 'detection_keypoints' in step_outputs['visualization'][1]:
detection_keypoints = step_outputs['visualization'][1][
'detection_keypoints'
]
else:
detection_keypoints = None
if detection_keypoints is not None:
state['detection_keypoints'] = tf.concat(detection_keypoints, axis=0)
detection_masks = step_outputs['visualization'][1].get(
'detection_masks', None
)
if detection_masks:
state['detection_masks'] = tf.concat(detection_masks, axis=0)
return state
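# Illustrative usage sketch (not part of the original file): builds a minimal
# `step_outputs` in the (images, predictions) layout that
# `update_detection_state` reads, where each entry is a list of per-replica
# tensors. The shapes and values below are assumptions for the demo only.
def _update_detection_state_example():
  step_outputs = {
      'visualization': (
          [tf.zeros([2, 32, 32, 3], dtype=tf.uint8)],
          {
              'detection_boxes': [tf.zeros([2, 10, 4], dtype=tf.float32)],
              'detection_classes': [tf.ones([2, 10], dtype=tf.float32)],
              'detection_scores': [tf.zeros([2, 10], dtype=tf.float32)],
          },
      )
  }
  # Returns a dict with 'image', 'detection_boxes', 'detection_classes' and
  # 'detection_scores' entries, each concatenated along the batch dimension.
  return update_detection_state(step_outputs)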
| 34,757 | 37.749164 | 80 | py |
models | models-master/official/vision/utils/object_detection/minibatch_sampler.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base minibatch sampler module.
The job of the minibatch_sampler is to subsample a minibatch based on some
criterion.
The main function call is:
subsample(indicator, batch_size, **params).
Indicator is a 1d boolean tensor where True denotes which examples can be
sampled. It returns a boolean indicator where True denotes an example has been
sampled.
Subclasses should implement the Subsample function and can make use of the
@staticmethod SubsampleIndicator.
This is originally implemented in TensorFlow Object Detection API.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
from official.vision.utils.object_detection import ops
class MinibatchSampler(object):
"""Abstract base class for subsampling minibatches."""
__metaclass__ = ABCMeta
def __init__(self):
"""Constructs a minibatch sampler."""
pass
@abstractmethod
def subsample(self, indicator, batch_size, **params):
"""Returns subsample of entries in indicator.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
**params: additional keyword arguments for specific implementations of the
MinibatchSampler.
Returns:
sample_indicator: boolean tensor of shape [N] whose True entries have been
        sampled. If sum(indicator) >= batch_size, then sum(sample_indicator) =
        batch_size.
"""
pass
@staticmethod
def subsample_indicator(indicator, num_samples):
"""Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Args:
indicator: a 1-dimensional boolean tensor indicating which elements are
allowed to be sampled and which are not.
num_samples: int32 scalar tensor
Returns:
a boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random.shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(input=indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(
selected_indices,
tf.shape(input=indicator)[0])
return tf.equal(selected_indicator, 1)
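# Illustrative usage sketch (not part of the original file): demonstrates that
# `subsample_indicator` keeps at most `num_samples` of the True entries,
# chosen at random. The indicator below is an assumption for the demo only.
def _subsample_indicator_example():
  indicator = tf.constant([True, True, False, True, False])
  # At most two of the three True positions remain True; which two is random.
  return MinibatchSampler.subsample_indicator(indicator, 2)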
| 3,074 | 32.064516 | 80 | py |
models | models-master/official/vision/utils/object_detection/shape_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils used to manipulate tensor shapes."""
import tensorflow as tf
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
else:
return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
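# Illustrative usage sketch (not part of the original file): with fully static
# shapes the comparison happens immediately and a no-op is returned, while a
# dynamic dimension defers the comparison to an assert op. The shapes below
# are assumptions for the demo only.
def _assert_shape_equal_example():
  static_check = assert_shape_equal([2, 3], [2, 3])
  dynamic_dim = tf.shape(tf.zeros([2, 3]))[0]
  dynamic_check = assert_shape_equal([dynamic_dim, 3], [2, 3])
  return static_check, dynamic_check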
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
  Returns a list of static and dynamic values for shape dimensions. This is
  useful for preserving static shapes, when available, in reshape operations.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(input=tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(input=tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor, begin=tf.zeros(len(clip_size), dtype=tf.int32), size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(input=clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[tf.zeros(len(trailing_paddings), dtype=tf.int32), trailing_paddings],
axis=1)
padded_tensor = tf.pad(tensor=clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
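# Illustrative usage sketch (not part of the original file): pads a 2x3 tensor
# with zeros up to 4x3, then clips it down to 2x2. The tensor and target
# shapes below are assumptions for the demo only.
def _pad_or_clip_nd_example():
  demo = tf.reshape(tf.range(6), [2, 3])
  padded = pad_or_clip_nd(demo, [4, 3])   # Two all-zero rows are appended.
  clipped = pad_or_clip_nd(demo, [2, 2])  # The last column is dropped.
  return padded, clipped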
| 3,608 | 32.416667 | 79 | py |
models | models-master/official/vision/utils/object_detection/target_assigner.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow as tf
from official.vision.utils.object_detection import box_list
from official.vision.utils.object_detection import shape_utils
KEYPOINTS_FIELD_NAME = 'keypoints'
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder,
negative_class_weight=1.0,
unmatched_cls_target=None):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with respect
to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each anchor (and
can be empty for scalar targets). This shape must thus be compatible
with the groundtruth labels that are passed to the "assign" function
(which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). If set to None,
unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if unmatched_cls_target is None:
self._unmatched_cls_target = tf.constant([0], tf.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
@property
def box_coder(self):
return self._box_coder
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
groundtruth_weights=None,
**params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k] with labels for
each of the ground_truth boxes. The subshape [d_1, ... d_k] can be empty
(corresponding to scalar inputs). When set to None, groundtruth_labels
assumes a binary problem where all ground_truth boxes get a positive
label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of the
Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be a BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be a BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(
tf.expand_dims(groundtruth_boxes.num_boxes(), 0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(
self._unmatched_cls_target))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc(
groundtruth_boxes.get(), anchors.get())
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors, groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(),
[match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
# To broadcast matched_anchors_mask to the same shape as
# matched_reg_targets.
matched_anchors_mask = tf.tile(
tf.expand_dims(matched_anchors_mask, 1),
[1, tf.shape(matched_reg_targets)[1]])
reg_targets = tf.where(matched_anchors_mask, matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size * [0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels, match):
"""Create classification targets for each anchor.
    Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k] with
labels for each of the ground_truth boxes. The subshape [d_1, ... d_k]
can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors and
groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=self._unmatched_cls_target,
ignored_value=self._unmatched_cls_target)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors and
groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self, match, groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors and
groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
class OlnTargetAssigner(TargetAssigner):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder,
negative_class_weight=1.0,
unmatched_cls_target=None,
center_matcher=None):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with respect
to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each anchor (and
can be empty for scalar targets). This shape must thus be compatible
with the groundtruth labels that are passed to the "assign" function
(which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). If set to None,
unmatched_cls_target is set to be [0] for each anchor.
center_matcher: Matcher used to match groundtruth to anchors to sample and
assign the regression targets of centerness to each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
super(OlnTargetAssigner, self).__init__(
similarity_calc=similarity_calc,
matcher=matcher,
box_coder=box_coder,
negative_class_weight=negative_class_weight,
unmatched_cls_target=unmatched_cls_target)
# centerness-matcher with independent sampling IoU threshold.
self._center_matcher = center_matcher
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
groundtruth_weights=None,
**params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k] with labels for
each of the ground_truth boxes. The subshape [d_1, ... d_k] can be empty
(corresponding to scalar inputs). When set to None, groundtruth_labels
assumes a binary problem where all ground_truth boxes get a positive
label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of the
Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
matched_gt_boxlist: a BoxList object with data of float32 tensor with
shape [num_anchors, box_dimension] which encodes the coordinates of the
matched groundtruth boxes.
matched_anchors_mask: a Bool tensor with shape [num_anchors] which
indicates whether an anchor is matched or not.
center_matched_gt_boxlist: a BoxList object with data of float32 tensor
with shape [num_anchors, box_dimension] which encodes the coordinates of
the groundtruth boxes matched for centerness target assignment.
center_matched_anchors_mask: a Boolean tensor with shape [num_anchors]
which indicates whether an anchor is matched or not for centerness
target assignment.
matched_ious: a float32 tensor with shape [num_anchors] which encodes the
ious between each anchor and the matched groundtruth boxes.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be a BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be a BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(
tf.expand_dims(groundtruth_boxes.num_boxes(), 0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(
self._unmatched_cls_target))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc(
groundtruth_boxes.get(), anchors.get())
match = self._matcher.match(match_quality_matrix, **params)
reg_targets, matched_gt_boxlist, matched_anchors_mask = (
self._create_regression_targets(anchors,
groundtruth_boxes,
match))
cls_targets = self._create_classification_targets(groundtruth_labels,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# Match for creation of centerness regression targets.
if self._center_matcher is not None:
center_match = self._center_matcher.match(
match_quality_matrix, **params)
center_matched_gt_boxes = center_match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
center_matched_gt_boxlist = box_list.BoxList(center_matched_gt_boxes)
center_matched_anchors_mask = center_match.matched_column_indicator()
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
if self._center_matcher is not None:
matched_ious = tf.reduce_max(match_quality_matrix, 0)
return (cls_targets, cls_weights, reg_targets, reg_weights, match,
matched_gt_boxlist, matched_anchors_mask,
center_matched_gt_boxlist, center_matched_anchors_mask,
matched_ious)
else:
return (cls_targets, cls_weights, reg_targets, reg_weights, match)
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(),
[match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
# To broadcast matched_anchors_mask to the same shape as
# matched_reg_targets.
matched_anchors_mask_tiled = tf.tile(
tf.expand_dims(matched_anchors_mask, 1),
[1, tf.shape(matched_reg_targets)[1]])
reg_targets = tf.where(matched_anchors_mask_tiled,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets, matched_gt_boxlist, matched_anchors_mask
| 24,198 | 45.181298 | 80 | py |
models | models-master/official/vision/utils/object_detection/faster_rcnn_box_coder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
import tensorflow as tf
from official.vision.utils.object_detection import box_coder
from official.vision.utils.object_detection import box_list
EPSILON = 1e-8
class FasterRcnnBoxCoder(box_coder.BoxCoder):
"""Faster RCNN box coder."""
def __init__(self, scale_factors=None):
"""Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. If
set to None, does not perform scaling. For Faster RCNN, the open-source
implementation recommends using [10.0, 10.0, 5.0, 5.0].
"""
if scale_factors:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self._scale_factors = scale_factors
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.math.log(w / wa)
th = tf.math.log(h / ha)
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
return tf.transpose(a=tf.stack([ty, tx, th, tw]))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ty, tx, th, tw = tf.unstack(tf.transpose(a=rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return box_list.BoxList(tf.transpose(a=tf.stack([ymin, xmin, ymax, xmax])))
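# Illustrative usage sketch (not part of the original file): encodes one box
# against one anchor and decodes it back, recovering the original corners up
# to numerical precision. The coordinates are assumptions for the demo only,
# and the public encode/decode wrappers are assumed to come from the base
# box_coder.BoxCoder class.
def _faster_rcnn_box_coder_example():
  coder = FasterRcnnBoxCoder(scale_factors=[10.0, 10.0, 5.0, 5.0])
  boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.5, 0.6]], tf.float32))
  anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 0.4, 0.8]], tf.float32))
  rel_codes = coder.encode(boxes, anchors)    # [ty, tx, th, tw] targets.
  decoded = coder.decode(rel_codes, anchors)  # BoxList close to the input box.
  return rel_codes, decoded.get()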
| 3,806 | 31.262712 | 79 | py |
models | models-master/official/vision/utils/object_detection/matcher.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one wants to consider it
neither a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and it provides simple apis
to query the results.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i]. (2)
match_results[i]=-1, meaning that column i is not matched. (3)
match_results[i]=-2, meaning that column i is ignored.
Raises:
ValueError: if match_results does not have rank 1 or is not an
        int32 tensor.
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype != tf.int32:
      raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(input=self.matched_column_indices())
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.size(input=self.unmatched_column_indices())
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the colum is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(input=self.ignored_column_indices())
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
tf.gather(self._match_results, self.matched_column_indices()))
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat(
[tf.stack([ignored_value, unmatched_value]), input_tensor], axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
class Matcher(object):
"""Abstract base class for matcher."""
__metaclass__ = ABCMeta
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of the
Matcher.
Returns:
A Match object with the results of matching.
"""
if not scope:
scope = 'Match'
with tf.name_scope(scope) as scope:
return Match(self._match(similarity_matrix, **params))
@abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of the
Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
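# Illustrative usage sketch (not part of the original file): a Match built
# from raw match results, where column 0 matches row 2, column 1 is unmatched
# (-1) and column 2 is ignored (-2). The values are assumptions for the demo.
def _match_example():
  match = Match(tf.constant([2, -1, -2], dtype=tf.int32))
  rows = tf.constant([[1.0], [2.0], [3.0]])
  gathered = match.gather_based_on_match(
      rows,
      unmatched_value=tf.constant([0.0]),
      ignored_value=tf.constant([-1.0]))
  # matched_column_indices() -> [0]; gathered -> [[3.0], [0.0], [-1.0]].
  return match.matched_column_indices(), gathered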
| 8,751 | 35.315353 | 80 | py |
models | models-master/official/vision/utils/object_detection/box_list_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bounding Box List operations.
Example box operations that are supported:
* areas: compute bounding box areas
* iou: pairwise intersection-over-union scores
* sq_dist: pairwise distances between bounding boxes
Whenever box_list_ops functions output a BoxList, the fields of the incoming
BoxList are retained unless documented otherwise.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf
from official.vision.utils.object_detection import box_list
from official.vision.utils.object_detection import ops
class SortOrder(object):
"""Enum class for sort order.
Attributes:
ascend: ascend order.
descend: descend order.
"""
ascend = 1
descend = 2
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope or 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def height_width(boxlist, scope=None):
"""Computes height and width of boxes in boxlist.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
Height: A tensor with shape [N] representing box heights.
Width: A tensor with shape [N] representing box widths.
"""
with tf.name_scope(scope or 'HeightWidth'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1])
def scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope or 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):
"""Clip bounding boxes to a window.
This op clips any input bounding boxes (represented by bounding box
corners) to a window, optionally filtering out boxes that do not
overlap at all with the window.
Args:
boxlist: BoxList holding M_in boxes
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip boxes.
filter_nonoverlapping: whether to filter out boxes that do not overlap at
all with the window.
scope: name scope.
Returns:
a BoxList holding M_out boxes where M_out <= M_in
"""
with tf.name_scope(scope or 'ClipToWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)
y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)
x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)
x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)
clipped = box_list.BoxList(
tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],
1))
clipped = _copy_extra_fields(clipped, boxlist)
if filter_nonoverlapping:
areas = area(clipped)
nonzero_area_indices = tf.cast(
tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
clipped = gather(clipped, nonzero_area_indices)
return clipped
def prune_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall outside a given window.
This function prunes bounding boxes that even partially fall outside the given
window. See also clip_to_window which only prunes bounding boxes that fall
completely outside the window, and clips any bounding boxes that partially
overflow.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] of
the window
scope: name scope.
Returns:
pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope or 'PruneOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.less(y_min, win_y_min),
tf.less(x_min, win_x_min),
tf.greater(y_max, win_y_max),
tf.greater(x_max, win_x_max)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
def prune_completely_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall completely outside of the given window.
The function clip_to_window prunes bounding boxes that fall
completely outside the window, but also clips any bounding boxes that
partially overflow. This function does not clip partially overflowing boxes.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] of
the window
scope: name scope.
Returns:
pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
the window.
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
  with tf.name_scope(scope or 'PruneCompletelyOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.greater_equal(y_min, win_y_max),
tf.greater_equal(x_min, win_x_max),
tf.less_equal(y_max, win_y_min),
tf.less_equal(x_max, win_x_min)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
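# Illustrative usage sketch (not part of the original file): contrasts the two
# pruning ops above. A box straddling the window edge is removed by
# prune_outside_window but kept by prune_completely_outside_window. The boxes
# and window below are assumptions for the demo only.
def _prune_window_example():
  window = tf.constant([0.0, 0.0, 1.0, 1.0])
  boxes = box_list.BoxList(
      tf.constant([[0.1, 0.1, 0.4, 0.4],     # Fully inside: kept by both.
                   [0.5, 0.5, 1.2, 1.2],     # Straddles the window edge.
                   [1.1, 1.1, 1.5, 1.5]]))   # Fully outside: pruned by both.
  fully_inside, _ = prune_outside_window(boxes, window)            # 1 box.
  overlapping, _ = prune_completely_outside_window(boxes, window)  # 2 boxes.
  return fully_inside, overlapping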
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope or 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def matched_intersection(boxlist1, boxlist2, scope=None):
"""Compute intersection areas between corresponding boxes in two boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing pairwise intersections
"""
with tf.name_scope(scope or 'MatchedIntersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
min_ymax = tf.minimum(y_max1, y_max2)
max_ymin = tf.maximum(y_min1, y_min2)
intersect_heights = tf.maximum(0.0, min_ymax - max_ymin)
min_xmax = tf.minimum(x_max1, x_max2)
max_xmin = tf.maximum(x_min1, x_min2)
intersect_widths = tf.maximum(0.0, min_xmax - max_xmin)
return tf.reshape(intersect_heights * intersect_widths, [-1])
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope or 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0), tf.zeros_like(intersections),
tf.truediv(intersections, unions))
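# Illustrative usage sketch (not part of the original file): two unit-area
# boxes overlapping over a quarter of their area have
# IoU = 0.25 / (1 + 1 - 0.25) ~= 0.143. The boxes are assumptions for the
# demo only.
def _iou_example():
  boxes_a = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
  boxes_b = box_list.BoxList(tf.constant([[0.5, 0.5, 1.5, 1.5]]))
  return iou(boxes_a, boxes_b)  # [[~0.143]]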
def matched_iou(boxlist1, boxlist2, scope=None):
"""Compute intersection-over-union between corresponding boxes in boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing pairwise iou scores.
"""
with tf.name_scope(scope or 'MatchedIOU'):
intersections = matched_intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = areas1 + areas2 - intersections
return tf.where(
tf.equal(intersections, 0.0), tf.zeros_like(intersections),
tf.truediv(intersections, unions))
def ioa(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-area between box collections.
intersection-over-area (IOA) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, ioa(box1, box2) != ioa(box2, box1).
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise ioa scores.
"""
with tf.name_scope(scope or 'IOA'):
intersections = intersection(boxlist1, boxlist2)
areas = tf.expand_dims(area(boxlist2), 0)
return tf.truediv(intersections, areas)
def prune_non_overlapping_boxes(boxlist1,
boxlist2,
min_overlap=0.0,
scope=None):
"""Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.
  For each box in boxlist1, we want its IOA to be at least min_overlap with
  at least one of the boxes in boxlist2. If it is not, we remove it.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
min_overlap: Minimum required overlap between boxes, to count them as
overlapping.
scope: name scope.
Returns:
new_boxlist1: A pruned boxlist with size [N', 4].
keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the
first input BoxList `boxlist1`.
"""
with tf.name_scope(scope or 'PruneNonOverlappingBoxes'):
ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor
    ioa_ = tf.reduce_max(ioa_, axis=[0])  # [N] tensor
keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1])
new_boxlist1 = gather(boxlist1, keep_inds)
return new_boxlist1, keep_inds
def prune_small_boxes(boxlist, min_side, scope=None):
"""Prunes small boxes in the boxlist which have a side smaller than min_side.
Args:
boxlist: BoxList holding N boxes.
min_side: Minimum width AND height of box to survive pruning.
scope: name scope.
Returns:
A pruned boxlist.
"""
with tf.name_scope(scope or 'PruneSmallBoxes'):
height, width = height_width(boxlist)
is_valid = tf.logical_and(
tf.greater_equal(width, min_side), tf.greater_equal(height, min_side))
return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
def change_coordinate_frame(boxlist, window, scope=None):
"""Change coordinate frame of the boxlist to be relative to window's frame.
Given a window of the form [ymin, xmin, ymax, xmax],
changes bounding box coordinates from boxlist to be relative to this window
(e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).
An example use case is data augmentation: where we are given groundtruth
boxes (boxlist) and would like to randomly crop the image to some
window (window). In this case we need to change the coordinate frame of
each groundtruth box to be relative to this new window.
Args:
boxlist: A BoxList object holding N boxes.
window: A rank 1 tensor [4].
scope: name scope.
Returns:
Returns a BoxList object with N boxes.
"""
with tf.name_scope(scope or 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
boxlist_new = scale(
box_list.BoxList(boxlist.get() -
[window[0], window[1], window[0], window[1]]),
1.0 / win_height, 1.0 / win_width)
boxlist_new = _copy_extra_fields(boxlist_new, boxlist)
return boxlist_new
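# Illustrative usage sketch (not part of the original file): a box covering
# the lower-right quadrant of the crop window maps to the lower-right quadrant
# of the new [0, 1] x [0, 1] frame. The coordinates are assumptions for the
# demo only.
def _change_coordinate_frame_example():
  window = tf.constant([0.25, 0.25, 0.75, 0.75])
  boxes = box_list.BoxList(tf.constant([[0.5, 0.5, 0.75, 0.75]]))
  return change_coordinate_frame(boxes, window).get()  # [[0.5, 0.5, 1.0, 1.0]]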
def sq_dist(boxlist1, boxlist2, scope=None):
"""Computes the pairwise squared distances between box corners.
This op treats each box as if it were a point in a 4d Euclidean space and
computes pairwise squared distances.
Mathematically, we are given two matrices of box coordinates X and Y,
where X(i,:) is the i'th row of X, containing the 4 numbers defining the
corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to
boxlist2. We compute
Z(i,j) = ||X(i,:) - Y(j,:)||^2
= ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:),
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise distances
"""
with tf.name_scope(scope or 'SqDist'):
    sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keepdims=True)
    sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keepdims=True)
innerprod = tf.matmul(
boxlist1.get(), boxlist2.get(), transpose_a=False, transpose_b=True)
return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod
def boolean_mask(boxlist,
indicator,
fields=None,
scope=None,
use_static_shapes=False,
indicator_sum=None):
"""Select boxes from BoxList according to indicator and return new BoxList.
`boolean_mask` returns the subset of boxes that are marked as "True" by the
indicator tensor. By default, `boolean_mask` returns boxes corresponding to
the input index list, as well as all additional fields stored in the boxlist
(indexing into the first dimension). However one can optionally only draw
from a subset of fields.
Args:
boxlist: BoxList holding N boxes
indicator: a rank-1 boolean tensor
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
      guarantees.
indicator_sum: An integer containing the sum of `indicator` vector. Only
required if `use_static_shape` is True.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indicator
Raises:
ValueError: if `indicator` is not a rank-1 boolean tensor.
"""
with tf.name_scope(scope or 'BooleanMask'):
if indicator.shape.ndims != 1:
raise ValueError('indicator should have rank 1')
if indicator.dtype != tf.bool:
raise ValueError('indicator should be a boolean tensor')
if use_static_shapes:
if not (indicator_sum and isinstance(indicator_sum, int)):
        raise ValueError('`indicator_sum` must be of type int')
selected_positions = tf.cast(indicator, dtype=tf.float32)
indexed_positions = tf.cast(
tf.multiply(tf.cumsum(selected_positions), selected_positions),
dtype=tf.int32)
one_hot_selector = tf.one_hot(
indexed_positions - 1, indicator_sum, dtype=tf.float32)
sampled_indices = tf.cast(
tf.tensordot(
tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32),
one_hot_selector,
axes=[0, 0]),
dtype=tf.int32)
return gather(boxlist, sampled_indices, use_static_shapes=True)
else:
subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
if fields is None:
fields = boxlist.get_extra_fields()
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
subboxlist.add_field(field, subfieldlist)
return subboxlist
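# Usage sketch (illustrative values): keep the first and third box together
# with their extra fields, e.g.
#   boxes = box_list.BoxList(tf.constant(
#       [[0., 0., 1., 1.], [0., 0., .5, .5], [.5, .5, 1., 1.]]))
#   boxes.add_field('scores', tf.constant([0.9, 0.2, 0.7]))
#   kept = boolean_mask(boxes, tf.constant([True, False, True]))
# so that kept.num_boxes() == 2 and kept.get_field('scores') == [0.9, 0.7].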
def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False):
"""Gather boxes from BoxList according to indices and return new BoxList.
By default, `gather` returns boxes corresponding to the input index list, as
well as all additional fields stored in the boxlist (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
boxlist: BoxList holding N boxes
indices: a rank-1 tensor of type int32 / int64
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
      guarantees.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indices
Raises:
ValueError: if specified field is not contained in boxlist or if the
indices are not of type int32
"""
with tf.name_scope(scope or 'Gather'):
if len(indices.shape.as_list()) != 1:
raise ValueError('indices should have rank 1')
if indices.dtype != tf.int32 and indices.dtype != tf.int64:
raise ValueError('indices should be an int32 / int64 tensor')
gather_op = tf.gather
if use_static_shapes:
gather_op = ops.matmul_gather_on_zeroth_axis
subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices))
if fields is None:
fields = boxlist.get_extra_fields()
fields += ['boxes']
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = gather_op(boxlist.get_field(field), indices)
subboxlist.add_field(field, subfieldlist)
return subboxlist
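# Usage sketch (illustrative values): indices may repeat or reorder boxes, e.g.
#   gather(boxes, tf.constant([2, 0], dtype=tf.int32))
# returns a BoxList holding the third box followed by the first box, with every
# field gathered in the same order; passing fields=[] gathers only coordinates.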
def concatenate(boxlists, fields=None, scope=None):
"""Concatenate list of BoxLists.
This op concatenates a list of input BoxLists into a larger BoxList. It also
handles concatenation of BoxList fields as long as the field tensor shapes
are equal except for the first dimension.
Args:
boxlists: list of BoxList objects
fields: optional list of fields to also concatenate. By default, all fields
from the first BoxList in the list are included in the concatenation.
scope: name scope.
Returns:
a BoxList with number of boxes equal to
    sum([boxlist.num_boxes() for boxlist in boxlists])
Raises:
ValueError: if boxlists is invalid (i.e., is not a list, is empty, or
contains non BoxList objects), or if requested fields are not contained in
all boxlists
"""
with tf.name_scope(scope or 'Concatenate'):
if not isinstance(boxlists, list):
raise ValueError('boxlists should be a list')
if not boxlists:
raise ValueError('boxlists should have nonzero length')
for boxlist in boxlists:
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('all elements of boxlists should be BoxList objects')
concatenated = box_list.BoxList(
tf.concat([boxlist.get() for boxlist in boxlists], 0))
if fields is None:
fields = boxlists[0].get_extra_fields()
for field in fields:
first_field_shape = boxlists[0].get_field(field).get_shape().as_list()
first_field_shape[0] = -1
if None in first_field_shape:
raise ValueError('field %s must have fully defined shape except for the'
' 0th dimension.' % field)
for boxlist in boxlists:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all requested fields')
field_shape = boxlist.get_field(field).get_shape().as_list()
field_shape[0] = -1
if field_shape != first_field_shape:
raise ValueError('field %s must have same shape for all boxlists '
'except for the 0th dimension.' % field)
concatenated_field = tf.concat(
[boxlist.get_field(field) for boxlist in boxlists], 0)
concatenated.add_field(field, concatenated_field)
return concatenated
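# Usage sketch (illustrative values): concatenate([boxes_a, boxes_b]) returns a
# BoxList with boxes_a.num_boxes() + boxes_b.num_boxes() boxes; every extra
# field of boxes_a (e.g. 'scores') must also exist in boxes_b with the same
# per-box shape, otherwise a ValueError is raised.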
def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None):
"""Sort boxes and associated fields according to a scalar field.
A common use case is reordering the boxes according to descending scores.
Args:
boxlist: BoxList holding N boxes.
field: A BoxList field for sorting and reordering the BoxList.
order: (Optional) descend or ascend. Default is descend.
scope: name scope.
Returns:
sorted_boxlist: A sorted BoxList with the field in the specified order.
Raises:
ValueError: if specified field does not exist
ValueError: if the order is not either descend or ascend
"""
with tf.name_scope(scope or 'SortByField'):
if order != SortOrder.descend and order != SortOrder.ascend:
raise ValueError('Invalid sort order')
field_to_sort = boxlist.get_field(field)
if len(field_to_sort.shape.as_list()) != 1:
raise ValueError('Field should have rank 1')
num_boxes = boxlist.num_boxes()
num_entries = tf.size(field_to_sort)
length_assert = tf.Assert(
tf.equal(num_boxes, num_entries),
['Incorrect field size: actual vs expected.', num_entries, num_boxes])
with tf.control_dependencies([length_assert]):
_, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True)
if order == SortOrder.ascend:
        sorted_indices = tf.reverse(sorted_indices, [0])
return gather(boxlist, sorted_indices)
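# Usage sketch (illustrative values): with a 'scores' field of [0.1, 0.9, 0.5],
#   sort_by_field(boxes, 'scores')
# reorders the boxes (and all other fields) to the score order [0.9, 0.5, 0.1];
# pass order=SortOrder.ascend to get the reverse ordering.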
def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None):
"""Overlay bounding box list on image.
Currently this visualization plots a 1 pixel thick red bounding box on top
of the image. Note that tf.image.draw_bounding_boxes essentially is
1 indexed.
Args:
image: an image tensor with shape [height, width, 3]
boxlist: a BoxList
normalized: (boolean) specify whether corners are to be interpreted as
absolute coordinates in image space or normalized with respect to the
image size.
scope: name scope.
Returns:
image_and_boxes: an image tensor with shape [height, width, 3]
"""
with tf.name_scope(scope or 'VisualizeBoxesInImage'):
if not normalized:
height, width, _ = tf.unstack(tf.shape(image))
boxlist = scale(boxlist, 1.0 / tf.cast(height, tf.float32),
1.0 / tf.cast(width, tf.float32))
corners = tf.expand_dims(boxlist.get(), 0)
    image = tf.expand_dims(image, 0)
    red = tf.constant([[1.0, 0.0, 0.0, 1.0]])  # Colors are required in TF2.
    return tf.squeeze(tf.image.draw_bounding_boxes(image, corners, red), [0])
def filter_field_value_equals(boxlist, field, value, scope=None):
"""Filter to keep only boxes with field entries equal to the given value.
Args:
boxlist: BoxList holding N boxes.
field: field name for filtering.
value: scalar value.
scope: name scope.
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not have
the specified field.
"""
with tf.name_scope(scope or 'FilterFieldValueEquals'):
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field(field):
raise ValueError('boxlist must contain the specified field')
filter_field = boxlist.get_field(field)
gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1])
return gather(boxlist, gather_index)
def filter_greater_than(boxlist, thresh, scope=None):
"""Filter to keep only boxes with score exceeding a given threshold.
This op keeps the collection of boxes whose corresponding scores are
greater than the input threshold.
TODO(jonathanhuang): Change function name to filter_scores_greater_than
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
scope: name scope.
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not
have a scores field
"""
with tf.name_scope(scope or 'FilterGreaterThan'):
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
scores = boxlist.get_field('scores')
if len(scores.shape.as_list()) > 2:
raise ValueError('Scores should have rank 1 or 2')
if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1:
raise ValueError('Scores should have rank 1 or have shape '
'consistent with [None, 1]')
high_score_indices = tf.cast(
tf.reshape(tf.where(tf.greater(scores, thresh)), [-1]), tf.int32)
return gather(boxlist, high_score_indices)
def non_max_suppression(boxlist, thresh, max_output_size, scope=None):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. Note that this only works for a single class ---
to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
max_output_size: maximum number of retained boxes
scope: name scope.
Returns:
a BoxList holding M boxes where M <= max_output_size
Raises:
ValueError: if thresh is not in [0, 1]
"""
with tf.name_scope(scope or 'NonMaxSuppression'):
if not 0 <= thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
selected_indices = tf.image.non_max_suppression(
boxlist.get(),
boxlist.get_field('scores'),
max_output_size,
iou_threshold=thresh)
return gather(boxlist, selected_indices)
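# Usage sketch (illustrative values): with two heavily overlapping boxes whose
# scores are [0.9, 0.8] and thresh=0.5, non_max_suppression keeps only the
# higher-scoring box, because the other one has IOU above 0.5 with an already
# selected box.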
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def to_normalized_coordinates(boxlist,
height,
width,
check_range=True,
scope=None):
"""Converts absolute box coordinates to normalized coordinates in [0, 1].
Usually one uses the dynamic shape of the image or conv-layer tensor:
boxlist = box_list_ops.to_normalized_coordinates(boxlist,
tf.shape(images)[1],
tf.shape(images)[2]),
This function raises an assertion failed error at graph execution time when
the maximum coordinate is smaller than 1.01 (which means that coordinates are
already normalized). The value 1.01 is to deal with small rounding errors.
Args:
boxlist: BoxList with coordinates in terms of pixel-locations.
height: Maximum value for height of absolute box coordinates.
width: Maximum value for width of absolute box coordinates.
check_range: If True, checks if the coordinates are normalized or not.
scope: name scope.
Returns:
boxlist with normalized coordinates in [0, 1].
"""
with tf.name_scope(scope or 'ToNormalizedCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(boxlist.get())
max_assert = tf.Assert(
tf.greater(max_val, 1.01),
['max value is lower than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(boxlist, 1 / height, 1 / width)
def to_absolute_coordinates(boxlist,
height,
width,
check_range=True,
maximum_normalized_coordinate=1.1,
scope=None):
"""Converts normalized box coordinates to absolute pixel coordinates.
This function raises an assertion failed error when the maximum box coordinate
value is larger than maximum_normalized_coordinate (in which case coordinates
are already absolute).
Args:
boxlist: BoxList with coordinates in range [0, 1].
height: Maximum value for height of absolute box coordinates.
width: Maximum value for width of absolute box coordinates.
check_range: If True, checks if the coordinates are normalized or not.
maximum_normalized_coordinate: Maximum coordinate value to be considered as
normalized, default to 1.1.
scope: name scope.
Returns:
boxlist with absolute coordinates in terms of the image size.
"""
with tf.name_scope(scope or 'ToAbsoluteCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
# Ensure range of input boxes is correct.
if check_range:
box_maximum = tf.reduce_max(boxlist.get())
max_assert = tf.Assert(
tf.greater_equal(maximum_normalized_coordinate, box_maximum), [
'maximum box coordinate value is larger '
'than %f: ' % maximum_normalized_coordinate, box_maximum
])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(boxlist, height, width)
def refine_boxes_multi_class(pool_boxes,
num_classes,
nms_iou_thresh,
nms_max_detections,
voting_iou_thresh=0.5):
"""Refines a pool of boxes using non max suppression and box voting.
Box refinement is done independently for each class.
Args:
pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
have a rank 1 'scores' field and a rank 1 'classes' field.
num_classes: (int scalar) Number of classes.
nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
nms_max_detections: (int scalar) maximum output size for NMS.
voting_iou_thresh: (float scalar) iou threshold for box voting.
Returns:
BoxList of refined boxes.
Raises:
ValueError: if
a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
b) pool_boxes is not a BoxList.
c) pool_boxes does not have a scores and classes field.
"""
if not 0.0 <= nms_iou_thresh <= 1.0:
raise ValueError('nms_iou_thresh must be between 0 and 1')
if not 0.0 <= voting_iou_thresh <= 1.0:
raise ValueError('voting_iou_thresh must be between 0 and 1')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
if not pool_boxes.has_field('classes'):
raise ValueError('pool_boxes must have a \'classes\' field')
refined_boxes = []
for i in range(num_classes):
boxes_class = filter_field_value_equals(pool_boxes, 'classes', i)
refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh,
nms_max_detections, voting_iou_thresh)
refined_boxes.append(refined_boxes_class)
return sort_by_field(concatenate(refined_boxes), 'scores')
def refine_boxes(pool_boxes,
nms_iou_thresh,
nms_max_detections,
voting_iou_thresh=0.5):
"""Refines a pool of boxes using non max suppression and box voting.
Args:
pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
have a rank 1 'scores' field.
nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
nms_max_detections: (int scalar) maximum output size for NMS.
voting_iou_thresh: (float scalar) iou threshold for box voting.
Returns:
BoxList of refined boxes.
Raises:
ValueError: if
a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
b) pool_boxes is not a BoxList.
c) pool_boxes does not have a scores field.
"""
if not 0.0 <= nms_iou_thresh <= 1.0:
raise ValueError('nms_iou_thresh must be between 0 and 1')
if not 0.0 <= voting_iou_thresh <= 1.0:
raise ValueError('voting_iou_thresh must be between 0 and 1')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
nms_boxes = non_max_suppression(pool_boxes, nms_iou_thresh,
nms_max_detections)
return box_voting(nms_boxes, pool_boxes, voting_iou_thresh)
def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):
"""Performs box voting as described in S. Gidaris and N.
Komodakis, ICCV 2015.
Performs box voting as described in 'Object detection via a multi-region &
semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For
each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes
with iou overlap >= iou_thresh. The location of B is set to the weighted
average location of boxes in S (scores are used for weighting). And the score
of B is set to the average score of boxes in S.
Args:
selected_boxes: BoxList containing a subset of boxes in pool_boxes. These
boxes are usually selected from pool_boxes using non max suppression.
pool_boxes: BoxList containing a set of (possibly redundant) boxes.
iou_thresh: (float scalar) iou threshold for matching boxes in
selected_boxes and pool_boxes.
Returns:
BoxList containing averaged locations and scores for each box in
selected_boxes.
Raises:
ValueError: if
a) selected_boxes or pool_boxes is not a BoxList.
b) if iou_thresh is not in [0, 1].
c) pool_boxes does not have a scores field.
"""
if not 0.0 <= iou_thresh <= 1.0:
raise ValueError('iou_thresh must be between 0 and 1')
if not isinstance(selected_boxes, box_list.BoxList):
raise ValueError('selected_boxes must be a BoxList')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
iou_ = iou(selected_boxes, pool_boxes)
match_indicator = tf.cast(tf.greater(iou_, iou_thresh), dtype=tf.float32)
num_matches = tf.reduce_sum(match_indicator, 1)
# TODO(kbanoop): Handle the case where some boxes in selected_boxes do not
# match to any boxes in pool_boxes. For such boxes without any matches, we
# should return the original boxes without voting.
match_assert = tf.Assert(
tf.reduce_all(tf.greater(num_matches, 0)),
      ['Each box in selected_boxes must match with at least one box '
       'in pool_boxes.'])
scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)
scores_assert = tf.Assert(
tf.reduce_all(tf.greater_equal(scores, 0)),
['Scores must be non negative.'])
with tf.control_dependencies([scores_assert, match_assert]):
sum_scores = tf.matmul(match_indicator, scores)
averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches
box_locations = tf.matmul(match_indicator,
pool_boxes.get() * scores) / sum_scores
averaged_boxes = box_list.BoxList(box_locations)
_copy_extra_fields(averaged_boxes, selected_boxes)
averaged_boxes.add_field('scores', averaged_scores)
return averaged_boxes
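# Worked example (illustrative values): suppose a selected box [0, 0, 1, 1]
# matches two pool boxes, [0, 0, 1, 1] with score 0.8 and [0.1, 0.1, 1, 1] with
# score 0.2 (both with IOU >= 0.5). Its voted location is the score-weighted
# average (0.8 * [0, 0, 1, 1] + 0.2 * [0.1, 0.1, 1, 1]) / 1.0
# = [0.02, 0.02, 1.0, 1.0], and its voted score is the mean (0.8 + 0.2) / 2
# = 0.5.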
def get_minimal_coverage_box(boxlist, default_box=None, scope=None):
"""Creates a single bounding box which covers all boxes in the boxlist.
Args:
boxlist: A Boxlist.
default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
this default box will be returned. If None, will use a default box of
[[0., 0., 1., 1.]].
scope: Name scope.
Returns:
A [1, 4] float32 tensor with a bounding box that tightly covers all the
boxes in the box list. If the boxlist does not contain any boxes, the
default box is returned.
"""
with tf.name_scope(scope or 'CreateCoverageBox'):
num_boxes = boxlist.num_boxes()
def coverage_box(bboxes):
y_min, x_min, y_max, x_max = tf.split(
value=bboxes, num_or_size_splits=4, axis=1)
y_min_coverage = tf.reduce_min(y_min, axis=0)
x_min_coverage = tf.reduce_min(x_min, axis=0)
y_max_coverage = tf.reduce_max(y_max, axis=0)
x_max_coverage = tf.reduce_max(x_max, axis=0)
return tf.stack(
[y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
axis=1)
default_box = default_box or tf.constant([[0., 0., 1., 1.]])
return tf.cond(
tf.greater_equal(num_boxes, 1),
true_fn=lambda: coverage_box(boxlist.get()),
false_fn=lambda: default_box)
def sample_boxes_by_jittering(boxlist,
num_boxes_to_sample,
stddev=0.1,
scope=None):
"""Samples num_boxes_to_sample boxes by jittering around boxlist boxes.
  It is possible that this function might generate boxes with size 0. The
  larger the stddev is, the more likely this becomes. For a small stddev of
  0.1 this probability is very small.
Args:
boxlist: A boxlist containing N boxes in normalized coordinates.
num_boxes_to_sample: A positive integer containing the number of boxes to
sample.
stddev: Standard deviation. This is used to draw random offsets for the box
corners from a normal distribution. The offset is multiplied by the box
size so will be larger in terms of pixels for larger boxes.
scope: Name scope.
Returns:
sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in
normalized coordinates.
"""
with tf.name_scope(scope or 'SampleBoxesByJittering'):
num_boxes = boxlist.num_boxes()
    box_indices = tf.random.uniform([num_boxes_to_sample],
minval=0,
maxval=num_boxes,
dtype=tf.int32)
sampled_boxes = tf.gather(boxlist.get(), box_indices)
sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0]
sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1]
    rand_miny_gaussian = tf.random.normal([num_boxes_to_sample], stddev=stddev)
    rand_minx_gaussian = tf.random.normal([num_boxes_to_sample], stddev=stddev)
    rand_maxy_gaussian = tf.random.normal([num_boxes_to_sample], stddev=stddev)
    rand_maxx_gaussian = tf.random.normal([num_boxes_to_sample], stddev=stddev)
miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0]
minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1]
maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2]
maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3]
maxy = tf.maximum(miny, maxy)
maxx = tf.maximum(minx, maxx)
sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1)
sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0)
return box_list.BoxList(sampled_boxes)
| 41,961 | 37.42674 | 80 | py |
models | models-master/official/vision/utils/object_detection/balanced_positive_negative_sampler.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired batch_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired batch_size, it returns fewer examples.
The main function to call is subsample(self, indicator, batch_size, labels).
For convenience one can also call SubsampleWeights(self, weights, labels) which
is defined in the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures the length of output of the subsample is always batch_size,
even when the number of examples set to True in indicator is less than
batch_size.
This is originally implemented in TensorFlow Object Detection API.
"""
import tensorflow as tf
from official.vision.utils.object_detection import minibatch_sampler
from official.vision.utils.object_detection import ops
class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler):
"""Subsamples minibatches to a desired balance of positives and negatives."""
def __init__(self, positive_fraction=0.5, is_static=False):
"""Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
in the batch.
is_static: If True, uses an implementation with static shape guarantees.
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1
"""
if positive_fraction < 0 or positive_fraction > 1:
raise ValueError('positive_fraction should be in range [0,1]. '
'Received: %s.' % positive_fraction)
self._positive_fraction = positive_fraction
self._is_static = is_static
def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
"""Counts the number of positives and negatives numbers to be sampled.
Args:
sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
the signed indices of the examples where the sign is based on the label
value. The examples that cannot be sampled are set to 0. It samples
at most sample_size*positive_fraction positive examples and remaining
from negative examples.
sample_size: Size of subsamples.
Returns:
A tuple containing the number of positive and negative labels in the
subsample.
"""
input_length = tf.shape(input=sorted_indices_tensor)[0]
valid_positive_index = tf.greater(sorted_indices_tensor,
tf.zeros(input_length, tf.int32))
num_sampled_pos = tf.reduce_sum(
input_tensor=tf.cast(valid_positive_index, tf.int32))
max_num_positive_samples = tf.cast(
tf.cast(sample_size, tf.float32) * self._positive_fraction, tf.int32)
num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
num_negative_samples = tf.constant(sample_size,
tf.int32) - num_positive_samples
return num_positive_samples, num_negative_samples
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning of
the input tensor.
num_end_samples: Number of examples to be sliced from the end of the input
tensor.
total_num_samples: Sum of is num_start_samples and num_end_samples. This
should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
from input_tensor.
"""
input_length = tf.shape(input=input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.float32)
indexed_positions = tf.multiply(
tf.cumsum(selected_positions), selected_positions)
one_hot_selector = tf.one_hot(
tf.cast(indexed_positions, tf.int32) - 1,
total_num_samples,
dtype=tf.float32)
return tf.cast(
tf.tensordot(
tf.cast(input_tensor, tf.float32), one_hot_selector, axes=[0, 0]),
tf.int32)
def _static_subsample(self, indicator, batch_size, labels):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
        N should be a compile time constant.
batch_size: desired batch size. This scalar cannot be None.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
        (=False) examples. N should be a compile time constant.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled. It ensures the length of output of the subsample is always
batch_size, even when number of examples set to True in indicator is
less than batch_size.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
# Check if indicator and labels have a static size.
if not indicator.shape.is_fully_defined():
      raise ValueError('indicator must be static in shape when is_static is '
'True')
if not labels.shape.is_fully_defined():
      raise ValueError('labels must be static in shape when is_static is '
'True')
if not isinstance(batch_size, int):
      raise ValueError('batch_size has to be an integer when is_static is '
'True.')
input_length = tf.shape(input=indicator)[0]
# Set the number of examples set True in indicator to be at least
# batch_size.
num_true_sampled = tf.reduce_sum(
input_tensor=tf.cast(indicator, tf.float32))
additional_false_sample = tf.less_equal(
tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)),
batch_size - num_true_sampled)
indicator = tf.logical_or(indicator, additional_false_sample)
# Shuffle indicator and label. Need to store the permutation to restore the
# order post sampling.
permutation = tf.random.shuffle(tf.range(input_length))
indicator = ops.matmul_gather_on_zeroth_axis(
tf.cast(indicator, tf.float32), permutation)
labels = ops.matmul_gather_on_zeroth_axis(
tf.cast(labels, tf.float32), permutation)
# index (starting from 1) when indicator is True, 0 when False
indicator_idx = tf.where(
tf.cast(indicator, tf.bool), tf.range(1, input_length + 1),
tf.zeros(input_length, tf.int32))
# Replace -1 for negative, +1 for positive labels
signed_label = tf.where(
tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32),
tf.scalar_mul(-1, tf.ones(input_length, tf.int32)))
# negative of index for negative label, positive index for positive label,
# 0 when indicator is False.
signed_indicator_idx = tf.multiply(indicator_idx, signed_label)
sorted_signed_indicator_idx = tf.nn.top_k(
signed_indicator_idx, input_length, sorted=True).values
[num_positive_samples, num_negative_samples
] = self._get_num_pos_neg_samples(sorted_signed_indicator_idx, batch_size)
sampled_idx = self._get_values_from_start_and_end(
sorted_signed_indicator_idx, num_positive_samples, num_negative_samples,
batch_size)
# Shift the indices to start from 0 and remove any samples that are set as
# False.
sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32)
sampled_idx = tf.multiply(
tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32),
sampled_idx)
sampled_idx_indicator = tf.cast(
tf.reduce_sum(
input_tensor=tf.one_hot(sampled_idx, depth=input_length), axis=0),
tf.bool)
# project back the order based on stored permutations
reprojections = tf.one_hot(
permutation, depth=input_length, dtype=tf.float32)
return tf.cast(
tf.tensordot(
tf.cast(sampled_idx_indicator, tf.float32),
reprojections,
axes=[0, 0]), tf.bool)
def subsample(self, indicator, batch_size, labels, scope=None):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
matches self._positive_fraction. It cannot be None if is_static is True.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
scope: name scope.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
if len(indicator.get_shape().as_list()) != 1:
raise ValueError('indicator must be 1 dimensional, got a tensor of '
'shape %s' % indicator.get_shape())
if len(labels.get_shape().as_list()) != 1:
raise ValueError('labels must be 1 dimensional, got a tensor of '
'shape %s' % labels.get_shape())
if labels.dtype != tf.bool:
raise ValueError('labels should be of type bool. Received: %s' %
labels.dtype)
if indicator.dtype != tf.bool:
raise ValueError('indicator should be of type bool. Received: %s' %
indicator.dtype)
scope = scope or 'BalancedPositiveNegativeSampler'
with tf.name_scope(scope):
if self._is_static:
return self._static_subsample(indicator, batch_size, labels)
else:
# Only sample from indicated samples
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if batch_size is None:
max_num_pos = tf.reduce_sum(
input_tensor=tf.cast(positive_idx, dtype=tf.int32))
else:
max_num_pos = tf.cast(
self._positive_fraction * tf.cast(batch_size, tf.float32),
tf.int32)
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(
input_tensor=tf.cast(sampled_pos_idx, tf.int32))
if batch_size is None:
negative_positive_ratio = (
1 - self._positive_fraction) / self._positive_fraction
max_num_neg = tf.cast(
negative_positive_ratio *
tf.cast(num_sampled_pos, dtype=tf.float32),
dtype=tf.int32)
else:
max_num_neg = batch_size - num_sampled_pos
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
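# Usage sketch (illustrative values): sample a 64-element minibatch with at
# most 32 positives from the anchors that are allowed to be sampled, e.g.
#   sampler = BalancedPositiveNegativeSampler(positive_fraction=0.5)
#   sampled_mask = sampler.subsample(valid_mask, 64, positive_mask)
# where `valid_mask` and `positive_mask` are hypothetical rank-1 boolean
# tensors of the same length; `sampled_mask` marks the chosen examples with
# True.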
| 12,058 | 42.067857 | 80 | py |
models | models-master/official/vision/utils/object_detection/region_similarity_calculator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
if not scope:
scope = 'Area'
with tf.name_scope(scope):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
if not scope:
scope = 'Intersection'
with tf.name_scope(scope):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(a=y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(a=y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(a=x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(a=x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
if not scope:
scope = 'IOU'
with tf.name_scope(scope):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0), tf.zeros_like(intersections),
tf.truediv(intersections, unions))
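# Worked example (illustrative values): for boxes [0, 0, 1, 1] and
# [0, 0.5, 1, 1.5] the intersection area is 1 * 0.5 = 0.5 and the union is
# 1 + 1 - 0.5 = 1.5, so iou returns 0.5 / 1.5 = 1/3 for that pair.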
class RegionSimilarityCalculator(object):
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
    This op (to be overridden) computes a measure of pairwise similarity
    between the boxes in the given BoxLists. Higher values indicate more
    similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
if not scope:
scope = 'Compare'
with tf.name_scope(scope) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return iou(boxlist1, boxlist2)
| 4,544 | 30.783217 | 80 | py |
models | models-master/official/vision/utils/object_detection/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/vision/utils/object_detection/ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for helper tensorflow ops.
This is originally implemented in TensorFlow Object Detection API.
"""
import tensorflow as tf
from official.vision.utils.object_detection import shape_utils
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=tf.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
size = tf.cast(size, dtype=tf.int32)
zeros = tf.ones([size], dtype=dtype) * default_value
values = tf.ones_like(indices, dtype=dtype) * indices_value
return tf.dynamic_stitch(
[tf.range(size), tf.cast(indices, dtype=tf.int32)], [zeros, values])
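# Usage sketch (illustrative values):
#   indices_to_dense_vector(tf.constant([1, 3]), size=5)
# returns [0., 1., 0., 1., 0.]; with indices_value=2. and default_value=-1. it
# would return [-1., 2., -1., 2., -1.] instead.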
def matmul_gather_on_zeroth_axis(params, indices, scope=None):
"""Matrix multiplication based implementation of tf.gather on zeroth axis.
TODO(rathodv, jonathanhuang): enable sparse matmul option.
Args:
params: A float32 Tensor. The tensor from which to gather values. Must be at
least rank 1.
indices: A Tensor. Must be one of the following types: int32, int64. Must be
in range [0, params.shape[0])
scope: A name for the operation (optional).
Returns:
A Tensor. Has the same type as params. Values from params gathered
from indices given by indices, with shape indices.shape + params.shape[1:].
"""
scope = scope or 'MatMulGather'
with tf.name_scope(scope):
params_shape = shape_utils.combined_static_and_dynamic_shape(params)
indices_shape = shape_utils.combined_static_and_dynamic_shape(indices)
params2d = tf.reshape(params, [params_shape[0], -1])
indicator_matrix = tf.one_hot(indices, params_shape[0])
gathered_result_flattened = tf.matmul(indicator_matrix, params2d)
return tf.reshape(gathered_result_flattened,
tf.stack(indices_shape + params_shape[1:]))
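# Usage sketch (illustrative values): for
#   params = [[1., 2.], [3., 4.], [5., 6.]] and indices = [2, 0],
# this returns [[5., 6.], [1., 2.]], the same values as
# tf.gather(params, indices), but computed as a one-hot matmul so the result
# keeps a static shape whenever `indices` has a static length.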
| 3,212 | 38.182927 | 80 | py |