repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---
models | models-master/official/projects/simclr/multitask_train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer binary for multitask simclr."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import train_utils
from official.modeling import performance
from official.modeling.multitask import multitask
from official.modeling.multitask import train_lib
# pylint: disable=unused-import
from official.projects.simclr.common import registry_imports
from official.projects.simclr.configs import multitask_config
from official.projects.simclr.modeling import multitask_model
# pylint: enable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
    # Pure eval modes do not output yaml files. Otherwise, a continuous eval
    # job may race against the train job when writing the same file.
train_utils.serialize_config(params, model_dir)
  # Sets the mixed-precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can significantly speed up models by computing in float16 on GPUs and
  # bfloat16 on TPUs. loss_scale takes effect only when the dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
tasks = multitask.MultiTask.from_config(params.task)
model = multitask_model.SimCLRMTModel(params.task.model)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=tasks,
model=model,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 2,755 | 35.746667 | 80 | py |
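Both trainer binaries in this listing gate mixed precision on `params.runtime.mixed_precision_dtype`. A minimal sketch of what that policy amounts to in plain Keras, assuming a GPU host (this is not the Model Garden `performance` helper itself):

```python
import tensorflow as tf

# A minimal sketch of the mixed-precision setup, assuming a GPU host; on
# TPUs 'mixed_bfloat16' would be used instead.
tf.keras.mixed_precision.set_global_policy('mixed_float16')

policy = tf.keras.mixed_precision.global_policy()
print(policy.compute_dtype)   # float16 -- activations and most math
print(policy.variable_dtype)  # float32 -- master weights stay in float32
```

Keeping variables in float32 is also why a loss scale matters only for float16: bfloat16 shares float32's exponent range, so it does not underflow gradients the same way.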
models | models-master/official/projects/simclr/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision SimCLR trainer."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.simclr.common import registry_imports # pylint: disable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
print(FLAGS.experiment)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
    # Pure eval modes do not output yaml files. Otherwise, a continuous eval
    # job may race against the train job when writing the same file.
train_utils.serialize_config(params, model_dir)
  # Sets the mixed-precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can significantly speed up models by computing in float16 on GPUs and
  # bfloat16 on TPUs. loss_scale takes effect only when the dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 2,488 | 36.149254 | 93 | py |
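The `distribute_utils.get_distribution_strategy` call above selects a `tf.distribute` strategy from the runtime config. A minimal sketch of the strategy-scope pattern it feeds into, assuming a single-host multi-GPU setup:

```python
import tensorflow as tf

# A minimal sketch of the strategy-scope pattern used by the trainer; the
# Model Garden helper merely picks the right strategy (Mirrored, TPU, ...)
# from params.runtime.
strategy = tf.distribute.MirroredStrategy()
print('replicas in sync:', strategy.num_replicas_in_sync)

with strategy.scope():
  # Variables created inside the scope are mirrored across replicas.
  model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
  optimizer = tf.keras.optimizers.SGD(0.1)
```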
models | models-master/official/projects/simclr/common/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration."""
# pylint: disable=unused-import
from official.projects.simclr.configs import simclr
from official.projects.simclr.losses import contrastive_losses
from official.projects.simclr.modeling import simclr_model
from official.projects.simclr.tasks import simclr as simclr_task
from official.vision import registry_imports
| 972 | 41.304348 | 74 | py |
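`registry_imports` works purely by import side effects: each imported module runs its registration call (e.g. `register_config_factory`) at import time. A hypothetical stripped-down version of the pattern — the names `_REGISTRY`, `register`, and `build` are made up here, not the Model Garden API:

```python
# Hypothetical sketch of registration-by-import; names are made up.
_REGISTRY = {}

def register(name):
  def wrapper(fn):
    _REGISTRY[name] = fn  # runs when the defining module is imported
    return fn
  return wrapper

@register('simclr_pretraining')
def simclr_pretraining_config():
  return {'task': 'pretrain'}

def build(name):
  return _REGISTRY[name]()  # lookup succeeds only if the module was imported
```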
models | models-master/official/projects/simclr/configs/multitask_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-task SimCLR configs."""
import dataclasses
from typing import List, Tuple
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling.multitask import configs as multitask_configs
from official.projects.simclr.configs import simclr as simclr_configs
from official.projects.simclr.modeling import simclr_model
from official.vision.configs import backbones
from official.vision.configs import common
@dataclasses.dataclass
class SimCLRMTHeadConfig(hyperparams.Config):
"""Per-task specific configs."""
task_name: str = 'task_name'
  # A supervised head is required for fine-tuning, but optional for pretraining.
supervised_head: simclr_configs.SupervisedHead = dataclasses.field(
default_factory=lambda: simclr_configs.SupervisedHead(num_classes=1001)
)
mode: str = simclr_model.PRETRAIN
@dataclasses.dataclass
class SimCLRMTModelConfig(hyperparams.Config):
"""Model config for multi-task SimCLR model."""
input_size: List[int] = dataclasses.field(default_factory=list)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='resnet', resnet=backbones.ResNet()
)
)
backbone_trainable: bool = True
projection_head: simclr_configs.ProjectionHead = dataclasses.field(
default_factory=lambda: simclr_configs.ProjectionHead( # pylint: disable=g-long-lambda
proj_output_dim=128, num_proj_layers=3, ft_proj_idx=1
)
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False
)
)
heads: Tuple[SimCLRMTHeadConfig, ...] = ()
  # L2 weight decay is used in the model, not in the task.
  # Note that this cannot be used together with the LARS optimizer.
l2_weight_decay: float = 0.0
init_checkpoint: str = ''
# backbone_projection or backbone
init_checkpoint_modules: str = 'backbone_projection'
@exp_factory.register_config_factory('multitask_simclr')
def multitask_simclr() -> multitask_configs.MultiTaskExperimentConfig:
return multitask_configs.MultiTaskExperimentConfig(
task=multitask_configs.MultiTaskConfig(
model=SimCLRMTModelConfig(
heads=(SimCLRMTHeadConfig(
task_name='pretrain_simclr', mode=simclr_model.PRETRAIN),
SimCLRMTHeadConfig(
task_name='finetune_simclr',
mode=simclr_model.FINETUNE))),
task_routines=(multitask_configs.TaskRoutine(
task_name='pretrain_simclr',
task_config=simclr_configs.SimCLRPretrainTask(),
task_weight=2.0),
multitask_configs.TaskRoutine(
task_name='finetune_simclr',
task_config=simclr_configs.SimCLRFinetuneTask(),
task_weight=1.0))),
trainer=multitask_configs.MultiTaskTrainerConfig())
| 3,688 | 40.920455 | 93 | py |
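A short usage sketch of the factory registered above, mirroring `multitask_config_test.py` later in this listing; the two task routines share one `SimCLRMTModelConfig` and differ only in mode and loss weight:

```python
from official.core import exp_factory
# Importing the module runs register_config_factory('multitask_simclr').
from official.projects.simclr.configs import multitask_config  # pylint: disable=unused-import

config = exp_factory.get_exp_config('multitask_simclr')
for routine in config.task.task_routines:
  print(routine.task_name, routine.task_weight)
# pretrain_simclr 2.0
# finetune_simclr 1.0
```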
models | models-master/official/projects/simclr/configs/simclr.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SimCLR configurations."""
import dataclasses
import os
from typing import List, Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.projects.simclr.modeling import simclr_model
from official.vision.configs import backbones
from official.vision.configs import common
@dataclasses.dataclass
class Decoder(hyperparams.Config):
decode_label: bool = True
@dataclasses.dataclass
class Parser(hyperparams.Config):
"""Parser config."""
aug_rand_crop: bool = True
aug_rand_hflip: bool = True
aug_color_distort: bool = True
aug_color_jitter_strength: float = 1.0
aug_color_jitter_impl: str = 'simclrv2' # 'simclrv1' or 'simclrv2'
aug_rand_blur: bool = True
parse_label: bool = True
test_crop: bool = True
mode: str = simclr_model.PRETRAIN
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Training data config."""
input_path: str = ''
global_batch_size: int = 0
is_training: bool = True
dtype: str = 'float32'
shuffle_buffer_size: int = 10000
cycle_length: int = 10
# simclr specific configs
parser: Parser = dataclasses.field(default_factory=Parser)
decoder: Decoder = dataclasses.field(default_factory=Decoder)
  # Useful as a sanity check that no labels are used during pretraining: set
  # labels to zeros (default = False, i.e. keep the original labels).
input_set_label_to_zero: bool = False
@dataclasses.dataclass
class ProjectionHead(hyperparams.Config):
proj_output_dim: int = 128
num_proj_layers: int = 3
ft_proj_idx: int = 1 # layer of the projection head to use for fine-tuning.
@dataclasses.dataclass
class SupervisedHead(hyperparams.Config):
num_classes: int = 1001
zero_init: bool = False
@dataclasses.dataclass
class ContrastiveLoss(hyperparams.Config):
projection_norm: bool = True
temperature: float = 0.1
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class ClassificationLosses(hyperparams.Config):
label_smoothing: float = 0.0
one_hot: bool = True
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
top_k: int = 5
one_hot: bool = True
@dataclasses.dataclass
class SimCLRModel(hyperparams.Config):
"""SimCLR model config."""
input_size: List[int] = dataclasses.field(default_factory=list)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='resnet', resnet=backbones.ResNet()
)
)
projection_head: ProjectionHead = dataclasses.field(
default_factory=lambda: ProjectionHead( # pylint: disable=g-long-lambda
proj_output_dim=128, num_proj_layers=3, ft_proj_idx=1
)
)
supervised_head: SupervisedHead = dataclasses.field(
default_factory=lambda: SupervisedHead(num_classes=1001)
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False
)
)
mode: str = simclr_model.PRETRAIN
backbone_trainable: bool = True
@dataclasses.dataclass
class SimCLRPretrainTask(cfg.TaskConfig):
"""SimCLR pretraining task config."""
model: SimCLRModel = dataclasses.field(
default_factory=lambda: SimCLRModel(mode=simclr_model.PRETRAIN)
)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
parser=Parser(mode=simclr_model.PRETRAIN), is_training=True
)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
parser=Parser(mode=simclr_model.PRETRAIN), is_training=False
)
)
loss: ContrastiveLoss = dataclasses.field(default_factory=ContrastiveLoss)
evaluation: Evaluation = dataclasses.field(default_factory=Evaluation)
init_checkpoint: Optional[str] = None
# all or backbone
init_checkpoint_modules: str = 'all'
@dataclasses.dataclass
class SimCLRFinetuneTask(cfg.TaskConfig):
"""SimCLR fine tune task config."""
model: SimCLRModel = dataclasses.field(
default_factory=lambda: SimCLRModel( # pylint: disable=g-long-lambda
mode=simclr_model.FINETUNE,
supervised_head=SupervisedHead(num_classes=1001, zero_init=True),
)
)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
parser=Parser(mode=simclr_model.FINETUNE), is_training=True
)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
parser=Parser(mode=simclr_model.FINETUNE), is_training=False
)
)
loss: ClassificationLosses = dataclasses.field(
default_factory=ClassificationLosses
)
evaluation: Evaluation = dataclasses.field(default_factory=Evaluation)
init_checkpoint: Optional[str] = None
# all, backbone_projection or backbone
init_checkpoint_modules: str = 'backbone_projection'
@exp_factory.register_config_factory('simclr_pretraining')
def simclr_pretraining() -> cfg.ExperimentConfig:
"""Image classification general."""
return cfg.ExperimentConfig(
task=SimCLRPretrainTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
@exp_factory.register_config_factory('simclr_finetuning')
def simclr_finetuning() -> cfg.ExperimentConfig:
"""Image classification general."""
return cfg.ExperimentConfig(
task=SimCLRFinetuneTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
IMAGENET_TRAIN_EXAMPLES = 1281167
IMAGENET_VAL_EXAMPLES = 50000
IMAGENET_INPUT_PATH_BASE = 'imagenet-2012-tfrecord'
@exp_factory.register_config_factory('simclr_pretraining_imagenet')
def simclr_pretraining_imagenet() -> cfg.ExperimentConfig:
"""Image classification general."""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
return cfg.ExperimentConfig(
task=SimCLRPretrainTask(
model=SimCLRModel(
mode=simclr_model.PRETRAIN,
backbone_trainable=True,
input_size=[224, 224, 3],
backbone=backbones.Backbone(
type='resnet', resnet=backbones.ResNet(model_id=50)),
projection_head=ProjectionHead(
proj_output_dim=128, num_proj_layers=3, ft_proj_idx=1),
supervised_head=SupervisedHead(num_classes=1001),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=True)),
loss=ContrastiveLoss(),
evaluation=Evaluation(),
train_data=DataConfig(
parser=Parser(mode=simclr_model.PRETRAIN),
decoder=Decoder(decode_label=True),
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
parser=Parser(mode=simclr_model.PRETRAIN),
decoder=Decoder(decode_label=True),
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size),
),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=500 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'lars',
'lars': {
'momentum':
0.9,
'weight_decay_rate':
0.000001,
'exclude_from_weight_decay': [
'batch_normalization', 'bias'
]
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
# 0.2 * BatchSize / 256
'initial_learning_rate': 0.2 * train_batch_size / 256,
# train_steps - warmup_steps
'decay_steps': 475 * steps_per_epoch
}
},
'warmup': {
'type': 'linear',
'linear': {
# 5% of total epochs
'warmup_steps': 25 * steps_per_epoch
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
@exp_factory.register_config_factory('simclr_finetuning_imagenet')
def simclr_finetuning_imagenet() -> cfg.ExperimentConfig:
"""Image classification general."""
train_batch_size = 1024
eval_batch_size = 1024
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
pretrain_model_base = ''
return cfg.ExperimentConfig(
task=SimCLRFinetuneTask(
model=SimCLRModel(
mode=simclr_model.FINETUNE,
backbone_trainable=True,
input_size=[224, 224, 3],
backbone=backbones.Backbone(
type='resnet', resnet=backbones.ResNet(model_id=50)),
projection_head=ProjectionHead(
proj_output_dim=128, num_proj_layers=3, ft_proj_idx=1),
supervised_head=SupervisedHead(num_classes=1001, zero_init=True),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
loss=ClassificationLosses(),
evaluation=Evaluation(),
train_data=DataConfig(
parser=Parser(mode=simclr_model.FINETUNE),
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
parser=Parser(mode=simclr_model.FINETUNE),
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
global_batch_size=eval_batch_size),
init_checkpoint=pretrain_model_base,
# all, backbone_projection or backbone
init_checkpoint_modules='backbone_projection'),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=60 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'lars',
'lars': {
'momentum':
0.9,
'weight_decay_rate':
0.0,
'exclude_from_weight_decay': [
'batch_normalization', 'bias'
]
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
                      # 0.01 * BatchSize / 512
'initial_learning_rate': 0.01 * train_batch_size / 512,
'decay_steps': 60 * steps_per_epoch
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
| 12,753 | 35.544413 | 85 | py |
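The schedule comments in `simclr_pretraining_imagenet` encode the SimCLR linear learning-rate scaling rule. Worked out for the default batch size of 4096, as a quick sanity check of the numbers above:

```python
# Worked numbers for the pretraining schedule above.
IMAGENET_TRAIN_EXAMPLES = 1281167
train_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size  # 312

initial_lr = 0.2 * train_batch_size / 256  # 3.2, linear LR scaling
warmup_steps = 25 * steps_per_epoch        # 7800 steps: 5% of 500 epochs
decay_steps = 475 * steps_per_epoch        # cosine decays over the rest
print(steps_per_epoch, initial_lr, warmup_steps, decay_steps)
```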
models | models-master/official/projects/simclr/configs/multitask_config_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multitask_config."""
import tensorflow as tf
from official.core import exp_factory
from official.modeling.multitask import configs as multitask_configs
from official.projects.simclr.configs import multitask_config as simclr_multitask_config
from official.projects.simclr.configs import simclr as exp_cfg
class MultitaskConfigTest(tf.test.TestCase):
def test_simclr_configs(self):
config = exp_factory.get_exp_config('multitask_simclr')
self.assertIsInstance(config, multitask_configs.MultiTaskExperimentConfig)
self.assertIsInstance(config.task.model,
simclr_multitask_config.SimCLRMTModelConfig)
self.assertIsInstance(config.task.task_routines[0].task_config,
exp_cfg.SimCLRPretrainTask)
self.assertIsInstance(config.task.task_routines[1].task_config,
exp_cfg.SimCLRFinetuneTask)
if __name__ == '__main__':
tf.test.main()
| 1,554 | 37.875 | 88 | py |
models | models-master/official/projects/simclr/configs/simclr_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SimCLR config."""
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.simclr.common import registry_imports # pylint: disable=unused-import
from official.projects.simclr.configs import simclr as exp_cfg
class SimCLRConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
'simclr_pretraining_imagenet', 'simclr_finetuning_imagenet')
def test_simclr_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
    if config_name == 'simclr_pretraining_imagenet':
self.assertIsInstance(config.task, exp_cfg.SimCLRPretrainTask)
elif config_name == 'simclr_finetuning_imagenet':
self.assertIsInstance(config.task, exp_cfg.SimCLRFinetuneTask)
self.assertIsInstance(config.task.model,
exp_cfg.SimCLRModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,830 | 37.957447 | 93 | py |
models | models-master/official/projects/simclr/dataloaders/simclr_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser and processing for SimCLR.
For pre-training:
- Preprocessing:
-> random cropping
-> resize back to the original size
-> random color distortions
-> random Gaussian blur (sequential)
- Each image needs to be randomly processed twice
```snippets
if train_mode == 'pretrain':
xs = []
for _ in range(2): # Two transformations
xs.append(preprocess_fn_pretrain(image))
image = tf.concat(xs, -1)
else:
image = preprocess_fn_finetune(image)
```
For fine-tuning:
typical image classification input
"""
from typing import List
import tensorflow as tf
from official.projects.simclr.dataloaders import preprocess_ops as simclr_preprocess_ops
from official.projects.simclr.modeling import simclr_model
from official.vision.dataloaders import decoder
from official.vision.dataloaders import parser
from official.vision.ops import preprocess_ops
class Decoder(decoder.Decoder):
"""A tf.Example decoder for classification task."""
def __init__(self, decode_label=True):
self._decode_label = decode_label
self._keys_to_features = {
'image/encoded': tf.io.FixedLenFeature((), tf.string, default_value=''),
}
if self._decode_label:
self._keys_to_features.update({
'image/class/label': (
tf.io.FixedLenFeature((), tf.int64, default_value=-1))
})
def decode(self, serialized_example):
return tf.io.parse_single_example(
serialized_example, self._keys_to_features)
class TFDSDecoder(decoder.Decoder):
"""A TFDS decoder for classification task."""
def __init__(self, decode_label=True):
self._decode_label = decode_label
def decode(self, serialized_example):
sample_dict = {
'image/encoded': tf.io.encode_jpeg(
serialized_example['image'], quality=100),
}
if self._decode_label:
sample_dict.update({
'image/class/label': serialized_example['label'],
})
return sample_dict
class Parser(parser.Parser):
"""Parser for SimCLR training."""
def __init__(self,
output_size: List[int],
aug_rand_crop: bool = True,
aug_rand_hflip: bool = True,
aug_color_distort: bool = True,
aug_color_jitter_strength: float = 1.0,
aug_color_jitter_impl: str = 'simclrv2',
aug_rand_blur: bool = True,
parse_label: bool = True,
test_crop: bool = True,
mode: str = simclr_model.PRETRAIN,
dtype: str = 'float32'):
"""Initializes parameters for parsing annotations in the dataset.
Args:
      output_size: `Tensor` or `list` for [height, width] of the output image.
        The output_size should be divisible by the largest feature stride
        2^max_level.
      aug_rand_crop: `bool`, if True, augment training with random cropping.
      aug_rand_hflip: `bool`, if True, augment training with random
        horizontal flip.
      aug_color_distort: `bool`, if True, augment training with color
        distortion.
      aug_color_jitter_strength: `float`, the strength of the color
        augmentation.
      aug_color_jitter_impl: `str`, 'simclrv1' or 'simclrv2'. Defines whether
        to use simclrv1's or simclrv2's version of random brightness.
aug_rand_blur: `bool`, if True, augment training with random blur.
parse_label: `bool`, if True, parse label together with image.
test_crop: `bool`, if True, augment eval with center cropping.
      mode: `str`, 'pretrain' or 'finetune'. Defines the training mode.
dtype: `str`, cast output image in dtype. It can be 'float32', 'float16',
or 'bfloat16'.
"""
self._output_size = output_size
self._aug_rand_crop = aug_rand_crop
self._aug_rand_hflip = aug_rand_hflip
self._aug_color_distort = aug_color_distort
self._aug_color_jitter_strength = aug_color_jitter_strength
self._aug_color_jitter_impl = aug_color_jitter_impl
self._aug_rand_blur = aug_rand_blur
self._parse_label = parse_label
self._mode = mode
self._test_crop = test_crop
if max(self._output_size[0], self._output_size[1]) <= 32:
self._test_crop = False
if dtype == 'float32':
self._dtype = tf.float32
elif dtype == 'float16':
self._dtype = tf.float16
elif dtype == 'bfloat16':
self._dtype = tf.bfloat16
else:
raise ValueError('dtype {!r} is not supported!'.format(dtype))
def _parse_one_train_image(self, image_bytes):
image = tf.image.decode_jpeg(image_bytes, channels=3)
    # Convert the image to floats in [0.0, 1.0].
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if self._aug_rand_crop:
image = simclr_preprocess_ops.random_crop_with_resize(
image, self._output_size[0], self._output_size[1])
if self._aug_rand_hflip:
image = tf.image.random_flip_left_right(image)
if self._aug_color_distort and self._mode == simclr_model.PRETRAIN:
image = simclr_preprocess_ops.random_color_jitter(
image=image,
color_jitter_strength=self._aug_color_jitter_strength,
impl=self._aug_color_jitter_impl)
if self._aug_rand_blur and self._mode == simclr_model.PRETRAIN:
image = simclr_preprocess_ops.random_blur(
image, self._output_size[0], self._output_size[1])
image = tf.image.resize(
image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)
image = tf.reshape(image, [self._output_size[0], self._output_size[1], 3])
image = tf.clip_by_value(image, 0., 1.)
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
return image
def _parse_train_data(self, decoded_tensors):
"""Parses data for training."""
image_bytes = decoded_tensors['image/encoded']
if self._mode == simclr_model.FINETUNE:
image = self._parse_one_train_image(image_bytes)
elif self._mode == simclr_model.PRETRAIN:
# Transform each example twice using a combination of
# simple augmentations, resulting in 2N data points
xs = []
for _ in range(2):
xs.append(self._parse_one_train_image(image_bytes))
image = tf.concat(xs, -1)
else:
raise ValueError('The mode {} is not supported by the Parser.'
.format(self._mode))
if self._parse_label:
label = tf.cast(decoded_tensors['image/class/label'], dtype=tf.int32)
return image, label
return image
def _parse_eval_data(self, decoded_tensors):
"""Parses data for evaluation."""
image_bytes = decoded_tensors['image/encoded']
image_shape = tf.image.extract_jpeg_shape(image_bytes)
if self._test_crop:
image = preprocess_ops.center_crop_image_v2(image_bytes, image_shape)
else:
image = tf.image.decode_jpeg(image_bytes, channels=3)
    # Convert the image to floats in [0.0, 1.0].
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.image.resize(
image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)
image = tf.reshape(image, [self._output_size[0], self._output_size[1], 3])
image = tf.clip_by_value(image, 0., 1.)
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
if self._parse_label:
label = tf.cast(decoded_tensors['image/class/label'], dtype=tf.int32)
return image, label
return image
| 8,094 | 34.349345 | 88 | py |
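The pretrain path in the parser stacks the two augmented views on the channel axis; `SimCLRModel.call` later splits them back and re-concatenates along the batch axis. A small shape sketch of that round trip:

```python
import tensorflow as tf

# Shape sketch of the two-view layout: the parser emits (bsz, h, w, 3 * 2);
# the model recovers 2N examples for the contrastive loss.
bsz, h, w = 4, 224, 224
two_views = tf.random.uniform([bsz, h, w, 3 * 2])
views = tf.split(two_views, num_or_size_splits=2, axis=-1)  # 2 x (4, 224, 224, 3)
batched = tf.concat(views, axis=0)
print(batched.shape)  # (8, 224, 224, 3)
```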
models | models-master/official/projects/simclr/dataloaders/preprocess_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing ops."""
import functools
import tensorflow as tf
CROP_PROPORTION = 0.875 # Standard for ImageNet.
def random_apply(func, p, x):
"""Randomly apply function func to x with probability p."""
return tf.cond(
tf.less(
tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32),
tf.cast(p, tf.float32)), lambda: func(x), lambda: x)
def random_brightness(image, max_delta, impl='simclrv2'):
"""A multiplicative vs additive change of brightness."""
if impl == 'simclrv2':
factor = tf.random.uniform([], tf.maximum(1.0 - max_delta, 0),
1.0 + max_delta)
image = image * factor
elif impl == 'simclrv1':
image = tf.image.random_brightness(image, max_delta=max_delta)
else:
raise ValueError('Unknown impl {} for random brightness.'.format(impl))
return image
def to_grayscale(image, keep_channels=True):
image = tf.image.rgb_to_grayscale(image)
if keep_channels:
image = tf.tile(image, [1, 1, 3])
return image
def color_jitter_nonrand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0,
impl='simclrv2'):
"""Distorts the color of the image (jittering order is fixed).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x, brightness, contrast, saturation, hue):
"""Apply the i-th transformation."""
if brightness != 0 and i == 0:
x = random_brightness(x, max_delta=brightness, impl=impl)
elif contrast != 0 and i == 1:
x = tf.image.random_contrast(
x, lower=1 - contrast, upper=1 + contrast)
elif saturation != 0 and i == 2:
x = tf.image.random_saturation(
x, lower=1 - saturation, upper=1 + saturation)
elif hue != 0:
x = tf.image.random_hue(x, max_delta=hue)
return x
for i in range(4):
image = apply_transform(i, image, brightness, contrast, saturation, hue)
image = tf.clip_by_value(image, 0., 1.)
return image
def color_jitter_rand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0,
impl='simclrv2'):
"""Distorts the color of the image (jittering order is random).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x):
"""Apply the i-th transformation."""
def brightness_foo():
if brightness == 0:
return x
else:
return random_brightness(x, max_delta=brightness, impl=impl)
def contrast_foo():
if contrast == 0:
return x
else:
return tf.image.random_contrast(x, lower=1 - contrast,
upper=1 + contrast)
def saturation_foo():
if saturation == 0:
return x
else:
return tf.image.random_saturation(
x, lower=1 - saturation, upper=1 + saturation)
def hue_foo():
if hue == 0:
return x
else:
return tf.image.random_hue(x, max_delta=hue)
x = tf.cond(tf.less(i, 2),
lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
return x
perm = tf.random.shuffle(tf.range(4))
for i in range(4):
image = apply_transform(perm[i], image)
image = tf.clip_by_value(image, 0., 1.)
return image
def color_jitter(image, strength, random_order=True, impl='simclrv2'):
"""Distorts the color of the image.
Args:
image: The input image tensor.
    strength: A float, the strength of the color augmentation.
random_order: A bool, specifying whether to randomize the jittering order.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
brightness = 0.8 * strength
contrast = 0.8 * strength
saturation = 0.8 * strength
hue = 0.2 * strength
if random_order:
return color_jitter_rand(
image, brightness, contrast, saturation, hue, impl=impl)
else:
return color_jitter_nonrand(
image, brightness, contrast, saturation, hue, impl=impl)
def random_color_jitter(image,
p=1.0,
color_jitter_strength=1.0,
impl='simclrv2'):
"""Perform random color jitter."""
def _transform(image):
color_jitter_t = functools.partial(
color_jitter, strength=color_jitter_strength, impl=impl)
image = random_apply(color_jitter_t, p=0.8, x=image)
return random_apply(to_grayscale, p=0.2, x=image)
return random_apply(_transform, p=p, x=image)
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
"""Blurs the given image with separable convolution.
Args:
image: Tensor of shape [height, width, channels] and dtype float to blur.
    kernel_size: Integer Tensor for the size of the blur kernel. This should
      be an odd number. If it is an even number, the actual kernel size will
      be size + 1.
sigma: Sigma value for gaussian operator.
padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.
Returns:
A Tensor representing the blurred image.
"""
radius = tf.cast(kernel_size / 2, dtype=tf.int32)
kernel_size = radius * 2 + 1
x = tf.cast(tf.range(-radius, radius + 1), dtype=tf.float32)
blur_filter = tf.exp(-tf.pow(x, 2.0) /
(2.0 * tf.pow(tf.cast(sigma, dtype=tf.float32), 2.0)))
blur_filter /= tf.reduce_sum(blur_filter)
# One vertical and one horizontal filter.
blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
num_channels = tf.shape(image)[-1]
blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
expand_batch_dim = image.shape.ndims == 3
if expand_batch_dim:
# Tensorflow requires batched input to convolutions, which we can fake with
# an extra dimension.
image = tf.expand_dims(image, axis=0)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding=padding)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
if expand_batch_dim:
blurred = tf.squeeze(blurred, axis=0)
return blurred
def random_blur(image, height, width, p=0.5):
"""Randomly blur an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
del width
def _transform(image):
sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
return gaussian_blur(
image, kernel_size=height // 10, sigma=sigma, padding='SAME')
return random_apply(_transform, p=p, x=image)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: `Tensor` of image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
    area_range: An optional list of `float`s. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
(cropped image `Tensor`, distorted bbox `Tensor`).
"""
with tf.name_scope(scope or 'distorted_bounding_box_crop'):
shape = tf.shape(image)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
image = tf.image.crop_to_bounding_box(
image, offset_y, offset_x, target_height, target_width)
return image
def crop_and_resize(image, height, width):
"""Make a random crop and resize it to height `height` and width `width`.
Args:
image: Tensor representing the image.
height: Desired image height.
width: Desired image width.
Returns:
A `height` x `width` x channels Tensor holding a random crop of `image`.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
aspect_ratio = width / height
image = distorted_bounding_box_crop(
image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4 * aspect_ratio, 4. / 3. * aspect_ratio),
area_range=(0.08, 1.0),
max_attempts=100,
scope=None)
return tf.image.resize([image], [height, width],
method=tf.image.ResizeMethod.BICUBIC)[0]
def random_crop_with_resize(image, height, width, p=1.0):
"""Randomly crop and resize an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: Probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
def _transform(image): # pylint: disable=missing-docstring
image = crop_and_resize(image, height, width)
return image
return random_apply(_transform, p=p, x=image)
| 12,279 | 34.085714 | 79 | py |
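`gaussian_blur` builds a 1-D Gaussian and applies it twice (vertically, then horizontally) as a separable depthwise convolution. A quick numeric check of the kernel construction, repeated here standalone:

```python
import tensorflow as tf

# The 1-D kernel from gaussian_blur, checked for normalization: a blur
# filter that sums to one preserves average image brightness.
kernel_size, sigma = 9, 1.0
radius = kernel_size // 2
x = tf.cast(tf.range(-radius, radius + 1), tf.float32)
blur_filter = tf.exp(-tf.pow(x, 2.0) / (2.0 * sigma**2))
blur_filter /= tf.reduce_sum(blur_filter)
print(float(tf.reduce_sum(blur_filter)))  # 1.0
```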
models | models-master/official/projects/simclr/modeling/multitask_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-task image multi-taskSimCLR model definition."""
from typing import Dict, Text
from absl import logging
import tensorflow as tf
from official.modeling.multitask import base_model
from official.projects.simclr.configs import multitask_config as simclr_multitask_config
from official.projects.simclr.heads import simclr_head
from official.projects.simclr.modeling import simclr_model
from official.vision.modeling import backbones
PROJECTION_OUTPUT_KEY = 'projection_outputs'
SUPERVISED_OUTPUT_KEY = 'supervised_outputs'
class SimCLRMTModel(base_model.MultiTaskBaseModel):
"""A multi-task SimCLR model that does both pretrain and finetune."""
def __init__(self, config: simclr_multitask_config.SimCLRMTModelConfig,
**kwargs):
self._config = config
# Build shared backbone.
self._input_specs = tf.keras.layers.InputSpec(shape=[None] +
config.input_size)
l2_weight_decay = config.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
self._l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
self._backbone = backbones.factory.build_backbone(
input_specs=self._input_specs,
backbone_config=config.backbone,
norm_activation_config=config.norm_activation,
l2_regularizer=self._l2_regularizer)
# Build the shared projection head
norm_activation_config = self._config.norm_activation
projection_head_config = self._config.projection_head
self._projection_head = simclr_head.ProjectionHead(
proj_output_dim=projection_head_config.proj_output_dim,
num_proj_layers=projection_head_config.num_proj_layers,
ft_proj_idx=projection_head_config.ft_proj_idx,
kernel_regularizer=self._l2_regularizer,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon)
super().__init__(**kwargs)
def _instantiate_sub_tasks(self) -> Dict[Text, tf.keras.Model]:
tasks = {}
for model_config in self._config.heads:
# Build supervised head
supervised_head_config = model_config.supervised_head
if supervised_head_config:
if supervised_head_config.zero_init:
s_kernel_initializer = 'zeros'
else:
s_kernel_initializer = 'random_uniform'
supervised_head = simclr_head.ClassificationHead(
num_classes=supervised_head_config.num_classes,
kernel_initializer=s_kernel_initializer,
kernel_regularizer=self._l2_regularizer)
else:
supervised_head = None
tasks[model_config.task_name] = simclr_model.SimCLRModel(
input_specs=self._input_specs,
backbone=self._backbone,
projection_head=self._projection_head,
supervised_head=supervised_head,
mode=model_config.mode,
backbone_trainable=self._config.backbone_trainable)
return tasks
def initialize(self):
"""Loads the multi-task SimCLR model with a pretrained checkpoint."""
ckpt_dir_or_file = self._config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
return
logging.info('Loading pretrained %s', self._config.init_checkpoint_modules)
if self._config.init_checkpoint_modules == 'backbone':
pretrained_items = dict(backbone=self._backbone)
elif self._config.init_checkpoint_modules == 'backbone_projection':
pretrained_items = dict(
backbone=self._backbone, projection_head=self._projection_head)
else:
raise ValueError(
"Only 'backbone_projection' or 'backbone' can be used to "
'initialize the model.')
ckpt = tf.train.Checkpoint(**pretrained_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self._backbone, projection_head=self._projection_head)
| 5,142 | 39.496063 | 88 | py |
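`initialize()` above restores only a subset of modules from a checkpoint. A minimal sketch of that partial-restore pattern (the checkpoint path here is hypothetical):

```python
import tensorflow as tf

# Minimal sketch of partial checkpoint restore: checkpoint only the modules
# being transferred, then read with expect_partial() so any extra slots in
# the file (optimizer state, heads, ...) are ignored.
backbone = tf.keras.Sequential([tf.keras.layers.Dense(8)])
backbone.build((None, 4))

path = tf.train.Checkpoint(backbone=backbone).write('/tmp/demo/ckpt')  # hypothetical path

status = tf.train.Checkpoint(backbone=backbone).read(path)
status.expect_partial().assert_existing_objects_matched()
```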
models | models-master/official/projects/simclr/modeling/multitask_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multitask_model."""
import os.path
import tensorflow as tf
from official.projects.simclr.configs import multitask_config
from official.projects.simclr.modeling import multitask_model
from official.projects.simclr.modeling import simclr_model
class MultitaskModelTest(tf.test.TestCase):
def test_initialize_model_success(self):
ckpt_dir = self.get_temp_dir()
config = multitask_config.SimCLRMTModelConfig(
input_size=[64, 64, 3],
heads=(multitask_config.SimCLRMTHeadConfig(
mode=simclr_model.PRETRAIN, task_name='pretrain_simclr'),
multitask_config.SimCLRMTHeadConfig(
mode=simclr_model.FINETUNE, task_name='finetune_simclr')))
model = multitask_model.SimCLRMTModel(config)
self.assertIn('pretrain_simclr', model.sub_tasks)
self.assertIn('finetune_simclr', model.sub_tasks)
ckpt = tf.train.Checkpoint(backbone=model._backbone)
ckpt.save(os.path.join(ckpt_dir, 'ckpt'))
model.initialize()
if __name__ == '__main__':
tf.test.main()
| 1,657 | 35.043478 | 77 | py |
models | models-master/official/projects/simclr/modeling/simclr_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build simclr models."""
from typing import Optional
from absl import logging
import tensorflow as tf
layers = tf.keras.layers
PRETRAIN = 'pretrain'
FINETUNE = 'finetune'
PROJECTION_OUTPUT_KEY = 'projection_outputs'
SUPERVISED_OUTPUT_KEY = 'supervised_outputs'
class SimCLRModel(tf.keras.Model):
"""A classification model based on SimCLR framework."""
def __init__(self,
backbone: tf.keras.models.Model,
projection_head: tf.keras.layers.Layer,
supervised_head: Optional[tf.keras.layers.Layer] = None,
input_specs=layers.InputSpec(shape=[None, None, None, 3]),
mode: str = PRETRAIN,
backbone_trainable: bool = True,
**kwargs):
"""A classification model based on SimCLR framework.
Args:
backbone: a backbone network.
projection_head: a projection head network.
supervised_head: a head network for supervised learning, e.g.
classification head.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
mode: `str` indicates mode of training to be executed.
backbone_trainable: `bool` whether the backbone is trainable or not.
**kwargs: keyword arguments to be passed.
"""
super(SimCLRModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'projection_head': projection_head,
'supervised_head': supervised_head,
'input_specs': input_specs,
'mode': mode,
'backbone_trainable': backbone_trainable,
}
self._input_specs = input_specs
self._backbone = backbone
self._projection_head = projection_head
self._supervised_head = supervised_head
self._mode = mode
self._backbone_trainable = backbone_trainable
# Set whether the backbone is trainable
self._backbone.trainable = backbone_trainable
def call(self, inputs, training=None, **kwargs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
model_outputs = {}
if training and self._mode == PRETRAIN:
num_transforms = 2
# Split channels, and optionally apply extra batched augmentation.
# (bsz, h, w, c*num_transforms) -> [(bsz, h, w, c), ....]
features_list = tf.split(
inputs, num_or_size_splits=num_transforms, axis=-1)
# (num_transforms * bsz, h, w, c)
features = tf.concat(features_list, 0)
else:
num_transforms = 1
features = inputs
# Base network forward pass.
endpoints = self._backbone(
features, training=training and self._backbone_trainable)
features = endpoints[max(endpoints.keys())]
projection_inputs = layers.GlobalAveragePooling2D()(features)
# Add heads.
projection_outputs, supervised_inputs = self._projection_head(
projection_inputs, training)
if self._supervised_head is not None:
if self._mode == PRETRAIN:
        logging.info('Ignoring gradient from supervised outputs!')
# When performing pretraining and supervised_head together, we do not
# want information from supervised evaluation flowing back into
# pretraining network. So we put a stop_gradient.
supervised_outputs = self._supervised_head(
tf.stop_gradient(supervised_inputs), training)
else:
supervised_outputs = self._supervised_head(supervised_inputs, training)
else:
supervised_outputs = None
model_outputs.update({
PROJECTION_OUTPUT_KEY: projection_outputs,
SUPERVISED_OUTPUT_KEY: supervised_outputs
})
return model_outputs
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
if self._supervised_head is not None:
items = dict(
backbone=self.backbone,
projection_head=self.projection_head,
supervised_head=self.supervised_head)
else:
items = dict(backbone=self.backbone, projection_head=self.projection_head)
return items
@property
def backbone(self):
return self._backbone
@property
def projection_head(self):
return self._projection_head
@property
def supervised_head(self):
return self._supervised_head
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
self._mode = value
@property
def backbone_trainable(self):
return self._backbone_trainable
@backbone_trainable.setter
def backbone_trainable(self, value):
self._backbone_trainable = value
self._backbone.trainable = value
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| 5,335 | 31.536585 | 125 | py |
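The `tf.stop_gradient` in `SimCLRModel.call` is what keeps the supervised evaluation head from training the shared encoder during pretraining. A tiny gradient-tape sketch of the effect:

```python
import tensorflow as tf

# stop_gradient blocks the backward pass: the 'supervised' loss sees the
# feature value but contributes no gradient to the encoder weight w.
w = tf.Variable(2.0)
with tf.GradientTape() as tape:
  feature = w * 3.0                       # stands in for the encoder output
  loss = tf.stop_gradient(feature) * 5.0  # stands in for the supervised path
print(tape.gradient(loss, w))  # None
```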
models | models-master/official/projects/simclr/modeling/simclr_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for SimCLR model."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.simclr.heads import simclr_head
from official.projects.simclr.modeling import simclr_model
from official.vision.modeling import backbones
class SimCLRModelTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 3, 0),
(128, 3, 1),
(128, 1, 0),
(128, 1, 1),
)
def test_model_creation(self, project_dim, num_proj_layers, ft_proj_idx):
input_size = 224
inputs = np.random.rand(2, input_size, input_size, 3)
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3])
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.ResNet(model_id=50, activation='relu',
input_specs=input_specs)
projection_head = simclr_head.ProjectionHead(
proj_output_dim=project_dim,
num_proj_layers=num_proj_layers,
ft_proj_idx=ft_proj_idx
)
num_classes = 10
    supervised_head = simclr_head.ClassificationHead(
        num_classes=num_classes
    )
model = simclr_model.SimCLRModel(
input_specs=input_specs,
backbone=backbone,
projection_head=projection_head,
supervised_head=supervised_head,
mode=simclr_model.PRETRAIN
)
outputs = model(inputs)
projection_outputs = outputs[simclr_model.PROJECTION_OUTPUT_KEY]
supervised_outputs = outputs[simclr_model.SUPERVISED_OUTPUT_KEY]
self.assertAllEqual(projection_outputs.shape.as_list(),
[2, project_dim])
self.assertAllEqual([2, num_classes],
supervised_outputs.numpy().shape)
if __name__ == '__main__':
tf.test.main()
| 2,398 | 32.319444 | 75 | py |
models | models-master/official/projects/simclr/modeling/layers/nn_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from official.projects.simclr.modeling.layers import nn_blocks
class DenseBNTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(64, True, True),
(64, True, False),
(64, False, True),
)
def test_pass_through(self, output_dim, use_bias, use_normalization):
test_layer = nn_blocks.DenseBN(
output_dim=output_dim,
use_bias=use_bias,
use_normalization=use_normalization
)
x = tf.keras.Input(shape=(64,))
out_x = test_layer(x)
self.assertAllEqual(out_x.shape.as_list(), [None, output_dim])
# kernel of the dense layer
train_var_len = 1
if use_normalization:
if use_bias:
# batch norm introduce two trainable variables
train_var_len += 2
else:
# center is set to False if not use bias
train_var_len += 1
else:
if use_bias:
# bias of dense layer
train_var_len += 1
self.assertLen(test_layer.trainable_variables, train_var_len)
if __name__ == '__main__':
tf.test.main()
| 1,723 | 28.220339 | 74 | py |
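The variable counting in the test above follows from how `DenseBN` drops the dense bias when normalization is enabled. A plain-Keras sketch reproducing the `use_bias=True, use_normalization=True` count:

```python
import tensorflow as tf

# Plain-Keras sketch of the use_bias=True, use_normalization=True case:
# dense kernel (1) + batch-norm gamma and beta (2) = 3 trainable variables.
# The dense bias is dropped because batch norm's beta makes it redundant.
dense = tf.keras.layers.Dense(64, use_bias=False)
bn = tf.keras.layers.BatchNormalization()
x = tf.keras.Input(shape=(64,))
y = bn(dense(x))
print(len(dense.trainable_variables) + len(bn.trainable_variables))  # 3
```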
models | models-master/official/projects/simclr/modeling/layers/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for simclr neural networks."""
from typing import Text, Optional
import tensorflow as tf
from official.modeling import tf_utils
regularizers = tf.keras.regularizers
class DenseBN(tf.keras.layers.Layer):
"""Modified Dense layer to help build simclr system.
The layer is a standards combination of Dense, BatchNorm and Activation.
"""
def __init__(
self,
output_dim: int,
use_bias: bool = True,
use_normalization: bool = False,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
activation: Optional[Text] = 'relu',
kernel_initializer: Text = 'VarianceScaling',
kernel_regularizer: Optional[regularizers.Regularizer] = None,
bias_regularizer: Optional[regularizers.Regularizer] = None,
name='linear_layer',
**kwargs):
"""Customized Dense layer.
Args:
output_dim: `int` size of output dimension.
      use_bias: if True, use bias in the dense layer.
use_normalization: if True, use batch normalization.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
activation: `str` name of the activation function.
kernel_initializer: kernel_initializer for convolutional layers.
      kernel_regularizer: tf.keras.regularizers.Regularizer object for the
        kernel. Defaults to None.
      bias_regularizer: tf.keras.regularizers.Regularizer object for the bias.
        Defaults to None.
name: `str`, name of the layer.
**kwargs: keyword arguments to be passed.
"""
    # Note: use_bias is ignored by the dense layer when use_normalization=True.
    # However, it still controls the batch norm's `center` argument.
super(DenseBN, self).__init__(**kwargs)
self._output_dim = output_dim
self._use_bias = use_bias
self._use_normalization = use_normalization
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._activation = activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._name = name
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
if activation:
self._activation_fn = tf_utils.get_activation(activation)
else:
self._activation_fn = None
def get_config(self):
config = {
'output_dim': self._output_dim,
'use_bias': self._use_bias,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'use_normalization': self._use_normalization,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
base_config = super(DenseBN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._dense0 = tf.keras.layers.Dense(
self._output_dim,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_bias=self._use_bias and not self._use_normalization)
if self._use_normalization:
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
center=self._use_bias,
scale=True)
super(DenseBN, self).build(input_shape)
def call(self, inputs, training=None):
assert inputs.shape.ndims == 2, inputs.shape
x = self._dense0(inputs)
if self._use_normalization:
x = self._norm0(x)
if self._activation:
x = self._activation_fn(x)
return x
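# A minimal usage sketch (illustrative, not part of the library API): DenseBN
# maps rank-2 inputs to `output_dim` features with optional BN and activation.
#
#   layer = DenseBN(output_dim=128, use_normalization=True, activation='relu')
#   y = layer(tf.random.normal([8, 64]), training=True)  # -> shape [8, 128]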
| 4,823 | 35 | 78 | py |
models | models-master/official/projects/simclr/tasks/simclr.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image SimCLR task definition.
SimCLR training supports two different modes:
- pretrain
- fine-tuning
For the above two different modes, the following components are different in
the task definition:
- training data format
- training loss
- projection_head and/or supervised_head
"""
from typing import Dict, Optional
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions
from official.core import input_reader
from official.core import task_factory
from official.modeling import optimization
from official.modeling import performance
from official.modeling import tf_utils
from official.projects.simclr.configs import simclr as exp_cfg
from official.projects.simclr.dataloaders import simclr_input
from official.projects.simclr.heads import simclr_head
from official.projects.simclr.losses import contrastive_losses
from official.projects.simclr.modeling import simclr_model
from official.vision.modeling import backbones
OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
@task_factory.register_task_cls(exp_cfg.SimCLRPretrainTask)
class SimCLRPretrainTask(base_task.Task):
"""A task for image classification."""
def create_optimizer(self,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
Returns:
A tf.optimizers.Optimizer object.
"""
if (optimizer_config.optimizer.type == 'lars' and
self.task_config.loss.l2_weight_decay > 0.0):
raise ValueError('The l2_weight_decay cannot be used together with lars '
'optimizer. Please set it to 0.')
opt_factory = optimization.OptimizerFactory(optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
    # Configures the optimizer when loss_scale is set in the runtime config.
    # This helps avoid overflow/underflow for float16 computations.
if runtime_config and runtime_config.loss_scale:
optimizer = performance.configure_optimizer(
optimizer,
use_float16=runtime_config.mixed_precision_dtype == 'float16',
loss_scale=runtime_config.loss_scale)
return optimizer
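  # Sketch of an optimization config consumed above (illustrative; field names
  # follow official.modeling.optimization.OptimizationConfig and the values
  # are placeholders, not tuned hyperparameters):
  #
  #   optimizer_config = optimization.OptimizationConfig({
  #       'optimizer': {'type': 'lars', 'lars': {'momentum': 0.9}},
  #       'learning_rate': {
  #           'type': 'cosine',
  #           'cosine': {'initial_learning_rate': 0.3, 'decay_steps': 100000}
  #       },
  #       'warmup': {'type': 'linear', 'linear': {'warmup_steps': 1000}},
  #   })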
def build_model(self):
model_config = self.task_config.model
input_specs = tf.keras.layers.InputSpec(shape=[None] +
model_config.input_size)
l2_weight_decay = self.task_config.loss.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
# Build backbone
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=model_config.norm_activation,
l2_regularizer=l2_regularizer)
# Build projection head
norm_activation_config = model_config.norm_activation
projection_head_config = model_config.projection_head
projection_head = simclr_head.ProjectionHead(
proj_output_dim=projection_head_config.proj_output_dim,
num_proj_layers=projection_head_config.num_proj_layers,
ft_proj_idx=projection_head_config.ft_proj_idx,
kernel_regularizer=l2_regularizer,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon)
# Build supervised head
supervised_head_config = model_config.supervised_head
if supervised_head_config:
if supervised_head_config.zero_init:
s_kernel_initializer = 'zeros'
else:
s_kernel_initializer = 'random_uniform'
supervised_head = simclr_head.ClassificationHead(
num_classes=supervised_head_config.num_classes,
kernel_initializer=s_kernel_initializer,
kernel_regularizer=l2_regularizer)
else:
supervised_head = None
model = simclr_model.SimCLRModel(
input_specs=input_specs,
backbone=backbone,
projection_head=projection_head,
supervised_head=supervised_head,
mode=model_config.mode,
backbone_trainable=model_config.backbone_trainable)
logging.info(model.get_config())
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self, params, input_context=None):
input_size = self.task_config.model.input_size
if params.tfds_name:
decoder = simclr_input.TFDSDecoder(params.decoder.decode_label)
else:
decoder = simclr_input.Decoder(params.decoder.decode_label)
parser = simclr_input.Parser(
output_size=input_size[:2],
aug_rand_crop=params.parser.aug_rand_crop,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_color_distort=params.parser.aug_color_distort,
aug_color_jitter_strength=params.parser.aug_color_jitter_strength,
aug_color_jitter_impl=params.parser.aug_color_jitter_impl,
aug_rand_blur=params.parser.aug_rand_blur,
parse_label=params.parser.parse_label,
test_crop=params.parser.test_crop,
mode=params.parser.mode,
dtype=params.dtype)
reader = input_reader.InputReader(
params,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
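  # Sketch of the train_data fields consumed above (illustrative YAML; the
  # authoritative schema lives in official/projects/simclr/configs/simclr.py):
  #
  #   train_data:
  #     input_path: '/path/to/train*.tfrecord'
  #     is_training: true
  #     global_batch_size: 4096
  #     parser:
  #       mode: 'pretrain'
  #       aug_color_distort: true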
def build_losses(self,
labels,
model_outputs,
aux_losses=None) -> Dict[str, tf.Tensor]:
# Compute contrastive relative loss
con_losses_obj = contrastive_losses.ContrastiveLoss(
projection_norm=self.task_config.loss.projection_norm,
temperature=self.task_config.loss.temperature)
    # The projection outputs from the model have shape (2 * bsz, project_dim):
    # the two augmented views are concatenated along the batch axis.
projection_outputs = model_outputs[simclr_model.PROJECTION_OUTPUT_KEY]
projection1, projection2 = tf.split(projection_outputs, 2, 0)
contrast_loss, (contrast_logits, contrast_labels) = con_losses_obj(
projection1=projection1, projection2=projection2)
contrast_accuracy = tf.equal(
tf.argmax(contrast_labels, axis=1), tf.argmax(contrast_logits, axis=1))
contrast_accuracy = tf.reduce_mean(tf.cast(contrast_accuracy, tf.float32))
contrast_prob = tf.nn.softmax(contrast_logits)
contrast_entropy = -tf.reduce_mean(
tf.reduce_sum(contrast_prob * tf.math.log(contrast_prob + 1e-8), -1))
model_loss = contrast_loss
losses = {
'contrast_loss': contrast_loss,
'contrast_accuracy': contrast_accuracy,
'contrast_entropy': contrast_entropy
}
if self.task_config.model.supervised_head is not None:
outputs = model_outputs[simclr_model.SUPERVISED_OUTPUT_KEY]
labels = tf.concat([labels, labels], 0)
if self.task_config.evaluation.one_hot:
sup_loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels,
outputs)
else:
sup_loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels,
outputs)
sup_loss = tf.reduce_mean(sup_loss)
      if self.task_config.evaluation.one_hot:
        label_acc = tf.equal(
            tf.argmax(labels, axis=1), tf.argmax(outputs, axis=1))
      else:
        # Sparse labels hold class ids directly; compare them to the argmax.
        label_acc = tf.equal(
            tf.cast(labels, tf.int64), tf.argmax(outputs, axis=1))
      label_acc = tf.reduce_mean(tf.cast(label_acc, tf.float32))
model_loss = contrast_loss + sup_loss
losses.update({
'accuracy': label_acc,
'supervised_loss': sup_loss,
})
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss = model_loss + reg_loss
losses['total_loss'] = total_loss
return losses
def build_metrics(self, training=True):
if training:
metrics = []
metric_names = [
'total_loss', 'contrast_loss', 'contrast_accuracy', 'contrast_entropy'
]
if self.task_config.model.supervised_head:
metric_names.extend(['supervised_loss', 'accuracy'])
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
else:
k = self.task_config.evaluation.top_k
if self.task_config.evaluation.one_hot:
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
features, labels = inputs
# To do a sanity check that we absolutely use no labels when pretraining, we
# can set the labels here to zero.
if self.task_config.train_data.input_set_label_to_zero:
labels *= 0
if (self.task_config.model.supervised_head is not None and
self.task_config.evaluation.one_hot):
num_classes = self.task_config.model.supervised_head.num_classes
labels = tf.one_hot(labels, num_classes)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
      # mixed_float16 or mixed_bfloat16 to ensure the output is cast to float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
losses = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
logging.info('Trainable variables:')
for var in tvars:
logging.info(var.name)
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: losses['total_loss']}
for m in metrics:
m.update_state(losses[m.name])
logs.update({m.name: m.result()})
return logs
def validation_step(self, inputs, model, metrics=None):
if self.task_config.model.supervised_head is None:
      raise ValueError(
          'Cannot run eval during pretraining without a supervised head.')
features, labels = inputs
if self.task_config.evaluation.one_hot:
num_classes = self.task_config.model.supervised_head.num_classes
labels = tf.one_hot(labels, num_classes)
outputs = model(
features, training=False)[simclr_model.SUPERVISED_OUTPUT_KEY]
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
logs = {self.loss: 0}
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
@task_factory.register_task_cls(exp_cfg.SimCLRFinetuneTask)
class SimCLRFinetuneTask(base_task.Task):
"""A task for image classification."""
def create_optimizer(self,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
Returns:
A tf.optimizers.Optimizer object.
"""
if (optimizer_config.optimizer.type == 'lars' and
self.task_config.loss.l2_weight_decay > 0.0):
raise ValueError('The l2_weight_decay cannot be used together with lars '
'optimizer. Please set it to 0.')
opt_factory = optimization.OptimizerFactory(optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
    # Configures the optimizer when loss_scale is set in the runtime config.
    # This helps avoid overflow/underflow for float16 computations.
if runtime_config and runtime_config.loss_scale:
optimizer = performance.configure_optimizer(
optimizer,
use_float16=runtime_config.mixed_precision_dtype == 'float16',
loss_scale=runtime_config.loss_scale)
return optimizer
def build_model(self):
model_config = self.task_config.model
input_specs = tf.keras.layers.InputSpec(shape=[None] +
model_config.input_size)
l2_weight_decay = self.task_config.loss.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=model_config.norm_activation,
l2_regularizer=l2_regularizer)
norm_activation_config = model_config.norm_activation
projection_head_config = model_config.projection_head
projection_head = simclr_head.ProjectionHead(
proj_output_dim=projection_head_config.proj_output_dim,
num_proj_layers=projection_head_config.num_proj_layers,
ft_proj_idx=projection_head_config.ft_proj_idx,
kernel_regularizer=l2_regularizer,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon)
supervised_head_config = model_config.supervised_head
if supervised_head_config.zero_init:
s_kernel_initializer = 'zeros'
else:
s_kernel_initializer = 'random_uniform'
supervised_head = simclr_head.ClassificationHead(
num_classes=supervised_head_config.num_classes,
kernel_initializer=s_kernel_initializer,
kernel_regularizer=l2_regularizer)
model = simclr_model.SimCLRModel(
input_specs=input_specs,
backbone=backbone,
projection_head=projection_head,
supervised_head=supervised_head,
mode=model_config.mode,
backbone_trainable=model_config.backbone_trainable)
logging.info(model.get_config())
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone_projection':
ckpt = tf.train.Checkpoint(
backbone=model.backbone, projection_head=model.projection_head)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
      raise ValueError(
          "Only 'all', 'backbone_projection' or 'backbone' can be used to "
          'initialize the model.')
# If the checkpoint is from pretraining, reset the following parameters
model.backbone_trainable = self.task_config.model.backbone_trainable
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self, params, input_context=None):
input_size = self.task_config.model.input_size
if params.tfds_name:
decoder = simclr_input.TFDSDecoder(params.decoder.decode_label)
else:
decoder = simclr_input.Decoder(params.decoder.decode_label)
parser = simclr_input.Parser(
output_size=input_size[:2],
parse_label=params.parser.parse_label,
test_crop=params.parser.test_crop,
mode=params.parser.mode,
dtype=params.dtype)
reader = input_reader.InputReader(
params,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, labels, model_outputs, aux_losses=None):
"""Sparse categorical cross entropy loss.
Args:
labels: labels.
model_outputs: Output logits of the classifier.
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
losses_config = self.task_config.loss
if losses_config.one_hot:
total_loss = tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=True,
label_smoothing=losses_config.label_smoothing)
else:
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=True)
total_loss = tf_utils.safe_mean(total_loss)
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self, training=True):
"""Gets streaming metrics for training/validation."""
k = self.task_config.evaluation.top_k
if self.task_config.evaluation.one_hot:
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
if self.task_config.loss.one_hot:
num_classes = self.task_config.model.supervised_head.num_classes
labels = tf.one_hot(labels, num_classes)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(
features, training=True)[simclr_model.SUPERVISED_OUTPUT_KEY]
# Casting output layer as float32 is necessary when mixed_precision is
      # mixed_float16 or mixed_bfloat16 to ensure the output is cast to float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
logging.info('Trainable variables:')
for var in tvars:
logging.info(var.name)
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
if self.task_config.loss.one_hot:
num_classes = self.task_config.model.supervised_head.num_classes
labels = tf.one_hot(labels, num_classes)
outputs = self.inference_step(features,
model)[simclr_model.SUPERVISED_OUTPUT_KEY]
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
| 24,422 | 37.400943 | 80 | py |
models | models-master/official/projects/simclr/losses/contrastive_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contrastive loss functions."""
import functools
import tensorflow as tf
LARGE_NUM = 1e9
def cross_replica_concat(tensor: tf.Tensor, num_replicas: int) -> tf.Tensor:
"""Reduce a concatenation of the `tensor` across multiple replicas.
Args:
tensor: `tf.Tensor` to concatenate.
num_replicas: `int` number of replicas.
Returns:
Tensor of the same rank as `tensor` with first dimension `num_replicas`
times larger.
"""
if num_replicas <= 1:
return tensor
replica_context = tf.distribute.get_replica_context()
with tf.name_scope('cross_replica_concat'):
# This creates a tensor that is like the input tensor but has an added
# replica dimension as the outermost dimension. On each replica it will
# contain the local values and zeros for all other values that need to be
# fetched from other replicas.
ext_tensor = tf.scatter_nd(
indices=[[replica_context.replica_id_in_sync_group]],
updates=[tensor],
shape=tf.concat([[num_replicas], tf.shape(tensor)], axis=0))
# As every value is only present on one replica and 0 in all others, adding
# them all together will result in the full tensor on all replicas.
ext_tensor = replica_context.all_reduce(tf.distribute.ReduceOp.SUM,
ext_tensor)
# Flatten the replica dimension.
# The first dimension size will be: tensor.shape[0] * num_replicas
# Using [-1] trick to support also scalar input.
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
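# A minimal sketch of calling the helper under a distribution strategy
# (illustrative; `per_replica_x` is a hypothetical per-replica [bsz, dim]
# tensor):
#
#   strategy = tf.distribute.MirroredStrategy()
#   num_replicas = strategy.num_replicas_in_sync
#
#   @tf.function
#   def gather_fn(x):
#     return cross_replica_concat(x, num_replicas)  # -> [bsz * replicas, dim]
#
#   gathered = strategy.run(gather_fn, args=(per_replica_x,))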
class ContrastiveLoss(object):
"""Contrastive training loss function."""
def __init__(self, projection_norm: bool = True, temperature: float = 1.0):
"""Initializes `ContrastiveLoss`.
Args:
      projection_norm: whether to l2-normalize the projection vectors.
temperature: a `floating` number for temperature scaling.
"""
self._projection_norm = projection_norm
self._temperature = temperature
def __call__(self, projection1: tf.Tensor, projection2: tf.Tensor):
"""Compute the contrastive loss for contrastive learning.
Note that projection2 is generated with the same batch (same order) of raw
images, but with different augmentation. More specifically:
image[i] -> random augmentation 1 -> projection -> projection1[i]
image[i] -> random augmentation 2 -> projection -> projection2[i]
Args:
projection1: projection vector of shape (bsz, dim).
projection2: projection vector of shape (bsz, dim).
Returns:
A loss scalar.
The logits for contrastive prediction task.
The labels for contrastive prediction task.
"""
# Get (normalized) hidden1 and hidden2.
if self._projection_norm:
projection1 = tf.math.l2_normalize(projection1, -1)
projection2 = tf.math.l2_normalize(projection2, -1)
batch_size = tf.shape(projection1)[0]
p1_local, p2_local = projection1, projection2
# Gather projection1/projection2 across replicas and create local labels.
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
if num_replicas_in_sync > 1:
p1_global = cross_replica_concat(p1_local, num_replicas_in_sync)
p2_global = cross_replica_concat(p2_local, num_replicas_in_sync)
global_batch_size = tf.shape(p1_global)[0]
replica_context = tf.distribute.get_replica_context()
replica_id = tf.cast(
tf.cast(replica_context.replica_id_in_sync_group, tf.uint32),
tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels = tf.one_hot(labels_idx, global_batch_size * 2)
masks = tf.one_hot(labels_idx, global_batch_size)
else:
p1_global = p1_local
p2_global = p2_local
labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
masks = tf.one_hot(tf.range(batch_size), batch_size)
tb_matmul = functools.partial(tf.matmul, transpose_b=True)
logits_aa = tb_matmul(p1_local, p1_global) / self._temperature
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = tb_matmul(p2_local, p2_global) / self._temperature
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = tb_matmul(p1_local, p2_global) / self._temperature
logits_ba = tb_matmul(p2_local, p1_global) / self._temperature
loss_a_local = tf.nn.softmax_cross_entropy_with_logits(
labels, tf.concat([logits_ab, logits_aa], 1))
loss_b_local = tf.nn.softmax_cross_entropy_with_logits(
labels, tf.concat([logits_ba, logits_bb], 1))
loss_local = tf.reduce_mean(loss_a_local + loss_b_local)
return loss_local, (logits_ab, labels)
def get_config(self):
config = {
'projection_norm': self._projection_norm,
'temperature': self._temperature,
}
return config
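# For reference, with l2-normalized projections z and temperature t the
# per-example NT-Xent term implemented above is
#   l(i, j) = -log(exp(sim(z_i, z_j) / t) / sum_{k != i} exp(sim(z_i, z_k) / t))
# summed over both augmented views and averaged over the batch. A minimal
# usage sketch (illustrative shapes):
#
#   loss_fn = ContrastiveLoss(projection_norm=True, temperature=0.1)
#   p1 = tf.random.normal([32, 128])  # projections of augmented view 1
#   p2 = tf.random.normal([32, 128])  # projections of augmented view 2
#   loss, (logits, labels) = loss_fn(projection1=p1, projection2=p2)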
| 5,426 | 37.21831 | 80 | py |
models | models-master/official/projects/simclr/losses/contrastive_losses_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.simclr.losses import contrastive_losses
class ContrastiveLossesTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(1.0, 0.5)
def test_contrastive_loss_computation(self, temperature):
batch_size = 2
project_dim = 16
projection_norm = False
p_1_arr = np.random.rand(batch_size, project_dim)
p_1 = tf.constant(p_1_arr, dtype=tf.float32)
p_2_arr = np.random.rand(batch_size, project_dim)
p_2 = tf.constant(p_2_arr, dtype=tf.float32)
losses_obj = contrastive_losses.ContrastiveLoss(
projection_norm=projection_norm,
temperature=temperature)
comp_contrastive_loss = losses_obj(
projection1=p_1,
projection2=p_2)
def _exp_sim(p1, p2):
return np.exp(np.matmul(p1, p2) / temperature)
l11 = - np.log(
_exp_sim(p_1_arr[0], p_2_arr[0]) /
(_exp_sim(p_1_arr[0], p_1_arr[1])
+ _exp_sim(p_1_arr[0], p_2_arr[1])
+ _exp_sim(p_1_arr[0], p_2_arr[0]))
) - np.log(
_exp_sim(p_1_arr[0], p_2_arr[0]) /
(_exp_sim(p_2_arr[0], p_2_arr[1])
+ _exp_sim(p_2_arr[0], p_1_arr[1])
+ _exp_sim(p_1_arr[0], p_2_arr[0]))
)
l22 = - np.log(
_exp_sim(p_1_arr[1], p_2_arr[1]) /
(_exp_sim(p_1_arr[1], p_1_arr[0])
+ _exp_sim(p_1_arr[1], p_2_arr[0])
+ _exp_sim(p_1_arr[1], p_2_arr[1]))
) - np.log(
_exp_sim(p_1_arr[1], p_2_arr[1]) /
(_exp_sim(p_2_arr[1], p_2_arr[0])
+ _exp_sim(p_2_arr[1], p_1_arr[0])
+ _exp_sim(p_1_arr[1], p_2_arr[1]))
)
exp_contrastive_loss = (l11 + l22) / 2.0
self.assertAlmostEqual(comp_contrastive_loss[0].numpy(),
exp_contrastive_loss, places=5)
if __name__ == '__main__':
tf.test.main()
| 2,496 | 31.012821 | 74 | py |
models | models-master/official/projects/simclr/heads/simclr_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SimCLR prediction heads."""
from typing import Optional, Text
import tensorflow as tf
from official.projects.simclr.modeling.layers import nn_blocks
regularizers = tf.keras.regularizers
layers = tf.keras.layers
class ProjectionHead(tf.keras.layers.Layer):
"""Projection head."""
def __init__(
self,
num_proj_layers: int = 3,
proj_output_dim: Optional[int] = None,
ft_proj_idx: int = 0,
kernel_initializer: Text = 'VarianceScaling',
kernel_regularizer: Optional[regularizers.Regularizer] = None,
bias_regularizer: Optional[regularizers.Regularizer] = None,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
**kwargs):
"""The projection head used during pretraining of SimCLR.
Args:
num_proj_layers: `int` number of Dense layers used.
proj_output_dim: `int` output dimension of projection head, i.e., output
dimension of the final layer.
ft_proj_idx: `int` index of layer to use during fine-tuning. 0 means no
projection head during fine tuning, -1 means the final layer.
kernel_initializer: kernel_initializer for convolutional layers.
      kernel_regularizer: tf.keras.regularizers.Regularizer object for the
        kernel. Defaults to None.
      bias_regularizer: tf.keras.regularizers.Regularizer object for the bias.
        Defaults to None.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super(ProjectionHead, self).__init__(**kwargs)
assert proj_output_dim is not None or num_proj_layers == 0
assert ft_proj_idx <= num_proj_layers, (num_proj_layers, ft_proj_idx)
self._proj_output_dim = proj_output_dim
self._num_proj_layers = num_proj_layers
self._ft_proj_idx = ft_proj_idx
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._layers = []
def get_config(self):
config = {
'proj_output_dim': self._proj_output_dim,
'num_proj_layers': self._num_proj_layers,
'ft_proj_idx': self._ft_proj_idx,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
        'use_sync_bn': self._use_sync_bn,
        'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(ProjectionHead, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._layers = []
if self._num_proj_layers > 0:
intermediate_dim = int(input_shape[-1])
for j in range(self._num_proj_layers):
if j != self._num_proj_layers - 1:
# for the middle layers, use bias and relu for the output.
layer = nn_blocks.DenseBN(
output_dim=intermediate_dim,
use_bias=True,
use_normalization=True,
activation='relu',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
name='nl_%d' % j)
else:
# for the final layer, neither bias nor relu is used.
layer = nn_blocks.DenseBN(
output_dim=self._proj_output_dim,
use_bias=False,
use_normalization=True,
activation=None,
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
name='nl_%d' % j)
self._layers.append(layer)
super(ProjectionHead, self).build(input_shape)
def call(self, inputs, training=None):
hiddens_list = [tf.identity(inputs, 'proj_head_input')]
if self._num_proj_layers == 0:
proj_head_output = inputs
proj_finetune_output = inputs
else:
for j in range(self._num_proj_layers):
hiddens = self._layers[j](hiddens_list[-1], training)
hiddens_list.append(hiddens)
proj_head_output = tf.identity(
hiddens_list[-1], 'proj_head_output')
proj_finetune_output = tf.identity(
hiddens_list[self._ft_proj_idx], 'proj_finetune_output')
# The first element is the output of the projection head.
# The second element is the input of the finetune head.
return proj_head_output, proj_finetune_output
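# Illustrative behavior: with num_proj_layers=3 and ft_proj_idx=0, the head
# stacks two Dense+BN+ReLU blocks plus a final Dense+BN block; the first
# output feeds the contrastive loss, while the second (here the raw backbone
# features) is what a fine-tuning classifier is attached to.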
class ClassificationHead(tf.keras.layers.Layer):
"""Classification Head."""
def __init__(
self,
num_classes: int,
kernel_initializer: Text = 'random_uniform',
kernel_regularizer: Optional[regularizers.Regularizer] = None,
bias_regularizer: Optional[regularizers.Regularizer] = None,
name: Text = 'head_supervised',
**kwargs):
"""The classification head used during pretraining or fine tuning.
Args:
num_classes: `int` size of the output dimension or number of classes
for classification task.
kernel_initializer: kernel_initializer for convolutional layers.
      kernel_regularizer: tf.keras.regularizers.Regularizer object for the
        kernel. Defaults to None.
      bias_regularizer: tf.keras.regularizers.Regularizer object for the bias.
        Defaults to None.
name: `str`, name of the layer.
**kwargs: keyword arguments to be passed.
"""
super(ClassificationHead, self).__init__(name=name, **kwargs)
self._num_classes = num_classes
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._name = name
def get_config(self):
config = {
'num_classes': self._num_classes,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
base_config = super(ClassificationHead, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._dense0 = layers.Dense(
units=self._num_classes,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=None)
super(ClassificationHead, self).build(input_shape)
def call(self, inputs, training=None):
inputs = self._dense0(inputs)
return inputs
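# Minimal usage sketch (illustrative):
#
#   head = ClassificationHead(num_classes=1000)
#   logits = head(tf.random.normal([8, 2048]))  # -> shape [8, 1000]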
| 7,666 | 37.527638 | 78 | py |
models | models-master/official/projects/simclr/heads/simclr_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.simclr.heads import simclr_head
class ProjectionHeadTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(0, None),
(1, 128),
(2, 128),
)
def test_head_creation(self, num_proj_layers, proj_output_dim):
test_layer = simclr_head.ProjectionHead(
num_proj_layers=num_proj_layers,
proj_output_dim=proj_output_dim)
input_dim = 64
x = tf.keras.Input(shape=(input_dim,))
proj_head_output, proj_finetune_output = test_layer(x)
proj_head_output_dim = input_dim
if num_proj_layers > 0:
proj_head_output_dim = proj_output_dim
self.assertAllEqual(proj_head_output.shape.as_list(),
[None, proj_head_output_dim])
if num_proj_layers > 0:
proj_finetune_output_dim = input_dim
self.assertAllEqual(proj_finetune_output.shape.as_list(),
[None, proj_finetune_output_dim])
@parameterized.parameters(
(0, None, 0),
(1, 128, 0),
(2, 128, 1),
(2, 128, 2),
)
def test_outputs(self, num_proj_layers, proj_output_dim, ft_proj_idx):
test_layer = simclr_head.ProjectionHead(
num_proj_layers=num_proj_layers,
proj_output_dim=proj_output_dim,
ft_proj_idx=ft_proj_idx
)
input_dim = 64
batch_size = 2
inputs = np.random.rand(batch_size, input_dim)
proj_head_output, proj_finetune_output = test_layer(inputs)
if num_proj_layers == 0:
self.assertAllClose(inputs, proj_head_output)
self.assertAllClose(inputs, proj_finetune_output)
else:
self.assertAllEqual(proj_head_output.shape.as_list(),
[batch_size, proj_output_dim])
if ft_proj_idx == 0:
self.assertAllClose(inputs, proj_finetune_output)
elif ft_proj_idx < num_proj_layers:
self.assertAllEqual(proj_finetune_output.shape.as_list(),
[batch_size, input_dim])
else:
self.assertAllEqual(proj_finetune_output.shape.as_list(),
[batch_size, proj_output_dim])
class ClassificationHeadTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
10, 20
)
def test_head_creation(self, num_classes):
test_layer = simclr_head.ClassificationHead(num_classes=num_classes)
input_dim = 64
x = tf.keras.Input(shape=(input_dim,))
out_x = test_layer(x)
self.assertAllEqual(out_x.shape.as_list(),
[None, num_classes])
if __name__ == '__main__':
tf.test.main()
| 3,253 | 30.901961 | 74 | py |
models | models-master/official/projects/teams/teams_task_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for teams_task."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.projects.teams import teams
from official.projects.teams import teams_task
class TeamsPretrainTaskTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters((1, 1), (0, 1), (0, 0), (1, 0))
def test_task(self, num_shared_hidden_layers,
num_task_agnostic_layers):
config = teams_task.TeamsPretrainTaskConfig(
model=teams.TeamsPretrainerConfig(
generator=encoders.BertEncoderConfig(
vocab_size=30522, num_layers=2),
discriminator=encoders.BertEncoderConfig(
vocab_size=30522, num_layers=2),
num_shared_generator_hidden_layers=num_shared_hidden_layers,
num_discriminator_task_agnostic_layers=num_task_agnostic_layers,
),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path="dummy",
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
task = teams_task.TeamsPretrainTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
if __name__ == "__main__":
tf.test.main()
| 2,184 | 37.333333 | 76 | py |
models | models-master/official/projects/teams/teams_pretrainer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TEAMS pre trainer network."""
import tensorflow as tf
from official.modeling import activations
from official.nlp.modeling.networks import encoder_scaffold
from official.nlp.modeling.networks import packed_sequence_embedding
from official.projects.teams import teams_pretrainer
class TeamsPretrainerTest(tf.test.TestCase):
# Build a transformer network to use within the TEAMS trainer.
def _get_network(self, vocab_size):
sequence_length = 512
hidden_size = 50
embedding_cfg = {
'vocab_size': vocab_size,
'type_vocab_size': 1,
'hidden_size': hidden_size,
'embedding_width': hidden_size,
'max_seq_length': sequence_length,
'initializer': tf.keras.initializers.TruncatedNormal(stddev=0.02),
'dropout_rate': 0.1,
}
embedding_inst = packed_sequence_embedding.PackedSequenceEmbedding(
**embedding_cfg)
hidden_cfg = {
'num_attention_heads':
2,
'intermediate_size':
3072,
'intermediate_activation':
activations.gelu,
'dropout_rate':
0.1,
'attention_dropout_rate':
0.1,
'kernel_initializer':
tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
return encoder_scaffold.EncoderScaffold(
num_hidden_instances=2,
pooled_output_dim=hidden_size,
embedding_cfg=embedding_cfg,
embedding_cls=embedding_inst,
hidden_cfg=hidden_cfg,
dict_outputs=True)
def test_teams_pretrainer(self):
"""Validate that the Keras object can be created."""
vocab_size = 100
test_generator_network = self._get_network(vocab_size)
test_discriminator_network = self._get_network(vocab_size)
# Create a TEAMS trainer with the created network.
candidate_size = 3
teams_trainer_model = teams_pretrainer.TeamsPretrainer(
generator_network=test_generator_network,
discriminator_mws_network=test_discriminator_network,
num_discriminator_task_agnostic_layers=1,
vocab_size=vocab_size,
candidate_size=candidate_size)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
num_token_predictions = 2
sequence_length = 128
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
lm_positions = tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32)
lm_ids = tf.keras.Input(shape=(num_token_predictions,), dtype=tf.int32)
inputs = {
'input_word_ids': word_ids,
'input_mask': mask,
'input_type_ids': type_ids,
'masked_lm_positions': lm_positions,
'masked_lm_ids': lm_ids
}
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = teams_trainer_model(inputs)
lm_outs = outputs['lm_outputs']
disc_rtd_logits = outputs['disc_rtd_logits']
disc_rtd_label = outputs['disc_rtd_label']
disc_mws_logits = outputs['disc_mws_logits']
disc_mws_label = outputs['disc_mws_label']
# Validate that the outputs are of the expected shape.
expected_lm_shape = [None, num_token_predictions, vocab_size]
expected_disc_rtd_logits_shape = [None, sequence_length]
expected_disc_rtd_label_shape = [None, sequence_length]
    expected_disc_mws_logits_shape = [
        None, num_token_predictions, candidate_size
    ]
    expected_disc_mws_label_shape = [None, num_token_predictions]
    self.assertAllEqual(expected_lm_shape, lm_outs.shape.as_list())
    self.assertAllEqual(expected_disc_rtd_logits_shape,
                        disc_rtd_logits.shape.as_list())
    self.assertAllEqual(expected_disc_rtd_label_shape,
                        disc_rtd_label.shape.as_list())
    self.assertAllEqual(expected_disc_mws_logits_shape,
                        disc_mws_logits.shape.as_list())
    self.assertAllEqual(expected_disc_mws_label_shape,
                        disc_mws_label.shape.as_list())
def test_teams_trainer_tensor_call(self):
"""Validate that the Keras object can be invoked."""
vocab_size = 100
test_generator_network = self._get_network(vocab_size)
test_discriminator_network = self._get_network(vocab_size)
# Create a TEAMS trainer with the created network.
teams_trainer_model = teams_pretrainer.TeamsPretrainer(
generator_network=test_generator_network,
discriminator_mws_network=test_discriminator_network,
num_discriminator_task_agnostic_layers=2,
vocab_size=vocab_size,
candidate_size=2)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1, 1], [1, 0, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32)
lm_positions = tf.constant([[0, 1], [0, 2]], dtype=tf.int32)
lm_ids = tf.constant([[10, 20], [20, 30]], dtype=tf.int32)
inputs = {
'input_word_ids': word_ids,
'input_mask': mask,
'input_type_ids': type_ids,
'masked_lm_positions': lm_positions,
'masked_lm_ids': lm_ids
}
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
_ = teams_trainer_model(inputs)
def test_serialize_deserialize(self):
"""Validate that the TEAMS trainer can be serialized and deserialized."""
vocab_size = 100
test_generator_network = self._get_network(vocab_size)
test_discriminator_network = self._get_network(vocab_size)
# Create a TEAMS trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
teams_trainer_model = teams_pretrainer.TeamsPretrainer(
generator_network=test_generator_network,
discriminator_mws_network=test_discriminator_network,
num_discriminator_task_agnostic_layers=2,
vocab_size=vocab_size,
candidate_size=2)
# Create another TEAMS trainer via serialization and deserialization.
config = teams_trainer_model.get_config()
new_teams_trainer_model = teams_pretrainer.TeamsPretrainer.from_config(
config)
    # Validate that the config can be serialized to JSON.
_ = new_teams_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(teams_trainer_model.get_config(),
new_teams_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 7,436 | 39.2 | 80 | py |
models | models-master/official/projects/teams/teams_pretrainer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for TEAMS models."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import models
_LOGIT_PENALTY_MULTIPLIER = 10000
class ReplacedTokenDetectionHead(tf.keras.layers.Layer):
"""Replaced token detection discriminator head.
  Args:
encoder_cfg: Encoder config, used to create hidden layers and head.
num_task_agnostic_layers: Number of task agnostic layers in the
discriminator.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
encoder_cfg,
num_task_agnostic_layers,
output='logits',
name='rtd',
**kwargs):
super(ReplacedTokenDetectionHead, self).__init__(name=name, **kwargs)
self.num_task_agnostic_layers = num_task_agnostic_layers
self.hidden_size = encoder_cfg['embedding_cfg']['hidden_size']
self.num_hidden_instances = encoder_cfg['num_hidden_instances']
self.hidden_cfg = encoder_cfg['hidden_cfg']
self.activation = self.hidden_cfg['intermediate_activation']
self.initializer = self.hidden_cfg['kernel_initializer']
self.hidden_layers = []
for i in range(self.num_task_agnostic_layers, self.num_hidden_instances):
self.hidden_layers.append(
layers.Transformer(
num_attention_heads=self.hidden_cfg['num_attention_heads'],
intermediate_size=self.hidden_cfg['intermediate_size'],
intermediate_activation=self.activation,
dropout_rate=self.hidden_cfg['dropout_rate'],
attention_dropout_rate=self.hidden_cfg['attention_dropout_rate'],
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='transformer/layer_%d_rtd' % i))
self.dense = tf.keras.layers.Dense(
self.hidden_size,
activation=self.activation,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='transform/rtd_dense')
self.rtd_head = tf.keras.layers.Dense(
units=1,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='transform/rtd_head')
if output not in ('predictions', 'logits'):
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
self._output_type = output
def call(self, sequence_data, input_mask):
"""Compute inner-products of hidden vectors with sampled element embeddings.
Args:
sequence_data: A [batch_size, seq_length, num_hidden] tensor.
input_mask: A [batch_size, seq_length] binary mask to separate the input
from the padding.
Returns:
A [batch_size, seq_length] tensor.
"""
attention_mask = layers.SelfAttentionMask()([sequence_data, input_mask])
    data = sequence_data
    for hidden_layer in self.hidden_layers:
      # Chain the task-specific layers: each consumes the previous output.
      data = hidden_layer([data, attention_mask])
rtd_logits = self.rtd_head(self.dense(data))
return tf.squeeze(rtd_logits, axis=-1)
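# The RTD logits are typically trained with per-token sigmoid cross entropy
# against 0/1 replaced-token labels; a minimal sketch (illustrative names,
# masking of padded positions elided):
#
#   rtd_logits = rtd_head(sequence_output, input_mask)  # [batch, seq_len]
#   rtd_loss = tf.nn.sigmoid_cross_entropy_with_logits(
#       labels=tf.cast(rtd_labels, tf.float32), logits=rtd_logits)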
class MultiWordSelectionHead(tf.keras.layers.Layer):
"""Multi-word selection discriminator head.
  Args:
embedding_table: The embedding table.
activation: The activation, if any, for the dense layer.
    initializer: The initializer for the dense layer. Defaults to a Glorot
uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
embedding_table,
activation=None,
initializer='glorot_uniform',
output='logits',
name='mws',
**kwargs):
super(MultiWordSelectionHead, self).__init__(name=name, **kwargs)
self.embedding_table = embedding_table
self.activation = activation
self.initializer = tf.keras.initializers.get(initializer)
self._vocab_size, self.embed_size = self.embedding_table.shape
self.dense = tf.keras.layers.Dense(
self.embed_size,
activation=self.activation,
kernel_initializer=self.initializer,
name='transform/mws_dense')
self.layer_norm = tf.keras.layers.LayerNormalization(
axis=-1, epsilon=1e-12, name='transform/mws_layernorm')
if output not in ('predictions', 'logits'):
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
self._output_type = output
def call(self, sequence_data, masked_positions, candidate_sets):
"""Compute inner-products of hidden vectors with sampled element embeddings.
Args:
sequence_data: A [batch_size, seq_length, num_hidden] tensor.
masked_positions: A [batch_size, num_prediction] tensor.
candidate_sets: A [batch_size, num_prediction, k] tensor.
Returns:
A [batch_size, num_prediction, k] tensor.
"""
# Gets shapes for later usage
candidate_set_shape = tf_utils.get_shape_list(candidate_sets)
num_prediction = candidate_set_shape[1]
# Gathers hidden vectors -> (batch_size, num_prediction, 1, embed_size)
masked_lm_input = self._gather_indexes(sequence_data, masked_positions)
lm_data = self.dense(masked_lm_input)
lm_data = self.layer_norm(lm_data)
lm_data = tf.expand_dims(
tf.reshape(lm_data, [-1, num_prediction, self.embed_size]), 2)
# Gathers embeddings -> (batch_size, num_prediction, embed_size, k)
flat_candidate_sets = tf.reshape(candidate_sets, [-1])
candidate_embeddings = tf.gather(self.embedding_table, flat_candidate_sets)
candidate_embeddings = tf.reshape(
candidate_embeddings,
tf.concat([tf.shape(candidate_sets), [self.embed_size]], axis=0)
)
candidate_embeddings.set_shape(
candidate_sets.shape.as_list() + [self.embed_size])
candidate_embeddings = tf.transpose(candidate_embeddings, [0, 1, 3, 2])
# matrix multiplication + squeeze -> (batch_size, num_prediction, k)
logits = tf.matmul(lm_data, candidate_embeddings)
logits = tf.squeeze(logits, 2)
if self._output_type == 'logits':
return logits
return tf.nn.log_softmax(logits)
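  # Illustrative training objective: each candidate set holds the original
  # token plus sampled negatives, so the logits above are typically trained
  # with softmax cross entropy against the index of the original token, e.g.
  #
  #   mws_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
  #       labels=mws_labels, logits=disc_mws_logits)  # hypothetical names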
def _gather_indexes(self, sequence_tensor, positions):
"""Gathers the vectors at the specific positions.
Args:
      sequence_tensor: Sequence output of shape
        (`batch_size`, `seq_length`, `num_hidden`) where `num_hidden` is the
        number of hidden units.
      positions: Position ids of tokens in batched sequences.
Returns:
Sequence tensor of shape (batch_size * num_predictions,
num_hidden).
"""
sequence_shape = tf_utils.get_shape_list(
sequence_tensor, name='sequence_output_tensor')
batch_size, seq_length, width = sequence_shape
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
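# --- Illustrative usage sketch (not part of the original file). It exercises
# `MultiWordSelectionHead` on dummy tensors to show the expected shapes; all
# sizes (vocab=100, embed=16, batch=2, seq=8, num_prediction=2, k=5) are
# assumptions for demonstration only.
def _demo_multi_word_selection_head():
  embedding_table = tf.random.normal([100, 16])  # (vocab_size, embed_size)
  head = MultiWordSelectionHead(embedding_table=embedding_table)
  sequence_data = tf.random.normal([2, 8, 16])  # (batch, seq_len, num_hidden)
  masked_positions = tf.constant([[1, 3], [0, 5]], dtype=tf.int32)
  candidate_sets = tf.random.uniform(
      [2, 2, 5], maxval=100, dtype=tf.int32)  # (batch, num_prediction, k)
  logits = head(sequence_data, masked_positions, candidate_sets)
  assert logits.shape == (2, 2, 5)  # one logit per candidate per position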
@tf.keras.utils.register_keras_serializable(package='Text')
class TeamsPretrainer(tf.keras.Model):
"""TEAMS network training model.
This is an implementation of the network structure described in "Training
ELECTRA Augmented with Multi-word Selection"
(https://arxiv.org/abs/2106.00139).
The TeamsPretrainer allows a user to pass in two transformer encoders, one
for generator, the other for discriminator (multi-word selection). The
pretrainer then instantiates the masked language model (at generator side) and
classification networks (including both multi-word selection head and replaced
token detection head) that are used to create the training objectives.
  *Note* that the model is constructed with the Keras subclassing API, where
  layers are defined inside `__init__` and `call()` implements the computation.
Args:
generator_network: A transformer encoder for generator, this network should
output a sequence output.
discriminator_mws_network: A transformer encoder for multi-word selection
discriminator, this network should output a sequence output.
num_discriminator_task_agnostic_layers: Number of layers shared between
multi-word selection and random token detection discriminators.
    vocab_size: Size of generator output vocabulary.
candidate_size: Candidate size for multi-word selection task,
including the correct word.
mlm_activation: The activation (if any) to use in the masked LM and
classification networks. If None, no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM and
classification networks. Defaults to a Glorot uniform initializer.
output_type: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
generator_network,
discriminator_mws_network,
num_discriminator_task_agnostic_layers,
vocab_size,
candidate_size=5,
mlm_activation=None,
mlm_initializer='glorot_uniform',
output_type='logits',
**kwargs):
super().__init__()
self._config = {
'generator_network':
generator_network,
'discriminator_mws_network':
discriminator_mws_network,
'num_discriminator_task_agnostic_layers':
num_discriminator_task_agnostic_layers,
'vocab_size':
vocab_size,
'candidate_size':
candidate_size,
'mlm_activation':
mlm_activation,
'mlm_initializer':
mlm_initializer,
'output_type':
output_type,
}
for k, v in kwargs.items():
self._config[k] = v
self.generator_network = generator_network
self.discriminator_mws_network = discriminator_mws_network
self.vocab_size = vocab_size
self.candidate_size = candidate_size
self.mlm_activation = mlm_activation
self.mlm_initializer = mlm_initializer
self.output_type = output_type
self.masked_lm = layers.MaskedLM(
embedding_table=self.generator_network.embedding_network
.get_embedding_table(),
activation=mlm_activation,
initializer=mlm_initializer,
output=output_type,
name='generator_masked_lm')
discriminator_cfg = self.discriminator_mws_network.get_config()
self.num_task_agnostic_layers = num_discriminator_task_agnostic_layers
self.discriminator_rtd_head = ReplacedTokenDetectionHead(
encoder_cfg=discriminator_cfg,
num_task_agnostic_layers=self.num_task_agnostic_layers,
output=output_type,
name='discriminator_rtd')
hidden_cfg = discriminator_cfg['hidden_cfg']
self.discriminator_mws_head = MultiWordSelectionHead(
embedding_table=self.discriminator_mws_network.embedding_network
.get_embedding_table(),
activation=hidden_cfg['intermediate_activation'],
initializer=hidden_cfg['kernel_initializer'],
output=output_type,
name='discriminator_mws')
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""TEAMS forward pass.
Args:
inputs: A dict of all inputs, same as the standard BERT model.
Returns:
outputs: A dict of pretrainer model outputs, including
(1) lm_outputs: A `[batch_size, num_token_predictions, vocab_size]`
tensor indicating logits on masked positions.
(2) disc_rtd_logits: A `[batch_size, sequence_length]` tensor indicating
logits for discriminator replaced token detection task.
(3) disc_rtd_label: A `[batch_size, sequence_length]` tensor indicating
target labels for discriminator replaced token detection task.
(4) disc_mws_logits: A `[batch_size, num_token_predictions,
candidate_size]` tensor indicating logits for discriminator multi-word
selection task.
        (5) disc_mws_label: A `[batch_size, num_token_predictions]` tensor
          indicating target labels for discriminator multi-word selection task.
"""
input_word_ids = inputs['input_word_ids']
input_mask = inputs['input_mask']
input_type_ids = inputs['input_type_ids']
masked_lm_positions = inputs['masked_lm_positions']
# Runs generator.
sequence_output = self.generator_network(
[input_word_ids, input_mask, input_type_ids])['sequence_output']
lm_outputs = self.masked_lm(sequence_output, masked_lm_positions)
# Samples tokens from generator.
fake_data = self._get_fake_data(inputs, lm_outputs)
# Runs discriminator.
disc_input = fake_data['inputs']
disc_rtd_label = fake_data['is_fake_tokens']
disc_mws_candidates = fake_data['candidate_set']
mws_sequence_outputs = self.discriminator_mws_network([
disc_input['input_word_ids'], disc_input['input_mask'],
disc_input['input_type_ids']
])['encoder_outputs']
# Applies replaced token detection with input selected based on
# self.num_discriminator_task_agnostic_layers
disc_rtd_logits = self.discriminator_rtd_head(
mws_sequence_outputs[self.num_task_agnostic_layers - 1], input_mask)
# Applies multi-word selection.
disc_mws_logits = self.discriminator_mws_head(mws_sequence_outputs[-1],
masked_lm_positions,
disc_mws_candidates)
disc_mws_label = tf.zeros_like(masked_lm_positions, dtype=tf.int32)
outputs = {
'lm_outputs': lm_outputs,
'disc_rtd_logits': disc_rtd_logits,
'disc_rtd_label': disc_rtd_label,
'disc_mws_logits': disc_mws_logits,
'disc_mws_label': disc_mws_label,
}
return outputs
def _get_fake_data(self, inputs, mlm_logits):
"""Generate corrupted data for discriminator.
    Note it is possible for the sampled token to be the same as the correct
    one.
Args:
inputs: A dict of all inputs, same as the input of `call()` function
mlm_logits: The generator's output logits
Returns:
A dict of generated fake data
"""
inputs = models.electra_pretrainer.unmask(inputs, duplicate=True)
# Samples replaced token.
sampled_tokens = tf.stop_gradient(
models.electra_pretrainer.sample_from_softmax(
mlm_logits, disallow=None))
sampled_tokids = tf.argmax(sampled_tokens, axis=-1, output_type=tf.int32)
# Prepares input and label for replaced token detection task.
updated_input_ids, masked = models.electra_pretrainer.scatter_update(
inputs['input_word_ids'], sampled_tokids, inputs['masked_lm_positions'])
rtd_labels = masked * (1 - tf.cast(
tf.equal(updated_input_ids, inputs['input_word_ids']), tf.int32))
updated_inputs = models.electra_pretrainer.get_updated_inputs(
inputs, duplicate=True, input_word_ids=updated_input_ids)
# Samples (candidate_size-1) negatives and concat with true tokens
disallow = tf.one_hot(
inputs['masked_lm_ids'], depth=self.vocab_size, dtype=tf.float32)
sampled_candidates = tf.stop_gradient(
sample_k_from_softmax(mlm_logits, k=self.candidate_size-1,
disallow=disallow))
true_token_id = tf.expand_dims(inputs['masked_lm_ids'], -1)
candidate_set = tf.concat([true_token_id, sampled_candidates], -1)
return {
'inputs': updated_inputs,
'is_fake_tokens': rtd_labels,
'sampled_tokens': sampled_tokens,
'candidate_set': candidate_set
}
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.discriminator_mws_network)
return items
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def sample_k_from_softmax(logits, k, disallow=None, use_topk=False):
"""Implement softmax sampling using gumbel softmax trick to select k items.
Args:
logits: A [batch_size, num_token_predictions, vocab_size] tensor indicating
the generator output logits for each masked position.
    k: Number of samples.
disallow: If `None`, we directly sample tokens from the logits. Otherwise,
this is a tensor of size [batch_size, num_token_predictions, vocab_size]
indicating the true word id in each masked position.
    use_topk: Whether to use tf.nn.top_k or an iterative approach; the latter
      is empirically faster.
Returns:
sampled_tokens: A [batch_size, num_token_predictions, k] tensor indicating
the sampled word id in each masked position.
"""
if use_topk:
if disallow is not None:
logits -= _LOGIT_PENALTY_MULTIPLIER * disallow
uniform_noise = tf.random.uniform(
tf_utils.get_shape_list(logits), minval=0, maxval=1)
gumbel_noise = -tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9)
_, sampled_tokens = tf.nn.top_k(logits + gumbel_noise, k=k, sorted=False)
else:
sampled_tokens_list = []
vocab_size = tf_utils.get_shape_list(logits)[-1]
if disallow is not None:
logits -= _LOGIT_PENALTY_MULTIPLIER * disallow
uniform_noise = tf.random.uniform(
tf_utils.get_shape_list(logits), minval=0, maxval=1)
gumbel_noise = -tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9)
logits += gumbel_noise
for _ in range(k):
token_ids = tf.argmax(logits, -1, output_type=tf.int32)
sampled_tokens_list.append(token_ids)
logits -= _LOGIT_PENALTY_MULTIPLIER * tf.one_hot(
token_ids, depth=vocab_size, dtype=tf.float32)
sampled_tokens = tf.stack(sampled_tokens_list, -1)
return sampled_tokens
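# --- Illustrative sketch (not part of the original file): drawing k=3
# candidate ids per masked position from random logits. With use_topk=False
# the iterative branch penalizes already-sampled ids, so the k samples at
# each position are distinct. Sizes below are assumptions.
def _demo_sample_k_from_softmax():
  logits = tf.random.normal([2, 4, 30])  # (batch, num_predictions, vocab)
  sampled = sample_k_from_softmax(logits, k=3)
  assert sampled.shape == (2, 4, 3)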
| 18,683 | 39.267241 | 100 | py |
models | models-master/official/projects/teams/teams.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TEAMS model configurations and instantiation methods."""
import dataclasses
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.modeling import layers
from official.nlp.modeling import networks
@dataclasses.dataclass
class TeamsPretrainerConfig(base_config.Config):
"""Teams pretrainer configuration."""
# Candidate size for multi-word selection task, including the correct word.
candidate_size: int = 5
# Weight for the generator masked language model task.
generator_loss_weight: float = 1.0
# Weight for the replaced token detection task.
discriminator_rtd_loss_weight: float = 5.0
# Weight for the multi-word selection task.
discriminator_mws_loss_weight: float = 2.0
  # Whether to share the embedding network between the generator and
  # discriminator.
tie_embeddings: bool = True
# Number of bottom layers shared between generator and discriminator.
# Non-positive value implies no sharing.
num_shared_generator_hidden_layers: int = 3
# Number of bottom layers shared between different discriminator tasks.
num_discriminator_task_agnostic_layers: int = 11
generator: encoders.BertEncoderConfig = dataclasses.field(
default_factory=encoders.BertEncoderConfig
)
discriminator: encoders.BertEncoderConfig = dataclasses.field(
default_factory=encoders.BertEncoderConfig
)
class TeamsEncoderConfig(encoders.BertEncoderConfig):
pass
@gin.configurable
@base_config.bind(TeamsEncoderConfig)
def get_encoder(bert_config: TeamsEncoderConfig,
embedding_network=None,
hidden_layers=None):
"""Gets a 'EncoderScaffold' object.
Args:
bert_config: A 'modeling.BertConfig'.
embedding_network: Embedding network instance.
hidden_layers: List of hidden layer instances.
Returns:
A encoder object.
"""
embedding_cfg = dict(
vocab_size=bert_config.vocab_size,
type_vocab_size=bert_config.type_vocab_size,
hidden_size=bert_config.hidden_size,
embedding_width=bert_config.embedding_size,
max_seq_length=bert_config.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
dropout_rate=bert_config.dropout_rate,
)
hidden_cfg = dict(
num_attention_heads=bert_config.num_attention_heads,
intermediate_size=bert_config.intermediate_size,
intermediate_activation=tf_utils.get_activation(
bert_config.hidden_activation),
dropout_rate=bert_config.dropout_rate,
attention_dropout_rate=bert_config.attention_dropout_rate,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
)
if embedding_network is None:
embedding_network = networks.PackedSequenceEmbedding
if hidden_layers is None:
hidden_layers = layers.Transformer
kwargs = dict(
embedding_cfg=embedding_cfg,
embedding_cls=embedding_network,
hidden_cls=hidden_layers,
hidden_cfg=hidden_cfg,
num_hidden_instances=bert_config.num_layers,
pooled_output_dim=bert_config.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
dict_outputs=True)
# Relies on gin configuration to define the Transformer encoder arguments.
return networks.EncoderScaffold(**kwargs)
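# --- Minimal smoke-test sketch (not part of the original file): building a
# deliberately tiny encoder from the config. All hyperparameters below are
# illustrative assumptions, not recommended settings.
def _demo_get_encoder():
  tiny_cfg = TeamsEncoderConfig(
      vocab_size=100,
      hidden_size=32,
      num_layers=2,
      num_attention_heads=2,
      intermediate_size=64,
      max_position_embeddings=32)
  encoder = get_encoder(tiny_cfg)
  word_ids = tf.ones((2, 8), dtype=tf.int32)
  mask = tf.ones((2, 8), dtype=tf.int32)
  type_ids = tf.zeros((2, 8), dtype=tf.int32)
  # `dict_outputs=True`, so the scaffold returns a dictionary of tensors.
  outputs = encoder([word_ids, mask, type_ids])
  assert outputs['sequence_output'].shape == (2, 8, 32)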
| 4,096 | 36.245455 | 77 | py |
models | models-master/official/projects/teams/teams_experiments.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-doc-return-or-yield,line-too-long
"""TEAMS experiments."""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.data import question_answering_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import question_answering
from official.nlp.tasks import sentence_prediction
from official.projects.teams import teams
from official.projects.teams import teams_task
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
@dataclasses.dataclass
class TeamsOptimizationConfig(optimization.OptimizationConfig):
"""TEAMS optimization config."""
optimizer: optimization.OptimizerConfig = optimization.OptimizerConfig(
type="adamw",
adamw=AdamWeightDecay(
weight_decay_rate=0.01,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
epsilon=1e-6))
learning_rate: optimization.LrConfig = optimization.LrConfig(
type="polynomial",
polynomial=PolynomialLr(
initial_learning_rate=1e-4,
decay_steps=1000000,
end_learning_rate=0.0))
warmup: optimization.WarmupConfig = optimization.WarmupConfig(
type="polynomial", polynomial=PolynomialWarmupConfig(warmup_steps=10000))
@exp_factory.register_config_factory("teams/pretraining")
def teams_pretrain() -> cfg.ExperimentConfig:
"""TEAMS pretraining."""
config = cfg.ExperimentConfig(
task=teams_task.TeamsPretrainTaskConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
is_training=False)),
trainer=cfg.TrainerConfig(
optimizer_config=TeamsOptimizationConfig(), train_steps=1000000),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
@exp_factory.register_config_factory("teams/sentence_prediction")
def teams_sentence_prediction() -> cfg.ExperimentConfig:
r"""Teams GLUE."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type="any", any=teams.TeamsEncoderConfig(num_layers=1))),
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(optimizer_config=TeamsOptimizationConfig()),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
@exp_factory.register_config_factory("teams/squad")
def teams_squad() -> cfg.ExperimentConfig:
"""Teams Squad V1/V2."""
config = cfg.ExperimentConfig(
task=question_answering.QuestionAnsweringConfig(
model=question_answering.ModelConfig(
encoder=encoders.EncoderConfig(
type="any", any=teams.TeamsEncoderConfig(num_layers=1))),
train_data=question_answering_dataloader.QADataConfig(),
validation_data=question_answering_dataloader.QADataConfig()),
trainer=cfg.TrainerConfig(optimizer_config=TeamsOptimizationConfig()),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
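# --- Illustrative only (not part of the original file): the factory
# registered above can be called directly, or looked up by its registered
# name via `exp_factory.get_exp_config("teams/pretraining")`.
def _demo_teams_pretrain_config():
  config = teams_pretrain()
  assert config.trainer.train_steps == 1000000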
| 4,424 | 39.59633 | 79 | py |
models | models-master/official/projects/teams/teams_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TEAMS pretraining task (Joint Masked LM, Replaced Token Detection and )."""
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.projects.teams import teams
from official.projects.teams import teams_pretrainer
@dataclasses.dataclass
class TeamsPretrainTaskConfig(cfg.TaskConfig):
"""The model config."""
model: teams.TeamsPretrainerConfig = dataclasses.field(
default_factory=teams.TeamsPretrainerConfig
)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
def _get_generator_hidden_layers(discriminator_network, num_hidden_layers,
num_shared_layers):
if num_shared_layers <= 0:
num_shared_layers = 0
hidden_layers = []
else:
hidden_layers = discriminator_network.hidden_layers[:num_shared_layers]
for _ in range(num_shared_layers, num_hidden_layers):
hidden_layers.append(layers.Transformer)
return hidden_layers
def _build_pretrainer(
config: teams.TeamsPretrainerConfig) -> teams_pretrainer.TeamsPretrainer:
"""Instantiates TeamsPretrainer from the config."""
generator_encoder_cfg = config.generator
discriminator_encoder_cfg = config.discriminator
discriminator_network = teams.get_encoder(discriminator_encoder_cfg)
# Copy discriminator's embeddings to generator for easier model serialization.
hidden_layers = _get_generator_hidden_layers(
discriminator_network, generator_encoder_cfg.num_layers,
config.num_shared_generator_hidden_layers)
if config.tie_embeddings:
generator_network = teams.get_encoder(
generator_encoder_cfg,
embedding_network=discriminator_network.embedding_network,
hidden_layers=hidden_layers)
else:
generator_network = teams.get_encoder(
generator_encoder_cfg, hidden_layers=hidden_layers)
return teams_pretrainer.TeamsPretrainer(
generator_network=generator_network,
discriminator_mws_network=discriminator_network,
num_discriminator_task_agnostic_layers=config
.num_discriminator_task_agnostic_layers,
vocab_size=generator_encoder_cfg.vocab_size,
candidate_size=config.candidate_size,
mlm_activation=tf_utils.get_activation(
generator_encoder_cfg.hidden_activation),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=generator_encoder_cfg.initializer_range))
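# --- Tiny smoke-test sketch (not part of the original file). The encoder
# sizes are illustrative assumptions; with `tie_embeddings=True` (the
# default) the generator reuses the discriminator's embedding network, so
# both configs must agree on the embedding dimensions.
def _demo_build_pretrainer():
  tiny = dict(
      vocab_size=100,
      hidden_size=32,
      num_layers=2,
      num_attention_heads=2,
      intermediate_size=64,
      max_position_embeddings=32)
  config = teams.TeamsPretrainerConfig(
      num_shared_generator_hidden_layers=1,
      num_discriminator_task_agnostic_layers=1,
      generator=teams.TeamsEncoderConfig(**tiny),
      discriminator=teams.TeamsEncoderConfig(**tiny))
  pretrainer = _build_pretrainer(config)
  assert pretrainer is not None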
@task_factory.register_task_cls(TeamsPretrainTaskConfig)
class TeamsPretrainTask(base_task.Task):
"""TEAMS Pretrain Task (Masked LM + RTD + MWS)."""
def build_model(self):
return _build_pretrainer(self.task_config.model)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> tf.Tensor:
with tf.name_scope('TeamsPretrainTask/losses'):
metrics = dict([(metric.name, metric) for metric in metrics])
# Generator MLM loss.
lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
tf.cast(model_outputs['lm_outputs'], tf.float32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = tf.reduce_sum(lm_prediction_losses * lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['masked_lm_loss'].update_state(mlm_loss)
weight = self.task_config.model.generator_loss_weight
total_loss = weight * mlm_loss
# Discriminator RTD loss.
rtd_logits = model_outputs['disc_rtd_logits']
rtd_labels = tf.cast(model_outputs['disc_rtd_label'], tf.float32)
input_mask = tf.cast(labels['input_mask'], tf.float32)
rtd_ind_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=rtd_logits, labels=rtd_labels)
rtd_numerator = tf.reduce_sum(input_mask * rtd_ind_loss)
rtd_denominator = tf.reduce_sum(input_mask)
rtd_loss = tf.math.divide_no_nan(rtd_numerator, rtd_denominator)
metrics['replaced_token_detection_loss'].update_state(rtd_loss)
weight = self.task_config.model.discriminator_rtd_loss_weight
total_loss = total_loss + weight * rtd_loss
# Discriminator MWS loss.
mws_logits = model_outputs['disc_mws_logits']
mws_labels = model_outputs['disc_mws_label']
mws_loss = tf.keras.losses.sparse_categorical_crossentropy(
mws_labels, mws_logits, from_logits=True)
mws_numerator_loss = tf.reduce_sum(mws_loss * lm_label_weights)
mws_denominator_loss = tf.reduce_sum(lm_label_weights)
mws_loss = tf.math.divide_no_nan(mws_numerator_loss, mws_denominator_loss)
metrics['multiword_selection_loss'].update_state(mws_loss)
weight = self.task_config.model.discriminator_mws_loss_weight
total_loss = total_loss + weight * mws_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
metrics['total_loss'].update_state(total_loss)
return total_loss
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return pretrain_dataloader.BertPretrainDataLoader(params).load(
input_context)
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='masked_lm_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='replaced_token_detection_accuracy'),
tf.keras.metrics.Mean(name='replaced_token_detection_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='multiword_selection_accuracy'),
tf.keras.metrics.Mean(name='multiword_selection_loss'),
tf.keras.metrics.Mean(name='total_loss'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
with tf.name_scope('TeamsPretrainTask/process_metrics'):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(labels['masked_lm_ids'],
model_outputs['lm_outputs'],
labels['masked_lm_weights'])
if 'replaced_token_detection_accuracy' in metrics:
rtd_logits_expanded = tf.expand_dims(model_outputs['disc_rtd_logits'],
-1)
rtd_full_logits = tf.concat(
[-1.0 * rtd_logits_expanded, rtd_logits_expanded], -1)
metrics['replaced_token_detection_accuracy'].update_state(
model_outputs['disc_rtd_label'], rtd_full_logits,
labels['input_mask'])
if 'multiword_selection_accuracy' in metrics:
metrics['multiword_selection_accuracy'].update_state(
model_outputs['disc_mws_label'], model_outputs['disc_mws_logits'],
labels['masked_lm_weights'])
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = model(inputs, training=False)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
| 10,289 | 39.352941 | 80 | py |
models | models-master/official/projects/teams/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/teams/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Teams training driver, register Teams configs."""
# pylint: disable=unused-import
from absl import app
from official.common import flags as tfm_flags
from official.nlp import tasks
from official.nlp import train
from official.projects.teams import teams_experiments
from official.projects.teams import teams_task
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,028 | 34.482759 | 76 | py |
models | models-master/official/projects/roformer/roformer_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer attention layer."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
EinsumDense = tf.keras.layers.EinsumDense
MultiHeadAttention = tf.keras.layers.MultiHeadAttention
def _build_trig_vector(length, key_dim):
"""Builds the trig vector."""
tf_dtype = tf.keras.mixed_precision.global_policy().compute_dtype
position_ids = tf.cast(tf.range(length), dtype=tf_dtype)
position_ids = tf.expand_dims(position_ids, axis=0)
steps = key_dim // 2
  # The paper's exponent 2(i - 1) / key_dim reduces to (i - 1) / steps; the -1
  # is absorbed by the zero-based tf.range below.
wavenumber_exponent = -tf.cast(tf.range(steps), dtype=tf_dtype) / steps
wavenumbers = tf.pow(
tf.constant(10000.0, dtype=tf_dtype), wavenumber_exponent
)
vec = tf.einsum('bl,d->bld', position_ids, wavenumbers)
sin_vec = tf.repeat(tf.sin(vec), repeats=2, axis=-1)
cos_vec = tf.repeat(tf.cos(vec), repeats=2, axis=-1)
sin_vec, cos_vec = tf.expand_dims(sin_vec, 2), tf.expand_dims(cos_vec, 2)
return sin_vec, cos_vec
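# --- Quick illustrative check (not part of the original file): the returned
# tensors have shape (1, length, 1, key_dim), broadcastable against a
# (batch, length, num_heads, key_dim) query/key projection.
def _demo_trig_vector_shapes():
  sin_vec, cos_vec = _build_trig_vector(length=4, key_dim=6)
  assert sin_vec.shape == (1, 4, 1, 6)
  assert cos_vec.shape == (1, 4, 1, 6)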
@tf.keras.utils.register_keras_serializable(package='Text')
class RoformerAttention(tf.keras.layers.MultiHeadAttention):
"""Roformer Attention."""
def __init__(self,
q_max_sequence_length,
kv_max_sequence_length,
output_range=None,
**kwargs):
"""Instantiates a roformer attention layer.
Roformer paper: https://arxiv.org/abs/2104.09864
Args:
q_max_sequence_length: maximum length in input for the query
kv_max_sequence_length: maximum length in input for key and value, can be
different from q_max_sequence_length
output_range: length of the query tensor to consider.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._q_max_sequence_length = q_max_sequence_length
self._kv_max_sequence_length = kv_max_sequence_length
assert self._key_dim % 2 == 0
q_sin_vec, q_cos_vec = _build_trig_vector(self._q_max_sequence_length,
self._key_dim)
k_sin_vec, k_cos_vec = _build_trig_vector(self._kv_max_sequence_length,
self._key_dim)
# pylint:disable=g-long-ternary
self.q_sin_vec, self.q_cos_vec = (q_sin_vec,
q_cos_vec) if output_range is None else (
q_sin_vec[:, 0:output_range, ...],
q_cos_vec[:, 0:output_range, ...])
# pylint:enable=g-long-ternary
self.k_sin_vec, self.k_cos_vec = (k_sin_vec, k_cos_vec)
def roformer_recompute_qkv(self, q, k, v):
q_shape = tf.shape(q)
q_len = q_shape[1]
k_shape = tf.shape(k)
k_len = k_shape[1]
q2 = tf.stack([-q[..., 1::2], q[..., ::2]], axis=4)
q2 = tf.reshape(q2, q_shape)
k2 = tf.stack([-k[..., 1::2], k[..., ::2]], axis=4)
k2 = tf.reshape(k2, k_shape)
ret_q = q * self.q_cos_vec[:, 0:q_len,
...] + q2 * self.q_sin_vec[:, 0:q_len, ...]
    ret_k = k * self.k_cos_vec[:, 0:k_len,
                               ...] + k2 * self.k_sin_vec[:, 0:k_len, ...]
    return ret_q, ret_k, v
def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
query,
value,
key=None,
attention_mask=None,
return_attention_scores=False,
training=None):
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
query = self._query_dense(query)
key = self._key_dense(key)
value = self._value_dense(value)
query, key, value = self.roformer_recompute_qkv(query, key, value)
attention_output, attention_scores = self._compute_attention(
query, key, value, attention_mask, training)
attention_output = self._output_dense(attention_output)
if return_attention_scores:
return attention_output, attention_scores
return attention_output
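# --- Hedged usage sketch (not part of the original file): rotary attention
# over a toy sequence. `key_dim` must be even and the max sequence lengths
# must cover the actual input lengths; all sizes are assumptions.
def _demo_roformer_attention():
  layer = RoformerAttention(
      q_max_sequence_length=16,
      kv_max_sequence_length=16,
      num_heads=2,
      key_dim=4)
  query = tf.random.normal([2, 8, 8])  # (batch, seq_len, hidden)
  output = layer(query, query)  # self-attention: value == query
  assert output.shape == (2, 8, 8)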
| 4,638 | 38.313559 | 91 | py |
models | models-master/official/projects/roformer/roformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer model configurations and instantiation methods."""
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.roformer import roformer_encoder
class RoformerEncoderConfig(encoders.BertEncoderConfig):
pass
@base_config.bind(RoformerEncoderConfig)
def get_encoder(encoder_cfg: RoformerEncoderConfig):
"""Gets a 'RoformerEncoder' object.
Args:
encoder_cfg: A 'RoformerEncoderConfig'.
Returns:
    An encoder object.
"""
return roformer_encoder.RoformerEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first)
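# --- Illustrative sketch (not part of the original file): a tiny encoder
# built from the config; hyperparameters are assumptions for demonstration.
def _demo_roformer_encoder_from_config():
  cfg = RoformerEncoderConfig(
      vocab_size=100,
      hidden_size=32,
      num_layers=2,
      num_attention_heads=2,
      intermediate_size=64,
      max_position_embeddings=32)
  encoder = get_encoder(cfg)
  assert len(encoder.transformer_layers) == 2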
| 2,002 | 36.092593 | 74 | py |
models | models-master/official/projects/roformer/roformer_experiments.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer experiment configurations."""
# pylint: disable=g-doc-return-or-yield,line-too-long
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import masked_lm
from official.nlp.tasks import sentence_prediction
from official.projects.roformer import roformer
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
@dataclasses.dataclass
class RoformerOptimizationConfig(optimization.OptimizationConfig):
"""TEAMS optimization config."""
optimizer: optimization.OptimizerConfig = optimization.OptimizerConfig(
type='adamw',
adamw=AdamWeightDecay(
weight_decay_rate=0.01,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
epsilon=1e-6))
learning_rate: optimization.LrConfig = optimization.LrConfig(
type='polynomial',
polynomial=PolynomialLr(
initial_learning_rate=1e-4,
decay_steps=1000000,
end_learning_rate=0.0))
warmup: optimization.WarmupConfig = optimization.WarmupConfig(
type='polynomial', polynomial=PolynomialWarmupConfig(warmup_steps=10000))
@exp_factory.register_config_factory('roformer/pretraining')
def roformer_pretraining() -> cfg.ExperimentConfig:
"""BERT pretraining experiment."""
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=masked_lm.MaskedLMConfig(
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
type='any', any=roformer.RoformerEncoderConfig()),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768,
num_classes=2,
dropout_rate=0.1,
name='next_sentence')
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
use_v2_feature_names=True),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
use_v2_feature_names=True, is_training=False)),
trainer=cfg.TrainerConfig(
optimizer_config=RoformerOptimizationConfig(), train_steps=1000000),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('roformer/glue')
def roformer_glue() -> cfg.ExperimentConfig:
r"""BigBird GLUE."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=roformer.RoformerEncoderConfig())),
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
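# --- Illustrative only (not part of the original file): the names registered
# above resolve through `exp_factory`.
def _demo_roformer_config_lookup():
  config = exp_factory.get_exp_config('roformer/glue')
  assert config.task.model.encoder.type == 'any'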
| 4,797 | 38.00813 | 79 | py |
models | models-master/official/projects/roformer/roformer_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.roformer import roformer_encoder
class RoformerEncoderTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(RoformerEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
def test_network_creation(self):
hidden_size = 32
sequence_length = 21
    # Create a small RoformerEncoder for testing.
test_network = roformer_encoder.RoformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_all_encoder_outputs_network_creation(self):
hidden_size = 32
sequence_length = 21
    # Create a small RoformerEncoder for testing.
test_network = roformer_encoder.RoformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_network_creation_with_float16_dtype(self):
hidden_size = 32
sequence_length = 21
tf.keras.mixed_precision.set_global_policy("mixed_float16")
    # Create a small RoformerEncoder for testing.
test_network = roformer_encoder.RoformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
@parameterized.named_parameters(
("all_sequence", None, 21),
("output_range", 1, 1),
)
def test_network_invocation(self, output_range, out_seq_len):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
    # Create a small RoformerEncoder for testing.
test_network = roformer_encoder.RoformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
output_range=output_range)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], out_seq_len)
    # Creates a RoformerEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = roformer_encoder.RoformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], sequence_length)
    # Creates a RoformerEncoder with embedding_width != hidden_size
test_network = roformer_encoder.RoformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=16)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
inner_dim=512,
inner_activation="relu",
output_dropout=0.05,
attention_dropout=0.22,
initializer="glorot_uniform",
output_range=-1,
embedding_width=16,
embedding_layer=None,
norm_first=False)
network = roformer_encoder.RoformerEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["inner_activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["inner_activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = roformer_encoder.RoformerEncoder.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
# Tests model saving/loading.
model_path = self.get_temp_dir() + "/model"
network.save(model_path)
_ = tf.keras.models.load_model(model_path)
if __name__ == "__main__":
tf.test.main()
| 9,514 | 40.190476 | 80 | py |
models | models-master/official/projects/roformer/roformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer TransformerEncoder block layer."""
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.roformer import roformer_attention
@tf.keras.utils.register_keras_serializable(package="Text")
class RoformerEncoderBlock(tf.keras.layers.Layer):
"""RoformerEncoderBlock layer."""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
q_max_sequence_length=512,
kv_max_sequence_length=512,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
**kwargs):
"""Initializes `RoformerEncoderBlock`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
q_max_sequence_length: The maximum sequence length of queries.
kv_max_sequence_length: The maximum sequence length of keys and values.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
**kwargs: keyword arguments.
"""
super().__init__(**kwargs)
if inner_dim % 2 != 0:
raise ValueError(f"The inner_dim of f{self.__class__} must be an even "
f"integer. However, inner_dim is f{inner_dim}")
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
self._q_max_sequence_length = q_max_sequence_length
self._kv_max_sequence_length = kv_max_sequence_length
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = roformer_attention.RoformerAttention(
q_max_sequence_length=self._q_max_sequence_length,
kv_max_sequence_length=self._kv_max_sequence_length,
output_range=self._output_range,
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super(RoformerEncoderBlock, self).build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"output_dropout":
self._output_dropout_rate,
"attention_dropout":
self._attention_dropout_rate,
"output_range":
self._output_range,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"inner_dropout":
self._inner_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes":
self._attention_axes,
}
base_config = super(RoformerEncoderBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor` as the single
sequence of embeddings. [`input tensor`, `attention mask`] to have the
additional attention mask. [`query tensor`, `key value tensor`,
`attention mask`] to have separate input streams for the query, and
key/value to the multi-head attention.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError("Unexpected inputs to %s with length at %d" %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if self._output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(layer_output + attention_output)
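# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): a minimal forward
# pass through RoformerEncoderBlock, mirroring the unit tests. All shapes are
# illustrative; with 2 heads over a hidden size of 16, each head gets an even
# dimension of 8, which the rotary embedding requires.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
  block = RoformerEncoderBlock(
      num_attention_heads=2, inner_dim=64, inner_activation="relu")
  data = tf.zeros([2, 8, 16], dtype=tf.float32)  # [batch, seq_len, hidden]
  mask = tf.ones([2, 8, 8], dtype=tf.float32)  # [batch, from_seq, to_seq]
  print(block([data, mask]).shape)  # -> (2, 8, 16)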
| 13,507 | 42.434084 | 80 | py |
models | models-master/official/projects/roformer/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 610 | 37.1875 | 74 | py |
models | models-master/official/projects/roformer/roformer_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the attention layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.projects.roformer import roformer_attention
def _create_mock_attention_data(num_heads,
key_dim,
value_dim,
q_seq_length,
kv_seq_length,
batch_size,
include_mask=False):
"""Creates mock testing data.
Args:
num_heads: `int`, Number of attention heads.
key_dim: `int`, Size of query head.
value_dim: `int`, Size of key, value dim.
q_seq_length: query sequence length.
kv_seq_length: key/value sequence length.
batch_size: `int`, the batch size.
include_mask: optional `bool`, whether or not to include mask data.
Returns:
A dictionary with `str` as keys and `Tensor` as values.
"""
query_shape = (batch_size, q_seq_length, key_dim)
value_shape = (batch_size, kv_seq_length, value_dim)
data = dict(
query=tf.random.normal(shape=query_shape),
value=tf.random.normal(shape=value_shape),
key=tf.random.normal(shape=value_shape))
total_seq_length = kv_seq_length
if include_mask:
mask_shape = (batch_size, num_heads, q_seq_length, total_seq_length)
mask_data = np.random.randint(2, size=mask_shape).astype("float32")
mask_data = dict(attention_mask=mask_data)
data.update(mask_data)
return data
class RoformerAttentionTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(RoformerAttentionTest, self).setUp()
np.random.seed(0)
tf.random.set_seed(0)
@combinations.generate(
combinations.combine(length=[8, 50], key_dim=[64, 128]))
def test_trig_vector(self, length, key_dim):
sin_emb, cos_emb = roformer_attention._build_trig_vector(length, key_dim)
length = tf.shape(sin_emb)[1]
key_dim = tf.shape(sin_emb)[3]
for m in range(0, length):
half_d = key_dim // 2
std_emb = tf.range(half_d, dtype=tf.float32)
std_emb = tf.pow(10000.0, -std_emb / float(half_d))
std_emb = m * std_emb
std_sin_emb = tf.sin(std_emb)
std_cos_emb = tf.cos(std_emb)
tf.assert_equal(sin_emb[:, m, :, 0::2], std_sin_emb)
tf.assert_equal(sin_emb[:, m, :, 1::2], std_sin_emb)
tf.assert_equal(cos_emb[:, m, :, 0::2], std_cos_emb)
tf.assert_equal(cos_emb[:, m, :, 1::2], std_cos_emb)
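    # Editor's note: the assertions above pin down the rotary convention this
    # test expects: position m is encoded with angles m * 10000**(-i / (d/2))
    # for i in [0, d/2), with each sin/cos value duplicated at channels 2i
    # and 2i + 1.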
@combinations.generate(
combinations.combine(value_dim=[32, 64], mask=[True, False]))
def test_attention_scores(self, value_dim, mask):
"""Tests combinations of attention score calculations."""
batch_size, num_heads, key_dim, seq_length = 2, 12, 64, 8
test_layer = roformer_attention.RoformerAttention(
q_max_sequence_length=seq_length,
kv_max_sequence_length=seq_length,
num_heads=num_heads,
key_dim=key_dim,
value_dim=value_dim)
data = _create_mock_attention_data(
num_heads=num_heads,
key_dim=key_dim,
value_dim=value_dim,
q_seq_length=seq_length,
kv_seq_length=seq_length,
batch_size=batch_size,
include_mask=mask)
output = test_layer(**data)
self.assertEqual(output.shape, [batch_size, seq_length, key_dim])
if __name__ == "__main__":
tf.test.main()
| 4,037 | 34.113043 | 77 | py |
models | models-master/official/projects/roformer/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A customized training library for the specific task."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.roformer import roformer_experiments # pylint: disable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism())
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 2,566 | 35.671429 | 92 | py |
models | models-master/official/projects/roformer/roformer_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer encoder network."""
# pylint: disable=g-classes-have-attributes
import collections
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.projects.roformer import roformer_encoder_block
@tf.keras.utils.register_keras_serializable(package='Text')
class RoformerEncoder(tf.keras.Model):
"""Bi-directional Transformer-based encoder network with Roformer.
Roformer paper: https://arxiv.org/abs/2104.09864
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size,
hidden_size=768, # FIXME: hidden_size per head should be even!
num_layers=12,
num_attention_heads=12,
max_sequence_length=512,
type_vocab_size=16,
inner_dim=3072,
inner_activation=lambda x: tf.keras.activations.gelu(x, approximate=True),
output_dropout=0.1,
attention_dropout=0.1,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
output_range=None,
embedding_width=None,
embedding_layer=None,
norm_first=False,
**kwargs):
if 'intermediate_size' in kwargs:
inner_dim = kwargs['intermediate_size']
del kwargs['intermediate_size']
if 'activation' in kwargs:
inner_activation = kwargs['activation']
del kwargs['activation']
if 'dropout_rate' in kwargs:
output_dropout = kwargs['dropout_rate']
del kwargs['dropout_rate']
if 'attention_dropout_rate' in kwargs:
attention_dropout = kwargs['attention_dropout_rate']
del kwargs['attention_dropout_rate']
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
embedding_layer_inst = layers.on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
else:
embedding_layer_inst = embedding_layer
word_embeddings = embedding_layer_inst(word_ids)
# Roformer does not need a position embedding layer
type_embedding_layer = layers.on_device_embedding.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')
type_embeddings = type_embedding_layer(type_ids)
# Roformer does not have absolute position embedding
embeddings = tf.keras.layers.Add()([word_embeddings, type_embeddings])
embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
embeddings = embedding_norm_layer(embeddings)
embeddings = (tf.keras.layers.Dropout(rate=output_dropout)(embeddings))
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
if embedding_width != hidden_size:
embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')
embeddings = embedding_projection(embeddings)
else:
embedding_projection = None
transformer_layers = []
data = embeddings
attention_mask = layers.SelfAttentionMask()(data, mask)
encoder_outputs = []
for i in range(num_layers):
if i == num_layers - 1 and output_range is not None:
transformer_output_range = output_range
else:
transformer_output_range = None
layer = roformer_encoder_block.RoformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
q_max_sequence_length=max_sequence_length,
kv_max_sequence_length=max_sequence_length,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
output_range=transformer_output_range,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='roformer/layer_%d' % i)
transformer_layers.append(layer)
data = layer([data, attention_mask])
encoder_outputs.append(data)
last_encoder_output = encoder_outputs[-1]
# Applying a tf.slice op (through subscript notation) to a Keras tensor
# like this will create a SliceOpLambda layer. This is better than a Lambda
# layer with Python code, because that is fundamentally less portable.
first_token_tensor = last_encoder_output[:, 0, :]
pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')
cls_output = pooler_layer(first_token_tensor)
outputs = dict(
sequence_output=encoder_outputs[-1],
pooled_output=cls_output,
encoder_outputs=encoder_outputs,
)
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super(RoformerEncoder, self).__init__(
inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
config_dict = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self._pooler_layer = pooler_layer
self._transformer_layers = transformer_layers
self._embedding_norm_layer = embedding_norm_layer
self._embedding_layer = embedding_layer_inst
# self._position_embedding_layer = position_embedding_layer
self._position_embedding_layer = None
self._type_embedding_layer = type_embedding_layer
if embedding_projection is not None:
self._embedding_projection = embedding_projection
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config._asdict())
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
      warn_string = (
          'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
          'train this model, the embedding layer will no longer be shared. '
          'To work around this, load the model outside of the Keras API.')
      print('WARNING: ' + warn_string)
      logging.warning(warn_string)
return cls(**config)
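# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): building a tiny
# RoformerEncoder and running one forward pass. All hyperparameters below are
# illustrative; hidden_size / num_attention_heads = 8 keeps the per-head
# dimension even, as the FIXME above requires.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  encoder = RoformerEncoder(
      vocab_size=100, hidden_size=32, num_layers=2, num_attention_heads=4,
      max_sequence_length=16, type_vocab_size=2, inner_dim=64)
  word_ids = tf.ones((2, 16), dtype=tf.int32)
  mask = tf.ones((2, 16), dtype=tf.int32)
  type_ids = tf.zeros((2, 16), dtype=tf.int32)
  outputs = encoder([word_ids, mask, type_ids])
  # sequence_output: [batch, seq_len, hidden]; pooled_output: [batch, hidden].
  print(outputs['sequence_output'].shape, outputs['pooled_output'].shape)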
| 11,374 | 40.514599 | 80 | py |
models | models-master/official/projects/roformer/roformer_encoder_block_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.roformer import roformer_encoder_block
@parameterized.named_parameters(
('base', roformer_encoder_block.RoformerEncoderBlock))
class RoformerEncoderBlockTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(RoformerEncoderBlockTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer(input_data)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer(input_data)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_separate_qkv(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
class RoformerArgumentTest(tf.test.TestCase, parameterized.TestCase):
def test_raises(self):
num_attention_heads = 2
with self.assertRaisesRegex(ValueError, 'The inner_dim of.*'):
_ = roformer_encoder_block.RoformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=31,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
encoder_block = roformer_encoder_block.RoformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_mask]
output = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_get_config(self):
num_attention_heads = 2
encoder_block = roformer_encoder_block.RoformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
encoder_block_config = encoder_block.get_config()
new_encoder_block = roformer_encoder_block.RoformerEncoderBlock.from_config(
encoder_block_config)
self.assertEqual(encoder_block_config, new_encoder_block.get_config())
@parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]},
{'attention_axes': [2]}, {'attention_axes': [1, 2]})
def test_several_attention_axes(self, attention_axes):
test_layer = roformer_encoder_block.RoformerEncoderBlock(
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
num_attention_heads=10,
attention_axes=attention_axes)
seq_len = 21
dimensions = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(seq_len, dimensions))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 12,909 | 38.96904 | 80 | py |
models | models-master/official/projects/yt8m/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yt8m/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YT8M model training driver."""
from absl import app
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.projects.yt8m.configs import yt8m
from official.projects.yt8m.tasks import yt8m_task
# pylint: enable=unused-import
from official.vision import train
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 987 | 31.933333 | 74 | py |
models | models-master/official/projects/yt8m/train_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yt8m import train as train_lib
from official.projects.yt8m.dataloaders import utils
from official.vision.dataloaders import tfexample_utils
FLAGS = flags.FLAGS
class TrainTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super().setUp()
self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
tf.io.gfile.makedirs(self._model_dir)
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self._data_path = os.path.join(data_dir, 'data.tfrecord')
examples = [utils.make_yt8m_example() for _ in range(8)]
tfexample_utils.dump_to_tfrecord(self._data_path, tf_examples=examples)
@parameterized.named_parameters(
dict(
testcase_name='segment_with_avg_precison',
use_segment_level_labels=True,
use_average_precision_metric=True,
),
dict(
testcase_name='video_with_avg_precison',
use_segment_level_labels=False,
use_average_precision_metric=True,
),
dict(
testcase_name='segment',
use_segment_level_labels=True,
use_average_precision_metric=False,
),
dict(
testcase_name='video',
use_segment_level_labels=False,
use_average_precision_metric=False,
),
)
def test_train_and_eval(
self, use_segment_level_labels, use_average_precision_metric
):
saved_flag_values = flagsaver.save_flag_values()
train_lib.tfm_flags.define_flags()
FLAGS.mode = 'train'
FLAGS.model_dir = self._model_dir
FLAGS.experiment = 'yt8m_experiment'
FLAGS.tpu = ''
average_precision = {'top_k': 20} if use_average_precision_metric else None
params_override = json.dumps({
'runtime': {
'distribution_strategy': 'mirrored',
'mixed_precision_dtype': 'float32',
},
'trainer': {
'train_steps': 2,
'validation_steps': 2,
},
'task': {
'model': {
'backbone': {
'type': 'dbof',
'dbof': {
'cluster_size': 16,
'hidden_size': 16,
'use_context_gate_cluster_layer': True,
},
},
'head': {
'type': 'moe',
'moe': {
'use_input_context_gate': True,
'use_output_context_gate': True,
},
},
},
'train_data': {
'input_path': self._data_path,
'global_batch_size': 4,
},
'validation_data': {
'input_path': self._data_path,
'segment_labels': use_segment_level_labels,
'global_batch_size': 4,
},
'evaluation': {
'average_precision': average_precision,
},
},
})
FLAGS.params_override = params_override
with train_lib.train.gin.unlock_config():
train_lib.train.main('unused_args')
FLAGS.mode = 'eval'
with train_lib.train.gin.unlock_config():
train_lib.train.main('unused_args')
flagsaver.restore_flag_values(saved_flag_values)
if __name__ == '__main__':
tf.config.set_soft_device_placement(True)
tf.test.main()
| 4,164 | 30.55303 | 79 | py |
models | models-master/official/projects/yt8m/eval_utils/eval_util_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yt8m.eval_utils.average_precision_calculator import AveragePrecisionCalculator
class YT8MAveragePrecisionCalculatorTest(parameterized.TestCase,
tf.test.TestCase):
def setUp(self):
super().setUp()
self.prediction = np.array([
[0.98, 0.88, 0.77, 0.65, 0.64, 0.59, 0.45, 0.43, 0.20, 0.05],
[0.878, 0.832, 0.759, 0.621, 0.458, 0.285, 0.134],
[0.98],
[0.56],
], dtype=object)
self.raw_prediction = np.random.rand(5, 10) + np.random.randint(
low=0, high=10, size=(5, 10))
self.ground_truth = np.array([[1, 1, 0, 0, 0, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 1, 0], [1], [0]],
dtype=object)
self.expected_ap = np.array([
0.714,
0.722,
1.000,
0.000,
])
def test_ap_calculator_ap(self):
    # Check the computed average precision against the expected values.
    for i, _ in enumerate(self.ground_truth):
      calculator = AveragePrecisionCalculator()
      ap = calculator.ap(self.prediction[i], self.ground_truth[i])
      logging.info('DEBUG %dth AP: %r', i + 1, ap)
      self.assertAlmostEqual(ap, self.expected_ap[i], places=3)
def test_ap_calculator_zero_one_normalize(self):
for i, _ in enumerate(self.raw_prediction):
calculator = AveragePrecisionCalculator()
      logging.info('%r', self.raw_prediction[i])
normalized_score = calculator._zero_one_normalize(self.raw_prediction[i])
self.assertAllInRange(normalized_score, lower_bound=0.0, upper_bound=1.0)
@parameterized.parameters((None,), (3,), (5,), (10,), (20,))
def test_ap_calculator_ap_at_n(self, n):
for i, _ in enumerate(self.ground_truth):
calculator = AveragePrecisionCalculator(n)
ap = calculator.ap_at_n(self.prediction[i], self.ground_truth[i], n)
logging.info('DEBUG %dth AP: %r', i + 1, ap)
if __name__ == '__main__':
tf.test.main()
| 2,636 | 35.625 | 101 | py |
models | models-master/official/projects/yt8m/eval_utils/eval_util.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functions to help with evaluating models."""
import logging
import numpy as np
import tensorflow as tf
from official.projects.yt8m.eval_utils import average_precision_calculator as ap_calculator
from official.projects.yt8m.eval_utils import mean_average_precision_calculator as map_calculator
def flatten(l):
"""Merges a list of lists into a single list."""
# pylint: disable=g-complex-comprehension
return [item for sublist in l for item in sublist]
# pylint: enable=g-complex-comprehension
def calculate_hit_at_one(predictions, actuals):
"""Performs a local (numpy) calculation of the hit at one.
Args:
predictions: Matrix containing the outputs of the model. Dimensions are
'batch' x 'num_classes'.
actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x
'num_classes'.
Returns:
float: The average hit at one across the entire batch.
"""
top_prediction = np.argmax(predictions, 1)
hits = actuals[np.arange(actuals.shape[0]), top_prediction]
return np.average(hits)
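# Editor's worked example (toy numbers, not from the original file): with
#   predictions = np.array([[0.1, 0.9], [0.8, 0.2]])
#   actuals = np.array([[0, 1], [0, 1]])
# the argmax of row 0 lands on a positive label and the argmax of row 1 does
# not, so calculate_hit_at_one returns 0.5.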
def calculate_precision_at_equal_recall_rate(predictions, actuals):
"""Performs a local (numpy) calculation of the PERR.
Args:
predictions: Matrix containing the outputs of the model. Dimensions are
'batch' x 'num_classes'.
actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x
'num_classes'.
Returns:
float: The average precision at equal recall rate across the entire batch.
"""
aggregated_precision = 0.0
num_videos = actuals.shape[0]
if num_videos == 0:
logging.warning("Num_videos is 0, returning 0.0 aggregated_precision.")
return aggregated_precision
for row in np.arange(num_videos):
num_labels = int(np.sum(actuals[row]))
top_indices = np.argpartition(predictions[row], -num_labels)[-num_labels:]
item_precision = 0.0
for label_index in top_indices:
if predictions[row][label_index] > 0:
item_precision += actuals[row][label_index]
item_precision /= top_indices.size
aggregated_precision += item_precision
aggregated_precision /= num_videos
return aggregated_precision
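# Editor's worked example (toy numbers): for a single row with
# actuals [1, 0, 1] (two positives) and predictions [0.9, 0.8, 0.1], the two
# top-scored classes are {0, 1} and only one of them is a positive, so the
# precision at equal recall rate is 0.5.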
def calculate_gap(predictions, actuals, top_k=20):
"""Performs a local (numpy) calculation of the global average precision.
Only the top_k predictions are taken for each of the videos.
Args:
predictions: Matrix containing the outputs of the model. Dimensions are
'batch' x 'num_classes'.
actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x
'num_classes'.
top_k: How many predictions to use per video.
Returns:
float: The global average precision.
"""
gap_calculator = ap_calculator.AveragePrecisionCalculator()
sparse_predictions, sparse_labels, num_positives = top_k_by_class(
predictions, actuals, top_k)
gap_calculator.accumulate(
flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))
return gap_calculator.peek_ap_at_n()
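# Editor's note: unlike the per-class mean AP, GAP pools the top_k
# (prediction, label) pairs of every row into one global ranked list and
# computes a single average precision over it. A degenerate illustration:
#   calculate_gap(np.array([[0.9, 0.1]]), np.array([[1, 0]]))  # -> 1.0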
def top_k_by_class(predictions, labels, k=20):
"""Extracts the top k predictions for each video, sorted by class.
Args:
predictions: A numpy matrix containing the outputs of the model. Dimensions
are 'batch' x 'num_classes'.
labels: A numpy matrix containing the ground truth labels. Dimensions are
'batch' x 'num_classes'.
k: the top k non-zero entries to preserve in each prediction.
Returns:
A tuple (predictions,labels, true_positives). 'predictions' and 'labels'
are lists of lists of floats. 'true_positives' is a list of scalars. The
length of the lists are equal to the number of classes. The entries in the
predictions variable are probability predictions, and
the corresponding entries in the labels variable are the ground truth for
those predictions. The entries in 'true_positives' are the number of true
positives for each class in the ground truth.
Raises:
ValueError: An error occurred when the k is not a positive integer.
"""
if k <= 0:
raise ValueError("k must be a positive integer.")
k = min(k, predictions.shape[1])
num_classes = predictions.shape[1]
prediction_triplets = []
for video_index in range(predictions.shape[0]):
prediction_triplets.extend(
top_k_triplets(predictions[video_index], labels[video_index], k))
out_predictions = [[] for _ in range(num_classes)]
out_labels = [[] for _ in range(num_classes)]
for triplet in prediction_triplets:
out_predictions[triplet[0]].append(triplet[1])
out_labels[triplet[0]].append(triplet[2])
out_true_positives = [np.sum(labels[:, i]) for i in range(num_classes)]
return out_predictions, out_labels, out_true_positives
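# Editor's worked example (toy numbers): with
#   predictions = np.array([[0.9, 0.1, 0.8]]), labels = np.array([[1, 0, 1]])
# and k=2, the surviving triplets are (0, 0.9, 1) and (2, 0.8, 1), giving
# out_predictions == [[0.9], [], [0.8]], out_labels == [[1], [], [1]] and
# out_true_positives == [1, 0, 1].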
def top_k_triplets(predictions, labels, k=20):
"""Get the top_k for a 1-d numpy array.
Args:
predictions: A numpy matrix containing the outputs of the model. Dimensions
are 'batch' x 'num_classes'.
labels: A numpy matrix containing the ground truth labels. Dimensions are
'batch' x 'num_classes'.
k: The number top predictions to pick.
Returns:
    a sparse list of (class_index, prediction, label) triplets.
"""
m = len(predictions)
k = min(k, m)
indices = np.argpartition(predictions, -k)[-k:]
return [(index, predictions[index], labels[index]) for index in indices]
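# Editor's worked example (toy numbers):
#   top_k_triplets(np.array([0.3, 0.9, 0.5]), np.array([0, 1, 1]), k=2)
# keeps the two highest-scored entries and returns
# [(2, 0.5, 1), (1, 0.9, 1)], up to the unspecified ordering of
# np.argpartition.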
class EvaluationMetrics(object):
"""A class to store the evaluation metrics."""
def __init__(self, num_class, top_k, top_n):
"""Construct an EvaluationMetrics object to store the evaluation metrics.
Args:
num_class: A positive integer specifying the number of classes.
top_k: A positive integer specifying how many predictions are considered
per video.
top_n: A positive Integer specifying the average precision at n, or None
to use all provided data points.
Raises:
ValueError: An error occurred when MeanAveragePrecisionCalculator cannot
not be constructed.
"""
self.sum_hit_at_one = 0.0
self.sum_perr = 0.0
self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(
num_class, filter_empty_classes=False, top_n=top_n)
self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()
self.top_k = top_k
self.num_examples = 0
self.num_class = num_class
def accumulate(self, predictions, labels):
"""Accumulate the metrics calculated locally for this mini-batch.
Args:
predictions: A numpy matrix containing the outputs of the model.
Dimensions are 'batch' x 'num_classes'.
labels: A numpy matrix containing the ground truth labels. Dimensions are
'batch' x 'num_classes'.
Returns:
dictionary: A dictionary storing the metrics for the mini-batch.
Raises:
ValueError: An error occurred when the shape of predictions and actuals
does not match.
"""
predictions, labels = self._convert_to_numpy(
predictions=predictions[0], groundtruths=labels[0])
batch_size = labels.shape[0]
mean_hit_at_one = calculate_hit_at_one(predictions, labels)
mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)
# Take the top 20 predictions.
sparse_predictions, sparse_labels, num_positives = top_k_by_class(
predictions, labels, self.top_k)
self.map_calculator.accumulate(sparse_predictions, sparse_labels,
num_positives)
self.global_ap_calculator.accumulate(
flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))
self.num_examples += batch_size
self.sum_hit_at_one += mean_hit_at_one * batch_size
self.sum_perr += mean_perr * batch_size
return {"hit_at_one": mean_hit_at_one, "perr": mean_perr}
def get(self, return_per_class_ap=False):
"""Calculate the evaluation metrics for the whole epoch.
Args:
return_per_class_ap: a bool variable to determine whether return the
detailed class-wise ap for more detailed analysis. Default is `False`.
Raises:
ValueError: If no examples were accumulated.
Returns:
dictionary: a dictionary storing the evaluation metrics for the epoch. The
dictionary has the fields: avg_hit_at_one, avg_perr, and
aps (default nan).
"""
if self.num_examples <= 0:
raise ValueError("total_sample must be positive.")
avg_hit_at_one = self.sum_hit_at_one / self.num_examples
avg_perr = self.sum_perr / self.num_examples
aps = self.map_calculator.peek_map_at_n()
mean_ap = sum(aps) / self.num_class
gap = self.global_ap_calculator.peek_ap_at_n()
lw_map = self.map_calculator.peek_log_weighted_map_at_n()
epoch_info_dict = {
"avg_hit_at_one": avg_hit_at_one,
"avg_perr": avg_perr,
"map": mean_ap,
"gap": gap,
"lw_map": lw_map
}
if return_per_class_ap:
epoch_info_dict["per_class_ap"] = aps
return epoch_info_dict
def clear(self):
"""Clear the evaluation metrics and reset the EvaluationMetrics object."""
self.sum_hit_at_one = 0.0
self.sum_perr = 0.0
self.map_calculator.clear()
self.global_ap_calculator.clear()
self.num_examples = 0
@property
def name(self):
return "avg_prec_metric"
def _convert_to_numpy(self, groundtruths, predictions):
"""Converts tesnors to numpy arrays."""
if groundtruths is not None:
labels = tf.nest.map_structure(lambda x: x.numpy(), groundtruths)
else:
labels = groundtruths
if predictions is not None:
outputs = tf.nest.map_structure(lambda x: x.numpy(), predictions)
else:
outputs = predictions
    labels = labels * 1  # Promote possibly-boolean labels to a numeric dtype.
return outputs, labels
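# ---------------------------------------------------------------------------
# Editor's usage sketch (assumptions: eager-mode tensors, and the extra
# leading list dimension implied by the `[0]` indexing inside `accumulate`):
#   metrics = EvaluationMetrics(num_class=3, top_k=2, top_n=None)
#   metrics.accumulate(predictions=[pred_tensor], labels=[label_tensor])
#   epoch_info = metrics.get()  # avg_hit_at_one, avg_perr, map, gap, lw_map
#   metrics.clear()
# ---------------------------------------------------------------------------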
| 10,275 | 35.05614 | 97 | py |
models | models-master/official/projects/yt8m/eval_utils/mean_average_precision_calculator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate the mean average precision.
It provides an interface for calculating mean average precision
for an entire list or the top-n ranked items.
Example usages:
We first call the function accumulate many times to process parts of the ranked
list. After processing all the parts, we call peek_map_at_n
to calculate the mean average precision.
```
import random
import numpy as np
p = np.array([[random.random() for _ in range(50)] for _ in range(1000)])
a = np.array([[random.choice([0, 1]) for _ in range(50)]
              for _ in range(1000)])
# mean average precision for 50 classes.
calculator = mean_average_precision_calculator.MeanAveragePrecisionCalculator(
num_class=50)
calculator.accumulate(p, a)
aps = calculator.peek_map_at_n()
```
"""
import numpy as np
from official.projects.yt8m.eval_utils import average_precision_calculator
class MeanAveragePrecisionCalculator(object):
"""This class is to calculate mean average precision."""
def __init__(self, num_class, filter_empty_classes=True, top_n=None):
"""Construct a calculator to calculate the (macro) average precision.
Args:
num_class: A positive Integer specifying the number of classes.
filter_empty_classes: whether to filter classes without any positives.
top_n: A positive Integer specifying the average precision at n, or None
to use all provided data points.
Raises:
ValueError: An error occurred when num_class is not a positive integer;
or the top_n_array is not a list of positive integers.
"""
if not isinstance(num_class, int) or num_class <= 1:
raise ValueError("num_class must be a positive integer.")
self._ap_calculators = [] # member of AveragePrecisionCalculator
self._num_class = num_class # total number of classes
self._filter_empty_classes = filter_empty_classes
for _ in range(num_class):
self._ap_calculators.append(
average_precision_calculator.AveragePrecisionCalculator(top_n=top_n))
def accumulate(self, predictions, actuals, num_positives=None):
"""Accumulate the predictions and their ground truth labels.
Args:
predictions: A list of lists storing the prediction scores. The outer
dimension corresponds to classes.
actuals: A list of lists storing the ground truth labels. The dimensions
should correspond to the predictions input. Any value larger than 0 will
be treated as positives, otherwise as negatives.
num_positives: If provided, it is a list of numbers representing the
number of true positives for each class. If not provided, the number of
true positives will be inferred from the 'actuals' array.
Raises:
ValueError: An error occurred when the shape of predictions and actuals
does not match.
"""
if not num_positives:
num_positives = [None for i in range(self._num_class)]
calculators = self._ap_calculators
for i in range(self._num_class):
calculators[i].accumulate(predictions[i], actuals[i], num_positives[i])
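  # Editor's sketch (toy shapes): inputs are indexed by class first, e.g.
  #   calc = MeanAveragePrecisionCalculator(num_class=3)
  #   calc.accumulate([[0.9, 0.2], [0.1], [0.7, 0.6]],
  #                   [[1, 0], [0], [1, 1]])
  #   calc.peek_map_at_n()  # one AP value per class with positives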
def clear(self):
for calculator in self._ap_calculators:
calculator.clear()
def is_empty(self):
return ([calculator.heap_size for calculator in self._ap_calculators
] == [0 for _ in range(self._num_class)])
def peek_map_at_n(self):
"""Peek the non-interpolated mean average precision at n.
Returns:
An array of non-interpolated average precision at n (default 0) for each
class.
"""
aps = []
for i in range(self._num_class):
if (not self._filter_empty_classes or
self._ap_calculators[i].num_accumulated_positives > 0):
ap = self._ap_calculators[i].peek_ap_at_n()
aps.append(ap)
return aps
def peek_log_weighted_map_at_n(self):
"""Peek the non-interpolated log weighted mean average precision at n.
Returns:
Log weighted mean average precision.
"""
sum_log_weighted_ap = 0
sum_log_weights = 0
for i in range(self._num_class):
pos = self._ap_calculators[i].num_accumulated_positives
if not self._filter_empty_classes or pos > 0:
ap = self._ap_calculators[i].peek_ap_at_n()
# TODO(b/286928055)
log_pos = np.log(1 + pos)
sum_log_weights += log_pos
sum_log_weighted_ap += ap * log_pos
if not sum_log_weights:
return 0
return sum_log_weighted_ap / sum_log_weights
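# Editor's note: the log-weighted mean AP above is
#   sum_c AP_c * log(1 + P_c) / sum_c log(1 + P_c),
# where P_c is the number of accumulated positives for class c, so frequent
# classes contribute more than in the unweighted macro average of
# peek_map_at_n.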
| 5,021 | 35.656934 | 80 | py |
models | models-master/official/projects/yt8m/eval_utils/average_precision_calculator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate or keep track of the interpolated average precision.
It provides an interface for calculating interpolated average precision for an
entire list or the top-n ranked items. For the definition of the
(non-)interpolated average precision:
http://trec.nist.gov/pubs/trec15/appendices/CE.MEASURES06.pdf
Example usages:
1) Use it as a static function call to directly calculate average precision for
a short ranked list in the memory.
```
import random
import numpy as np
p = np.array([random.random() for _ in range(10)])
a = np.array([random.choice([0, 1]) for _ in range(10)])
ap = average_precision_calculator.AveragePrecisionCalculator.ap(p, a)
```
2) Use it as an object for long ranked list that cannot be stored in memory or
the case where partial predictions can be observed at a time (Tensorflow
predictions). In this case, we first call the function accumulate many times
to process parts of the ranked list. After processing all the parts, we call
peek_interpolated_ap_at_n.
```
p1 = np.array([random.random() for _ in range(5)])
a1 = np.array([random.choice([0, 1]) for _ in range(5)])
p2 = np.array([random.random() for _ in range(5)])
a2 = np.array([random.choice([0, 1]) for _ in range(5)])
# interpolated average precision at 10 using 1000 break points
calculator = average_precision_calculator.AveragePrecisionCalculator(10)
calculator.accumulate(p1, a1)
calculator.accumulate(p2, a2)
ap3 = calculator.peek_ap_at_n()
```
"""
import heapq
import numbers
import random
import numpy
class AveragePrecisionCalculator(object):
"""Calculate the average precision and average precision at n."""
def __init__(self, top_n=None):
"""Construct an AveragePrecisionCalculator to calculate average precision.
This class is used to calculate the average precision for a single label.
Args:
top_n: A positive Integer specifying the average precision at n, or None
to use all provided data points.
Raises:
ValueError: An error occurred when the top_n is not a positive integer.
"""
if not ((isinstance(top_n, int) and top_n >= 0) or top_n is None):
raise ValueError("top_n must be a positive integer or None.")
self._top_n = top_n # average precision at n
self._total_positives = 0 # total number of positives have seen
self._heap = [] # max heap of (prediction, actual)
@property
def heap_size(self):
"""Gets the heap size maintained in the class."""
return len(self._heap)
@property
def num_accumulated_positives(self):
"""Gets the number of positive samples that have been accumulated."""
return self._total_positives
def accumulate(self, predictions, actuals, num_positives=None):
"""Accumulate the predictions and their ground truth labels.
After the function call, we may call peek_ap_at_n to actually calculate
the average precision.
Note predictions and actuals must have the same shape.
Args:
predictions: a list storing the prediction scores.
      actuals: a list storing the ground truth labels. Any value larger than 0
        will be treated as positives, otherwise as negatives.
      num_positives: number of positive examples. If the 'predictions' and
        'actuals' inputs aren't complete, some true positives may be missing
        from them; in that case, provide 'num_positives' so that recall is
        still tracked accurately.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
if num_positives is not None:
if not isinstance(num_positives, numbers.Number) or num_positives < 0:
raise ValueError(
"'num_positives' was provided but it was a negative number.")
if num_positives is not None:
self._total_positives += num_positives
else:
self._total_positives += numpy.size(
numpy.where(numpy.array(actuals) > 1e-5))
topk = self._top_n
heap = self._heap
for i in range(numpy.size(predictions)):
if topk is None or len(heap) < topk:
heapq.heappush(heap, (predictions[i], actuals[i]))
else:
if predictions[i] > heap[0][0]: # heap[0] is the smallest
heapq.heappop(heap)
heapq.heappush(heap, (predictions[i], actuals[i]))
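  # Note on the heap logic above: `heapq` keeps the smallest prediction at
  # heap[0], so with top_n set we retain only the top_n highest-scoring
  # (prediction, actual) pairs. Illustrative sketch (hypothetical values):
  #   calc = AveragePrecisionCalculator(top_n=2)
  #   calc.accumulate([0.1, 0.9, 0.5], [0, 1, 1])
  #   # calc.heap_size == 2 and the (0.1, 0) pair has been evicted.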
def clear(self):
"""Clear the accumulated predictions."""
self._heap = []
self._total_positives = 0
def peek_ap_at_n(self):
"""Peek the non-interpolated average precision at n.
Returns:
      The non-interpolated average precision at n (default 0).
      If n is larger than the length of the ranked list,
      the average precision over the whole list is returned.
"""
if self.heap_size <= 0:
return 0
predlists = numpy.array(list(zip(*self._heap)))
ap = self.ap_at_n(
predlists[0],
predlists[1],
n=self._top_n,
total_num_positives=self._total_positives)
return ap
@staticmethod
def ap(predictions, actuals):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
Returns:
      The non-interpolated average precision over the full ranked list.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
return AveragePrecisionCalculator.ap_at_n(predictions, actuals, n=None)
@staticmethod
def ap_at_n(predictions, actuals, n=20, total_num_positives=None):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
n: the top n items to be considered in ap@n.
total_num_positives : (optionally) you can specify the number of total
positive in the list. If specified, it will be used in calculation.
Returns:
      The non-interpolated average precision at n.
      If n is larger than the length of the ranked list,
      the average precision over the whole list is returned.
Raises:
ValueError: An error occurred when
1) the format of the input is not the numpy 1-D array;
2) the shape of predictions and actuals does not match;
3) the input n is not a positive integer.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
if n is not None:
if not isinstance(n, int) or n <= 0:
raise ValueError("n must be 'None' or a positive integer."
" It was '%s'." % n)
ap = 0.0
predictions = numpy.array(predictions)
actuals = numpy.array(actuals)
    # shuffle to break ties randomly, which avoids overestimating the ap
predictions, actuals = AveragePrecisionCalculator._shuffle(
predictions, actuals)
sortidx = sorted(
range(len(predictions)), key=lambda k: predictions[k], reverse=True)
if total_num_positives is None:
numpos = numpy.size(numpy.where(actuals > 0))
else:
numpos = total_num_positives
if numpos == 0:
return 0
if n is not None:
numpos = min(numpos, n)
delta_recall = 1.0 / numpos
poscount = 0.0
# calculate the ap
r = len(sortidx)
if n is not None:
r = min(r, n)
for i in range(r):
if actuals[sortidx[i]] > 0:
poscount += 1
ap += poscount / (i + 1) * delta_recall
return ap
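  # Worked example for ap_at_n (illustrative, hand-picked values): with
  # predictions [0.9, 0.8, 0.7, 0.6] and actuals [1, 0, 1, 0], the positives
  # land at ranks 1 and 3, numpos == 2, so
  #   ap = (1/1) * 0.5 + (2/3) * 0.5 = 5/6 ~= 0.833.
  # The seeded shuffle above only reorders ties, so distinct scores give a
  # stable result.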
@staticmethod
  def _shuffle(predictions, actuals):
    """Shuffles predictions and actuals in unison with a fixed seed."""
    random.seed(0)
    suffidx = random.sample(range(len(predictions)), len(predictions))
predictions = predictions[suffidx]
actuals = actuals[suffidx]
return predictions, actuals
@staticmethod
def _zero_one_normalize(predictions, epsilon=1e-7):
"""Normalize the predictions to the range between 0.0 and 1.0.
For some predictions like SVM predictions, we need to normalize them before
    calculating the interpolated average precision. The normalization will not
change the rank in the original list and thus won't change the average
precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
epsilon: a small constant to avoid denominator being zero.
Returns:
The normalized prediction.
"""
denominator = numpy.max(predictions) - numpy.min(predictions)
ret = (predictions - numpy.min(predictions)) / max(denominator, epsilon)
return ret
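  # Illustrative sketch for _zero_one_normalize (hypothetical values):
  #   _zero_one_normalize(numpy.array([1.0, 3.0, 5.0])) -> [0.0, 0.5, 1.0]
  # A constant vector maps to all zeros thanks to the epsilon guard on the
  # denominator.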
| 9,610 | 34.205128 | 80 | py |
models | models-master/official/projects/yt8m/configs/yt8m_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.yt8m.configs import yt8m # pylint: disable=unused-import
from official.projects.yt8m.configs.yt8m import yt8m as exp_cfg
class YT8MTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('yt8m_experiment',),)
def test_yt8m_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, cfg.TaskConfig)
self.assertIsInstance(config.task.model, hyperparams.Config)
self.assertIsInstance(config.task.train_data, cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,561 | 37.097561 | 80 | py |
models | models-master/official/projects/yt8m/configs/yt8m.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification configuration definition."""
import dataclasses
from typing import Optional, Tuple
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import common
YT8M_TRAIN_EXAMPLES = 3888919
YT8M_VAL_EXAMPLES = 1112356
# 2/frame -> frame level
# 3/frame -> segment level
YT8M_TRAIN_PATH = 'gs://youtube8m-ml/2/frame/train/train*.tfrecord'
YT8M_VAL_PATH = 'gs://youtube8m-ml/3/frame/validate/validate*.tfrecord'
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""The base configuration for building datasets.
Attributes:
name: Dataset name.
split: dataset split, 'train' or 'valid'.
    feature_sizes: shape (length) of each feature specified in feature_names.
    feature_names: names of the features in the tf.SequenceExample.
    feature_sources: whether each feature comes from 'context' or 'feature'.
feature_dtypes: dtype of decoded feature.
feature_from_bytes: decode feature from bytes or as dtype list.
label_fields: name of field to read from tf.SequenceExample.
segment_size: Number of frames in each segment.
segment_labels: Use segment level label. Default: False, video level label.
include_video_id: `True` means include video id (string) in the input to the
model.
    temporal_stride: Not used. To be deprecated.
    max_frames: Maximum number of frames in an input example. It is used to
      crop the input in the temporal dimension.
sample_random_frames: If sample random frames.
num_sample_frames: Number of frames to sample for each input example.
num_classes: Number of classes to classify. Assuming it is a classification
task.
num_devices: Not used. To be deprecated.
input_path: The path to the input.
is_training: Whether this data is used for training or not.
    num_examples: Number of examples in the dataset. It is used to compute the
      steps for train or eval. Set the value to `-1` to make the experiment run
      until the end of the dataset.
file_type: type of input files.
"""
name: Optional[str] = 'yt8m'
split: Optional[str] = None
feature_sizes: Tuple[int, ...] = (1024, 128)
feature_names: Tuple[str, ...] = ('rgb', 'audio')
feature_sources: Tuple[str, ...] = ('feature', 'feature')
feature_dtypes: Tuple[str, ...] = ('uint8', 'uint8')
feature_from_bytes: Tuple[bool, ...] = (True, True)
label_field: str = 'labels'
segment_size: int = 1
segment_labels: bool = False
include_video_id: bool = False
temporal_stride: int = 1
max_frames: int = 300 # Cap input frames.
sample_random_frames: bool = True
# Sample random frames if not None. No sampling in inference.
num_sample_frames: Optional[int] = 300
prefetch_buffer_size: int = 100
shuffle_buffer_size: int = 100
num_classes: int = 3862
num_devices: int = 1
input_path: str = ''
is_training: bool = True
num_examples: int = -1
file_type: str = 'tfrecord'
def yt8m(is_training):
"""YT8M dataset configs."""
# pylint: disable=unexpected-keyword-arg
return DataConfig(
temporal_stride=1,
segment_labels=False,
segment_size=5,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training, # pytype: disable=wrong-keyword-args
num_examples=YT8M_TRAIN_EXAMPLES if is_training else YT8M_VAL_EXAMPLES,
input_path=YT8M_TRAIN_PATH if is_training else YT8M_VAL_PATH)
# pylint: enable=unexpected-keyword-arg
@dataclasses.dataclass
class DbofModel(hyperparams.Config):
"""The model config."""
cluster_size: int = 3000
hidden_size: int = 2000
add_batch_norm: bool = True
pooling_method: str = 'average'
use_context_gate_cluster_layer: bool = False
context_gate_cluster_bottleneck_size: int = 0
@dataclasses.dataclass
class Backbone(hyperparams.OneOfConfig):
"""Configuration for backbones.
Attributes:
    type: 'str', type of backbone to be used, one of the fields below.
dbof: dbof backbone config.
"""
type: Optional[str] = None
dbof: DbofModel = dataclasses.field(default_factory=DbofModel)
@dataclasses.dataclass
class MoeModel(hyperparams.Config):
"""The MoE model config."""
num_mixtures: int = 5
vocab_as_last_dim: bool = False
use_input_context_gate: bool = False
use_output_context_gate: bool = False
@dataclasses.dataclass
class LogisticModel(hyperparams.Config):
"""The logistic model config."""
return_logits: bool = False
@dataclasses.dataclass
class Head(hyperparams.OneOfConfig):
"""Configuration for aggreagation heads.
Attributes:
type: 'str', type of head be used, one of the fields below.
moe: MoE head config.
logistic: Logistic head config.
"""
type: Optional[str] = None
moe: MoeModel = dataclasses.field(default_factory=MoeModel)
logistic: LogisticModel = dataclasses.field(default_factory=LogisticModel)
@dataclasses.dataclass
class VideoClassificationModel(hyperparams.Config):
"""The classifier model config."""
backbone: Backbone = dataclasses.field(
default_factory=lambda: Backbone(type='dbof')
)
head: Head = dataclasses.field(default_factory=lambda: Head(type='moe'))
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
activation='relu', use_sync_bn=False
)
)
@dataclasses.dataclass
class Losses(hyperparams.Config):
name: str = 'binary_crossentropy'
from_logits: bool = False
label_smoothing: float = 0.0
l2_weight_decay: float = 1e-5
@dataclasses.dataclass
class AveragePrecisionConfig(hyperparams.Config):
top_k: int = 20
top_n: Optional[int] = None
return_per_class_ap: bool = False
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
average_precision: Optional[AveragePrecisionConfig] = None
@dataclasses.dataclass
class YT8MTask(cfg.TaskConfig):
"""The task config."""
model: VideoClassificationModel = dataclasses.field(
default_factory=VideoClassificationModel
)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: yt8m(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: yt8m(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
evaluation: Evaluation = dataclasses.field(
default_factory=lambda: Evaluation( # pylint: disable=g-long-lambda
average_precision=AveragePrecisionConfig()
)
)
gradient_clip_norm: float = 1.0
def add_trainer(
experiment: cfg.ExperimentConfig,
train_batch_size: int,
eval_batch_size: int,
learning_rate: float = 0.0001,
train_epochs: int = 50,
num_train_examples: int = YT8M_TRAIN_EXAMPLES,
num_val_examples: int = YT8M_VAL_EXAMPLES,
) -> cfg.ExperimentConfig:
"""Adds and config a trainer to the experiment config."""
if num_train_examples <= 0:
raise ValueError('Wrong train dataset size {!r}'.format(
experiment.task.train_data))
if num_val_examples <= 0:
raise ValueError('Wrong validation dataset size {!r}'.format(
experiment.task.validation_data))
experiment.task.train_data.global_batch_size = train_batch_size
experiment.task.validation_data.global_batch_size = eval_batch_size
steps_per_epoch = num_train_examples // train_batch_size
steps_per_loop = 500
experiment.trainer = cfg.TrainerConfig(
steps_per_loop=steps_per_loop,
summary_interval=steps_per_loop,
checkpoint_interval=steps_per_loop,
train_steps=train_epochs * steps_per_epoch,
validation_steps=num_val_examples // eval_batch_size,
validation_interval=steps_per_loop,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
'adam': {}
},
'learning_rate': {
'type': 'exponential',
'exponential': {
'initial_learning_rate': learning_rate,
'decay_rate': 0.95,
'decay_steps': int(steps_per_epoch * 1.5),
'offset': 500,
}
},
'warmup': {
'linear': {
'name': 'linear',
'warmup_learning_rate': 0,
'warmup_steps': 500,
},
'type': 'linear',
}
}))
return experiment
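# Arithmetic sketch for the trainer wiring above (hypothetical batch size):
# with train_batch_size=1024, steps_per_epoch = 3888919 // 1024 = 3797, so the
# exponential decay fires every int(3797 * 1.5) = 5695 steps after the
# 500-step linear warmup.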
@exp_factory.register_config_factory('yt8m_experiment')
def yt8m_experiment() -> cfg.ExperimentConfig:
"""Video classification general."""
exp_config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=YT8MTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
'task.train_data.feature_sizes != None',
'task.train_data.feature_names != None',
'task.train_data.feature_sources != None',
'task.train_data.feature_dtypes != None',
])
  # Per-core batch size for TPUv3 with 16GB HBM. `factor` in range(1, 26).
factor = 1
num_cores = 32 # for TPUv3 4x4
train_per_core_bs = 32 * factor
train_bs = train_per_core_bs * num_cores
eval_per_core_bs = 4 * 50 # multiplier<=100
eval_bs = eval_per_core_bs * num_cores
  # base lr=0.0001 for bs=512
return add_trainer(
exp_config,
train_batch_size=train_bs,
eval_batch_size=eval_bs,
learning_rate=0.0001 * (train_bs / 512),
train_epochs=100)
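# Sketch of how the registered experiment above is typically consumed
# (assumes the standard exp_factory flow; not part of the original module):
#   from official.core import exp_factory
#   config = exp_factory.get_exp_config('yt8m_experiment')
#   config.task.train_data.input_path = '/path/to/train*.tfrecord'
#   config.validate()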
| 10,309 | 33.481605 | 85 | py |
models | models-master/official/projects/yt8m/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
from official.projects.yt8m.configs import yt8m
| 692 | 37.5 | 74 | py |
models | models-master/official/projects/yt8m/dataloaders/yt8m_input_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.core import input_reader
from official.projects.yt8m.configs import yt8m as yt8m_configs
from official.projects.yt8m.dataloaders import utils
from official.projects.yt8m.dataloaders import yt8m_input
from official.vision.dataloaders import tfexample_utils
class Yt8mInputTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super().setUp()
self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
tf.io.gfile.makedirs(self._model_dir)
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self.data_path = os.path.join(data_dir, 'data.tfrecord')
self.num_segment = 6
examples = [
utils.make_yt8m_example(self.num_segment, 120 + i) for i in range(8)
]
tfexample_utils.dump_to_tfrecord(self.data_path, tf_examples=examples)
def create_input_reader(self, params):
decoder = yt8m_input.Decoder(input_params=params)
decoder_fn = decoder.decode
parser = yt8m_input.Parser(input_params=params)
parser_fn = parser.parse_fn(params.is_training)
postprocess = yt8m_input.PostBatchProcessor(input_params=params)
postprocess_fn = postprocess.post_fn
transform_batch = yt8m_input.TransformBatcher(input_params=params)
batch_fn = transform_batch.batch_fn
return input_reader.InputReader(
params,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn=decoder_fn,
parser_fn=parser_fn,
postprocess_fn=postprocess_fn,
transform_and_batch_fn=batch_fn)
@parameterized.parameters((True,), (False,))
def test_read_video_level_input(self, include_video_id):
params = yt8m_configs.yt8m(is_training=False)
params.global_batch_size = 4
params.segment_labels = False
params.input_path = self.data_path
params.include_video_id = include_video_id
reader = self.create_input_reader(params)
dataset = reader.read()
iterator = iter(dataset)
example = next(iterator)
for k, v in example.items():
logging.info('DEBUG read example %r %r %r', k, v.shape, type(v))
if include_video_id:
self.assertCountEqual(
['video_matrix', 'labels', 'num_frames', 'video_ids'], example.keys())
else:
self.assertCountEqual(['video_matrix', 'labels', 'num_frames'],
example.keys())
batch_size = params.global_batch_size
self.assertEqual(
example['video_matrix'].shape.as_list(),
[batch_size, params.num_sample_frames, sum(params.feature_sizes)],
)
self.assertEqual(example['labels'].shape.as_list(),
[batch_size, params.num_classes])
# Check non empty labels.
self.assertGreater(np.nonzero(example['labels'][0].numpy())[0].shape[0], 0)
self.assertEqual(example['num_frames'].shape.as_list(), [batch_size, 1])
if include_video_id:
self.assertEqual(example['video_ids'].shape.as_list(), [batch_size, 1])
@parameterized.parameters((True,), (False,))
def test_read_segment_level_input(self, include_video_id=False):
params = yt8m_configs.yt8m(is_training=False)
params.global_batch_size = 2
params.segment_labels = True
params.segment_size = 24
params.input_path = self.data_path
params.include_video_id = include_video_id
reader = self.create_input_reader(params)
dataset = reader.read()
iterator = iter(dataset)
example = next(iterator)
for k, v in example.items():
logging.info('DEBUG read example %r %r %r', k, v.shape, type(v))
if include_video_id:
self.assertCountEqual([
'video_matrix', 'labels', 'num_frames', 'label_weights', 'video_ids'
], example.keys())
else:
self.assertCountEqual(
['video_matrix', 'labels', 'num_frames', 'label_weights'],
example.keys())
batch_size = params.global_batch_size * self.num_segment
self.assertEqual(
example['video_matrix'].shape.as_list(),
[batch_size, params.num_sample_frames, sum(params.feature_sizes)],
)
self.assertEqual(example['labels'].shape.as_list(),
[batch_size, params.num_classes])
self.assertGreater(np.nonzero(example['labels'][0].numpy())[0].shape[0], 0)
self.assertEqual(example['num_frames'].shape.as_list(), [batch_size, 1])
self.assertEqual(example['label_weights'].shape.as_list(),
[batch_size, params.num_classes])
if include_video_id:
self.assertEqual(example['video_ids'].shape.as_list(), [batch_size])
@parameterized.parameters((True,), (False,))
def test_read_video_level_float_input(self, include_video_id):
data_dir = os.path.join(self.get_temp_dir(), 'data2')
tf.io.gfile.makedirs(data_dir)
data_path = os.path.join(data_dir, 'data2.tfrecord')
examples = [
utils.make_example_with_float_features(self.num_segment)
for _ in range(8)
]
tfexample_utils.dump_to_tfrecord(data_path, tf_examples=examples)
params = yt8m_configs.yt8m(is_training=False)
params.global_batch_size = 4
params.segment_labels = False
params.input_path = data_path
params.num_frames = 2
params.max_frames = 2
params.feature_names = ('VIDEO_EMBEDDING/context_feature/floats',
'FEATURE/feature/floats')
params.feature_sources = ('context', 'feature')
params.feature_dtypes = ('float32', 'float32')
params.feature_sizes = (256, 2048)
params.feature_from_bytes = (False, False)
params.label_field = 'clip/label/index'
params.include_video_id = include_video_id
reader = self.create_input_reader(params)
dataset = reader.read()
iterator = iter(dataset)
example = next(iterator)
for k, v in example.items():
logging.info('DEBUG read example %r %r %r', k, v.shape, type(v))
logging.info('DEBUG read example %r', example['video_matrix'][0, 0, :])
if include_video_id:
self.assertCountEqual(
['video_matrix', 'labels', 'num_frames', 'video_ids'], example.keys())
else:
self.assertCountEqual(['video_matrix', 'labels', 'num_frames'],
example.keys())
# Check tensor values.
expected_context = examples[0].context.feature[
'VIDEO_EMBEDDING/context_feature/floats'].float_list.value
expected_feature = examples[0].feature_lists.feature_list[
'FEATURE/feature/floats'].feature[0].float_list.value
expected_labels = examples[0].context.feature[
params.label_field].int64_list.value
self.assertAllEqual(expected_feature,
example['video_matrix'][0, 0, params.feature_sizes[0]:])
self.assertAllEqual(expected_context,
example['video_matrix'][0, 0, :params.feature_sizes[0]])
self.assertAllEqual(
np.nonzero(example['labels'][0, :].numpy())[0], expected_labels)
self.assertGreater(np.nonzero(example['labels'][0].numpy())[0].shape[0], 0)
# Check tensor shape.
batch_size = params.global_batch_size
self.assertEqual(
example['video_matrix'].shape.as_list(),
[batch_size, params.num_sample_frames, sum(params.feature_sizes)],
)
self.assertEqual(example['labels'].shape.as_list(),
[batch_size, params.num_classes])
self.assertEqual(example['num_frames'].shape.as_list(), [batch_size, 1])
if include_video_id:
self.assertEqual(example['video_ids'].shape.as_list(), [batch_size, 1])
if __name__ == '__main__':
tf.test.main()
| 8,215 | 38.883495 | 80 | py |
models | models-master/official/projects/yt8m/dataloaders/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of util functions for training and evaluating."""
from absl import logging
import numpy as np
import tensorflow as tf
from official.vision.dataloaders import tfexample_utils
def dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
"""Dequantize the feature from the byte format to the float format.
Args:
feat_vector: the input 1-d vector.
max_quantized_value: the maximum of the quantized value.
min_quantized_value: the minimum of the quantized value.
Returns:
A float vector which has the same shape as feat_vector.
"""
assert max_quantized_value > min_quantized_value
quantized_range = max_quantized_value - min_quantized_value
scalar = quantized_range / 255.0
bias = (quantized_range / 512.0) + min_quantized_value
return feat_vector * scalar + bias
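# Illustrative mapping for the defaults above (max=2, min=-2): scalar is
# 4/255 and bias is 4/512 - 2, so byte value 0 dequantizes to ~-1.992 and
# byte value 255 to ~2.008, recovering an approximately symmetric float range.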
def make_summary(name, value):
"""Creates a tf.Summary proto with the given name and value."""
  summary = tf.compat.v1.Summary()
val = summary.value.add()
val.tag = str(name)
val.simple_value = float(value)
return summary
def add_global_step_summary(summary_writer,
global_step_val,
global_step_info_dict,
summary_scope="Eval"):
"""Add the global_step summary to the Tensorboard.
Args:
summary_writer: Tensorflow summary_writer.
global_step_val: a int value of the global step.
global_step_info_dict: a dictionary of the evaluation metrics calculated for
a mini-batch.
summary_scope: Train or Eval.
Returns:
A string of this global_step summary
"""
this_hit_at_one = global_step_info_dict["hit_at_one"]
this_perr = global_step_info_dict["perr"]
this_loss = global_step_info_dict["loss"]
examples_per_second = global_step_info_dict.get("examples_per_second", -1)
summary_writer.add_summary(
make_summary("GlobalStep/" + summary_scope + "_Hit@1", this_hit_at_one),
global_step_val)
summary_writer.add_summary(
make_summary("GlobalStep/" + summary_scope + "_Perr", this_perr),
global_step_val)
summary_writer.add_summary(
make_summary("GlobalStep/" + summary_scope + "_Loss", this_loss),
global_step_val)
if examples_per_second != -1:
summary_writer.add_summary(
make_summary("GlobalStep/" + summary_scope + "_Example_Second",
examples_per_second), global_step_val)
summary_writer.flush()
info = (
"global_step {0} | Batch Hit@1: {1:.3f} | Batch PERR: {2:.3f} | Batch "
"Loss: {3:.3f} | Examples_per_sec: {4:.3f}").format(
global_step_val, this_hit_at_one, this_perr, this_loss,
examples_per_second)
return info
def add_epoch_summary(summary_writer,
global_step_val,
epoch_info_dict,
summary_scope="Eval"):
"""Add the epoch summary to the Tensorboard.
Args:
summary_writer: Tensorflow summary_writer.
global_step_val: a int value of the global step.
epoch_info_dict: a dictionary of the evaluation metrics calculated for the
whole epoch.
summary_scope: Train or Eval.
Returns:
A string of this global_step summary
"""
epoch_id = epoch_info_dict["epoch_id"]
avg_hit_at_one = epoch_info_dict["avg_hit_at_one"]
avg_perr = epoch_info_dict["avg_perr"]
avg_loss = epoch_info_dict["avg_loss"]
aps = epoch_info_dict["aps"]
gap = epoch_info_dict["gap"]
mean_ap = np.mean(aps)
summary_writer.add_summary(
make_summary("Epoch/" + summary_scope + "_Avg_Hit@1", avg_hit_at_one),
global_step_val)
summary_writer.add_summary(
make_summary("Epoch/" + summary_scope + "_Avg_Perr", avg_perr),
global_step_val)
summary_writer.add_summary(
make_summary("Epoch/" + summary_scope + "_Avg_Loss", avg_loss),
global_step_val)
summary_writer.add_summary(
make_summary("Epoch/" + summary_scope + "_MAP", mean_ap), global_step_val)
summary_writer.add_summary(
make_summary("Epoch/" + summary_scope + "_GAP", gap), global_step_val)
summary_writer.flush()
info = ("epoch/eval number {0} | Avg_Hit@1: {1:.3f} | Avg_PERR: {2:.3f} "
"| MAP: {3:.3f} | GAP: {4:.3f} | Avg_Loss: {5:3f} | num_classes: {6}"
).format(epoch_id, avg_hit_at_one, avg_perr, mean_ap, gap, avg_loss,
len(aps))
return info
def get_list_of_feature_names_and_sizes(feature_names, feature_sizes):
"""Extract the list of feature names and the dimensionality.
Args:
feature_names: string containing comma separated list of feature names
feature_sizes: string containing comma separated list of feature sizes
Returns:
List of the feature names and list of the dimensionality of each feature.
Elements in the first/second list are strings/integers.
"""
  list_of_feature_names = [
      name.strip() for name in feature_names.split(",")
  ]
  list_of_feature_sizes = [
      int(size) for size in feature_sizes.split(",")
  ]
if len(list_of_feature_names) != len(list_of_feature_sizes):
logging.error(
"length of the feature names (=%r) != length of feature "
"sizes (=%r)", str(len(list_of_feature_names)),
str(len(list_of_feature_sizes)))
return list_of_feature_names, list_of_feature_sizes
def make_yt8m_example(
num_segment: int = 5, num_frames: int = 120
) -> tf.train.SequenceExample:
"""Generate fake data for unit tests."""
rgb = np.random.randint(low=256, size=1024, dtype=np.uint8)
audio = np.random.randint(low=256, size=128, dtype=np.uint8)
seq_example = tf.train.SequenceExample()
seq_example.context.feature["id"].bytes_list.value[:] = [b"id001"]
seq_example.context.feature["labels"].int64_list.value[:] = [1, 2, 3, 4]
seq_example.context.feature["segment_labels"].int64_list.value[:] = (
[4] * num_segment)
seq_example.context.feature["segment_start_times"].int64_list.value[:] = [
i * 5 for i in range(num_segment)
]
seq_example.context.feature["segment_scores"].float_list.value[:] = (
[0.5] * num_segment)
tfexample_utils.put_bytes_list_to_feature(
seq_example, rgb.tobytes(), key="rgb", repeat_num=num_frames)
tfexample_utils.put_bytes_list_to_feature(
seq_example, audio.tobytes(), key="audio", repeat_num=num_frames)
return seq_example
# TODO(yeqing): Move the test related functions to test_utils.
def make_example_with_float_features(
num_segment: int = 5) -> tf.train.SequenceExample:
"""Generate fake data for unit tests."""
rgb = np.random.rand(1, 2048).astype(np.float32)
audio = np.random.rand(256).astype(np.float32)
seq_example = tf.train.SequenceExample()
seq_example.context.feature["id"].bytes_list.value[:] = [b"id001"]
seq_example.context.feature["clip/label/index"].int64_list.value[:] = [
1, 2, 3, 4
]
seq_example.context.feature["segment_labels"].int64_list.value[:] = (
[4] * num_segment)
seq_example.context.feature["segment_start_times"].int64_list.value[:] = [
i * 5 for i in range(num_segment)
]
seq_example.context.feature["segment_scores"].float_list.value[:] = (
[0.] * num_segment)
seq_example.context.feature[
"VIDEO_EMBEDDING/context_feature/floats"].float_list.value[:] = (
audio.tolist())
tfexample_utils.put_float_list_to_feature(
seq_example, rgb.tolist(), key="FEATURE/feature/floats")
return seq_example
def sample_random_sequence(batch_video_matrix, num_frames, num_samples):
"""Samples a random sequence of frames of size num_samples.
Args:
batch_video_matrix: tensor of shape [batch_size x max_frames x feature_size]
num_frames: tensor of shape [batch_size x 1]
num_samples: a scalar indicating the number of samples
Returns:
reshaped batch_video_matrix in [batch_size x 'num_samples' x feature_size]
"""
batch_size = tf.shape(batch_video_matrix)[0]
frame_index_offset = tf.tile(
tf.expand_dims(tf.range(num_samples), 0), [batch_size, 1])
max_start_frame_index = tf.maximum(num_frames - num_samples, 0)
start_frame_index = tf.cast(
tf.multiply(
tf.random.uniform([batch_size, 1]),
tf.cast(max_start_frame_index + 1, tf.float32)), tf.int32)
frame_index = tf.minimum(start_frame_index + frame_index_offset,
tf.cast(num_frames - 1, tf.int32))
batch_index = tf.tile(
tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
index = tf.stack([batch_index, frame_index], 2)
return tf.gather_nd(batch_video_matrix, index)
def sample_random_frames(batch_video_matrix, num_frames, num_samples):
"""Samples a random set of frames of size num_samples.
Args:
batch_video_matrix: tensor of shape [batch_size x max_frames x feature_size]
num_frames: tensor of shape [batch_size x 1]
num_samples (int): a scalar indicating the number of samples
Returns:
reshaped batch_video_matrix in [batch_size x 'num_samples' x feature_size]
"""
batch_size = tf.shape(batch_video_matrix)[0]
frame_index = tf.cast(
tf.multiply(
tf.random.uniform([batch_size, num_samples]),
tf.tile(num_frames, [1, num_samples])), tf.int32)
batch_index = tf.tile(
tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
index = tf.stack([batch_index, frame_index], 2)
return tf.gather_nd(batch_video_matrix, index)
def sample_video_frames(
batch_video_matrix: tf.Tensor,
num_frames: tf.Tensor,
random_frames: bool = True,
num_sample_frames: int = 25,
):
"""Preprocesses input to sample frames."""
# Sample random frames / random sequence.
num_frames = tf.cast(num_frames, tf.float32)
if random_frames:
batch_video_matrix = sample_random_frames(
batch_video_matrix, num_frames, num_sample_frames
)
else:
batch_video_matrix = sample_random_sequence(
batch_video_matrix, num_frames, num_sample_frames
)
return batch_video_matrix
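# Shape sketch for the samplers above (hypothetical sizes): given a
# batch_video_matrix of shape [8, 300, 1152], num_frames of shape [8, 1] and
# num_sample_frames=25, sample_video_frames returns [8, 25, 1152]; a random
# set of frames when random_frames=True, otherwise a random contiguous
# sequence.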
| 10,578 | 35.47931 | 80 | py |
models | models-master/official/projects/yt8m/dataloaders/yt8m_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""class YT8MFrameFeatureReader(BaseReader).
Reads TFRecords of SequenceExamples.
The TFRecords must contain SequenceExamples with the sparse in64 'labels'
context feature and a fixed length byte-quantized feature vector, obtained
from the features in 'feature_names'. The quantized features will be mapped
back into a range between min_quantized_value and max_quantized_value.
link for details: https://research.google.com/youtube8m/download.html
"""
from typing import Any, Dict
import tensorflow as tf
from official.projects.yt8m.dataloaders import utils
from official.vision.configs import video_classification as exp_cfg
from official.vision.dataloaders import decoder
from official.vision.dataloaders import parser
def resize_axis(tensor, axis, new_size, fill_value=0):
"""Truncates or pads a tensor to new_size on a given axis.
Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
size increases, the padding will be performed at the end, using fill_value.
Args:
tensor: The tensor to be resized.
axis: An integer representing the dimension to be sliced.
new_size: An integer or 0d tensor representing the new value for
tensor.shape[axis].
fill_value: Value to use to fill any new entries in the tensor. Will be cast
to the type of tensor.
Returns:
The resized tensor.
"""
tensor = tf.convert_to_tensor(tensor)
shape = tf.unstack(tf.shape(tensor))
pad_shape = shape[:]
pad_shape[axis] = tf.maximum(0, new_size - shape[axis])
shape[axis] = tf.minimum(shape[axis], new_size)
shape = tf.stack(shape)
resized = tf.concat([
tf.slice(tensor, tf.zeros_like(shape), shape),
tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
], axis)
# Update shape.
new_shape = tensor.shape.as_list() # A copy is being made.
new_shape[axis] = new_size
resized = tf.ensure_shape(resized, new_shape)
return resized
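# Illustrative sketch for resize_axis (hypothetical values): a [2, 3] tensor
# resized to new_size=5 on axis=1 keeps all original entries and pads two
# zero-filled columns, while new_size=2 truncates to the first two columns.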
def _process_segment_and_label(video_matrix, num_frames, contexts,
segment_labels, segment_size,
num_classes) -> Dict[str, tf.Tensor]:
"""Processes a batched Tensor of frames.
The same parameters used in process should be used here.
Args:
video_matrix: different features concatenated into one matrix
num_frames: Number of frames per subclip.
contexts: context information extracted from decoder
segment_labels: if we read segment labels instead.
segment_size: the segment_size used for reading segments. Segment length.
num_classes: a positive integer for the number of classes.
Returns:
output: dictionary containing batch information
"""
# Partition frame-level feature matrix to segment-level feature matrix.
batch_video_ids = None
if segment_labels:
start_times = contexts["segment_start_times"].values
    # Here we assume all the segments that started at the same start time
    # have the same segment_size.
uniq_start_times, seg_idxs = tf.unique(start_times, out_idx=tf.dtypes.int64)
# Range gather matrix, e.g., [[0,1,2],[1,2,3]] for segment_size == 3.
range_mtx = tf.expand_dims(
uniq_start_times, axis=-1) + tf.expand_dims(
tf.range(0, segment_size, dtype=tf.int64), axis=0)
# Shape: [num_segment, segment_size, feature_dim].
batch_video_matrix = tf.gather_nd(video_matrix,
tf.expand_dims(range_mtx, axis=-1))
num_segment = tf.shape(batch_video_matrix)[0]
if "id" in contexts:
batch_video_ids = tf.reshape(
tf.tile([contexts["id"]], [num_segment]), (num_segment,))
batch_frames = tf.reshape(
tf.tile([segment_size], [num_segment]), (num_segment, 1))
batch_frames = tf.cast(batch_frames, tf.int32)
# For segment labels, all labels are not exhaustively rated. So we only
# evaluate the rated labels.
# Label indices for each segment, shape: [num_segment, 2].
label_indices = tf.stack([seg_idxs, contexts["segment_labels"].values],
axis=-1)
label_values = contexts["segment_scores"].values
sparse_labels = tf.sparse.SparseTensor(label_indices, label_values,
(num_segment, num_classes))
batch_labels = tf.sparse.to_dense(sparse_labels, validate_indices=False)
sparse_label_weights = tf.sparse.SparseTensor(
label_indices, tf.ones_like(label_values, dtype=tf.float32),
(num_segment, num_classes))
batch_label_weights = tf.sparse.to_dense(
sparse_label_weights, validate_indices=False)
else:
# Process video-level labels.
label_indices = contexts["labels"].values
sparse_labels = tf.sparse.SparseTensor(
tf.expand_dims(label_indices, axis=-1),
tf.ones_like(contexts["labels"].values, dtype=tf.float32),
(num_classes,),
)
labels = tf.sparse.to_dense(
sparse_labels, validate_indices=False)
# convert to batch format.
if "id" in contexts:
batch_video_ids = tf.expand_dims(contexts["id"], 0)
batch_video_matrix = tf.expand_dims(video_matrix, 0)
batch_labels = tf.expand_dims(labels, 0)
batch_frames = tf.expand_dims(num_frames, 0)
batch_label_weights = None
output_dict = {
"video_matrix": batch_video_matrix,
"labels": batch_labels,
"num_frames": batch_frames,
}
if batch_video_ids is not None:
output_dict["video_ids"] = batch_video_ids
if batch_label_weights is not None:
output_dict["label_weights"] = batch_label_weights
return output_dict
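# Illustrative sketch of the segment gather above (hypothetical values): with
# segment_start_times [0, 5] and segment_size=3, range_mtx is
# [[0, 1, 2], [5, 6, 7]], so tf.gather_nd slices two [3, feature_dim]
# segments out of the frame-level video_matrix.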
def _get_video_matrix(features, feature_size, dtype, max_frames,
max_quantized_value, min_quantized_value):
"""Decodes features from an input string and quantizes it.
Args:
features: raw feature values.
feature_size: length of each frame feature vector.
dtype: raw type of the feature.
max_frames: number of frames (rows) in the output feature_matrix.
max_quantized_value: the maximum of the quantized value.
min_quantized_value: the minimum of the quantized value.
Returns:
feature_matrix: matrix of all frame-features
num_frames: number of frames in the sequence
"""
decoded_features = tf.reshape(features, [-1, feature_size])
if dtype.is_integer:
feature_matrix = utils.dequantize(decoded_features, max_quantized_value,
min_quantized_value)
else:
feature_matrix = decoded_features
num_frames = tf.math.minimum(tf.shape(decoded_features)[0], max_frames)
feature_matrix = feature_matrix[:num_frames]
return feature_matrix, num_frames
def _concat_features(features, feature_names, feature_sizes, feature_dtypes,
max_frames, max_quantized_value, min_quantized_value):
"""Loads (potentially) different types of features and concatenates them.
Args:
features: raw feature values
feature_names: list of feature names
feature_sizes: list of features sizes
feature_dtypes: dtype of the feature.
max_frames: number of frames in the sequence
max_quantized_value: the maximum of the quantized value.
min_quantized_value: the minimum of the quantized value.
Returns:
video_matrix: different features concatenated into one matrix
num_frames: the number of frames in the video
"""
num_features = len(feature_names)
assert num_features > 0, "No feature selected: feature_names is empty!"
assert len(feature_names) == len(feature_sizes), (
"length of feature_names (={}) != length of feature_sizes (={})".format(
len(feature_names), len(feature_sizes)))
  assert len(feature_names) == len(feature_dtypes), (
      "length of feature_names (={}) != length of feature_dtypes (={})".format(
          len(feature_names), len(feature_dtypes)))
num_frames = -1 # the number of frames in the video
feature_matrices = [None] * num_features # an array of different features
for i in range(num_features):
feature_matrix, num_frames_in_this_feature = _get_video_matrix(
features[feature_names[i]], feature_sizes[i],
tf.dtypes.as_dtype(feature_dtypes[i]), max_frames, max_quantized_value,
min_quantized_value)
if num_frames == -1:
num_frames = num_frames_in_this_feature
feature_matrices[i] = feature_matrix
# Concatenate different features.
video_matrix = tf.concat(feature_matrices, 1)
return video_matrix, num_frames
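# Illustrative sketch for _concat_features (hypothetical sizes): with
# feature_names ('rgb', 'audio') and feature_sizes (1024, 128), the two
# per-feature matrices are concatenated along the channel axis into a
# [num_frames, 1152] video_matrix, matching the default input_specs of the
# classification model.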
class Decoder(decoder.Decoder):
"""A tf.train.SequeneExample decoder for classification task."""
def __init__(
self,
input_params: exp_cfg.DataConfig,
):
self._segment_labels = input_params.segment_labels
self._feature_names = input_params.feature_names
self._feature_sources = input_params.feature_sources
self._feature_sizes = input_params.feature_sizes
self._feature_dtypes = input_params.feature_dtypes
self._feature_from_bytes = input_params.feature_from_bytes
self._include_video_id = input_params.include_video_id
self._label_field = input_params.label_field
    assert len(self._feature_names) == len(self._feature_sources), (
        "length of feature_names (={}) != length of feature_sources "
        "(={})".format(len(self._feature_names), len(self._feature_sources)))
self._context_features = {}
self._sequence_features = {}
if self._include_video_id:
self._context_features["id"] = tf.io.FixedLenFeature([], tf.string)
if self._segment_labels:
self._context_features.update({
# There is no need to read end-time given we always assume the segment
# has the same size.
"segment_labels": tf.io.VarLenFeature(tf.int64),
"segment_start_times": tf.io.VarLenFeature(tf.int64),
"segment_scores": tf.io.VarLenFeature(tf.float32)
})
else:
self._add_labels_specification()
for i, name in enumerate(self._feature_names):
if self._feature_from_bytes[i]:
feature_type = tf.io.FixedLenSequenceFeature([], dtype=tf.string)
else:
dtype = tf.dtypes.as_dtype(self._feature_dtypes[i])
feature_shape = [self._feature_sizes[i]]
if self._feature_sources[i] == "feature":
feature_type = tf.io.FixedLenSequenceFeature(feature_shape, dtype)
else:
feature_type = tf.io.FixedLenFeature(feature_shape, dtype)
if self._feature_sources[i] == "feature":
self._sequence_features[name] = feature_type
elif self._feature_sources[i] == "context":
self._context_features[name] = feature_type
else:
raise ValueError(
f"Unknown feature source {self._feature_sources[i]} for {name}")
def _add_labels_specification(self):
if not self._label_field:
raise ValueError(f"Invalid label field: {self._label_field}!")
self._context_features.update(
{self._label_field: tf.io.VarLenFeature(tf.int64)})
def decode(self,
serialized_example: tf.train.SequenceExample) -> Dict[str, Any]:
"""Parses a single tf.train.SequenceExample into video and label tensors."""
contexts, features = tf.io.parse_single_sequence_example(
serialized_example,
context_features=self._context_features,
sequence_features=self._sequence_features)
decoded_tensor = {**contexts, **features}
for i, name in enumerate(self._feature_names):
# Convert the VarLen feature to dense tensor.
if self._feature_from_bytes[i]:
dtype = tf.dtypes.as_dtype(self._feature_dtypes[i])
decoded_tensor[name] = tf.cast(
tf.io.decode_raw(decoded_tensor[name], out_type=dtype), tf.float32
)
else:
if isinstance(decoded_tensor[name], tf.SparseTensor):
decoded_tensor[name] = tf.sparse.to_dense(decoded_tensor[name])
return decoded_tensor
class Parser(parser.Parser):
"""Parses a video and label dataset.
takes the decoded raw tensors dict
and parse them into a dictionary of tensors
that can be consumed by the model.
It will be executed after decoder.
"""
def __init__(
self,
input_params: exp_cfg.DataConfig,
max_quantized_value=2,
min_quantized_value=-2,
):
self._num_classes = input_params.num_classes
self._label_field = input_params.label_field
self._segment_size = input_params.segment_size
self._segment_labels = input_params.segment_labels
self._include_video_id = input_params.include_video_id
self._feature_names = input_params.feature_names
self._feature_sources = input_params.feature_sources
self._feature_sizes = input_params.feature_sizes
self._feature_dtypes = input_params.feature_dtypes
self._max_frames = input_params.max_frames
self._sample_random_frames = input_params.sample_random_frames
self._num_sample_frames = input_params.num_sample_frames
self._max_quantized_value = max_quantized_value
self._min_quantized_value = min_quantized_value
def _parse_train_data(self, decoded_tensors):
"""Parses data for training."""
# loads (potentially) different types of features and concatenates them
video_matrix, num_frames = _concat_features(
decoded_tensors, self._feature_names, self._feature_sizes,
self._feature_dtypes, self._max_frames, self._max_quantized_value,
self._min_quantized_value)
if not self._include_video_id and "id" in decoded_tensors:
del decoded_tensors["id"]
outputs = self._process_label(video_matrix, num_frames, decoded_tensors)
if self._num_sample_frames is not None:
outputs["video_matrix"] = utils.sample_video_frames(
outputs["video_matrix"],
tf.reshape(outputs["num_frames"], [-1, 1]),
random_frames=self._sample_random_frames,
num_sample_frames=self._num_sample_frames,
)
return outputs
def _parse_eval_data(self, decoded_tensors):
"""Parses data for evaluation."""
# loads (potentially) different types of features and concatenates them
video_matrix, num_frames = _concat_features(
decoded_tensors, self._feature_names, self._feature_sizes,
self._feature_dtypes, self._max_frames, self._max_quantized_value,
self._min_quantized_value)
if not self._include_video_id and "id" in decoded_tensors:
del decoded_tensors["id"]
outputs = self._process_label(video_matrix, num_frames, decoded_tensors)
if self._num_sample_frames is not None:
outputs["video_matrix"] = utils.sample_video_frames(
outputs["video_matrix"],
tf.reshape(outputs["num_frames"], [-1, 1]),
random_frames=self._sample_random_frames,
num_sample_frames=self._num_sample_frames,
)
return outputs
def _process_label(self, video_matrix, num_frames, contexts):
"""Processes a batched Tensor of frames.
Args:
video_matrix: video feature matric.
num_frames: number of frames in this video.
contexts: context information extracted from decoder.
Returns:
output: dictionary containing batch information
"""
if self._label_field and not self._segment_labels:
contexts["labels"] = contexts[self._label_field]
output_dict = _process_segment_and_label(video_matrix, num_frames, contexts,
self._segment_labels,
self._segment_size,
self._num_classes)
return output_dict
def parse_fn(self, is_training):
"""Returns a parse fn that reads and parses raw tensors from the decoder.
Args:
is_training: a `bool` to indicate whether it is in training mode.
Returns:
parse: a `callable` that takes the serialized example and generate the
images, labels tuple where labels is a dict of Tensors that contains
labels.
"""
def parse(decoded_tensors):
"""Parses the serialized example data."""
# Concatenate video features to all frames if there are both video-level
# (context) and frame-level (feature) features.
if "feature" in self._feature_sources:
        # Take the first frame feature matrix; any frame feature works here
        # since we assume all frame features have the same number of frames.
feature_idx = self._feature_sources.index("feature")
num_frames = tf.shape(
decoded_tensors[self._feature_names[feature_idx]]
)[0]
for feature_idx, feature_source in enumerate(self._feature_sources):
if feature_source == "context":
feature_name = self._feature_names[feature_idx]
context_tensor = tf.reshape(
decoded_tensors[feature_name],
shape=(1, self._feature_sizes[feature_idx]),
)
decoded_tensors[feature_name] = tf.tile(
context_tensor, [num_frames, 1]
)
if is_training:
return self._parse_train_data(decoded_tensors)
else:
return self._parse_eval_data(decoded_tensors)
return parse
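  # Tiling sketch for parse() above (hypothetical sizes): a 'context' feature
  # of size 256 is reshaped to [1, 256] and tiled to [num_frames, 256] so it
  # can be concatenated with the per-frame features downstream.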
class TransformBatcher():
"""Performs manual batching on input dataset."""
def __init__(self, input_params: exp_cfg.DataConfig):
self._segment_labels = input_params.segment_labels
self._global_batch_size = input_params.global_batch_size
self._is_training = input_params.is_training
self._include_video_id = input_params.include_video_id
self._drop_remainder = input_params.drop_remainder
def batch_fn(self, dataset, input_context):
"""Add padding when segment_labels is true."""
per_replica_batch_size = input_context.get_per_replica_batch_size(
self._global_batch_size) if input_context else self._global_batch_size
# Add padding specifications.
pad_values = {
"video_matrix": 0.0,
"labels": -1.0,
"num_frames": 0,
}
if self._include_video_id:
pad_values["video_ids"] = None
if self._segment_labels:
pad_values["label_weights"] = 0.0
dataset = dataset.padded_batch(
per_replica_batch_size,
padding_values=pad_values,
drop_remainder=self._drop_remainder,
)
return dataset
class PostBatchProcessor():
"""Processes a video and label dataset which is batched."""
def __init__(self, input_params: exp_cfg.DataConfig):
self.segment_labels = input_params.segment_labels
self.num_classes = input_params.num_classes
self.num_sample_frames = input_params.num_sample_frames
self.num_features = sum(input_params.feature_sizes)
def post_fn(self, batched_tensors: Dict[str,
tf.Tensor]) -> Dict[str, tf.Tensor]:
"""Processes batched Tensors."""
video_ids = batched_tensors.get("video_ids", None)
video_matrix = batched_tensors["video_matrix"]
labels = batched_tensors["labels"]
num_frames = batched_tensors["num_frames"]
if self.segment_labels:
# [batch x num_segment x num_sample_frames x num_features]
# -> [batch * num_segment x num_sample_frames x num_features]
if video_ids is not None:
video_ids = tf.reshape(video_ids, [-1])
video_matrix = tf.reshape(video_matrix,
[-1, self.num_sample_frames, self.num_features])
labels = tf.reshape(labels, [-1, self.num_classes])
num_frames = tf.reshape(num_frames, [-1, 1])
batched_tensors["label_weights"] = tf.reshape(
batched_tensors["label_weights"], [-1, self.num_classes])
else:
# NOTE(b/237445211): Must provide axis argument to tf.squeeze.
video_matrix = tf.squeeze(video_matrix, axis=1)
labels = tf.squeeze(labels, axis=1)
num_frames = tf.reshape(num_frames, [-1, 1])
if "label_weights" in batched_tensors:
batched_tensors["label_weights"] = tf.squeeze(
batched_tensors["label_weights"], axis=1)
batched_tensors.update({
"video_matrix": video_matrix,
"labels": labels,
"num_frames": num_frames,
})
if video_ids is not None:
batched_tensors["video_ids"] = video_ids
return batched_tensors
| 20,769 | 38.411765 | 80 | py |
models | models-master/official/projects/yt8m/modeling/yt8m_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YT8M prediction model definition."""
import functools
from typing import Any, Optional
from absl import logging
import tensorflow as tf
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.modeling import backbones # pylint: disable=unused-import
from official.projects.yt8m.modeling import heads
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
class VideoClassificationModel(tf.keras.Model):
"""A video classification model class builder.
The model consists of a backbone (dbof) and a classification head.
The dbof backbone projects features for each frame into a higher dimensional
'clustering' space, pools across frames in that space, and then
uses a configurable video-level model to classify the now aggregated features.
The model will randomly sample either frames or sequences of frames during
training to speed up convergence.
"""
def __init__(
self,
params: yt8m_cfg.VideoClassificationModel,
backbone: Optional[tf.keras.Model] = None,
num_classes: int = 3862,
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, 1152]
),
l2_weight_decay: Optional[float] = None,
**kwargs,
):
"""YT8M video classification model initialization function.
Args:
params: Model configuration parameters.
backbone: Optional backbone model. Will build a backbone if None.
num_classes: `int` number of classes in dataset.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
[batch_size x num_frames x num_features]
l2_weight_decay: An optional `float` of kernel regularizer weight decay.
**kwargs: keyword arguments to be passed.
"""
super().__init__()
self._params = params
self._num_classes = num_classes
self._input_specs = input_specs
self._l2_weight_decay = l2_weight_decay
self._config_dict = {
"params": params,
"input_specs": input_specs,
"num_classes": num_classes,
"l2_weight_decay": l2_weight_decay,
}
if backbone is None:
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay / 2.0)
if l2_weight_decay
else None
)
backbone = factory.build_backbone(
input_specs=input_specs,
backbone_config=params.backbone,
norm_activation_config=params.norm_activation,
l2_regularizer=l2_regularizer,
**kwargs,
)
self.backbone = backbone
self.build_head()
def build_head(self):
logging.info("Build DbofModel with %s.", self._params.head.type)
head_cfg = self._params.head.get()
if self._params.head.type == "moe":
normalizer_params = dict(
synchronized=self._params.norm_activation.use_sync_bn,
momentum=self._params.norm_activation.norm_momentum,
epsilon=self._params.norm_activation.norm_epsilon,
)
aggregation_head = functools.partial(
heads.MoeModel, normalizer_params=normalizer_params
)
elif self._params.head.type == "logistic":
aggregation_head = heads.LogisticModel
else:
      logging.warning("Skip build head type: %s", self._params.head.type)
return
l2_regularizer = (
tf.keras.regularizers.l2(self._l2_weight_decay / 2.0)
if self._l2_weight_decay
else None
)
self.head = aggregation_head(
input_specs=layers.InputSpec(
shape=[None, self._params.backbone.get().hidden_size]
),
vocab_size=self._num_classes,
l2_regularizer=l2_regularizer,
**head_cfg.as_dict(),
)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
def call(
self, inputs: tf.Tensor, training: Any = None, mask: Any = None
) -> dict[str, tf.Tensor]:
features = self.backbone(inputs)
outputs = self.head(features)
return outputs
@property
def checkpoint_items(self) -> dict[str, Any]:
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone, head=self.head)
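# Minimal construction sketch (assumes the default config values; not part of
# the original module):
#   params = yt8m_cfg.VideoClassificationModel()
#   model = VideoClassificationModel(params=params, num_classes=3862)
#   outputs = model(tf.zeros([2, 30, 1152]))  # dict of head outputs.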
| 5,030 | 33.22449 | 86 | py |
models | models-master/official/projects/yt8m/modeling/yt8m_model_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of util functions for model construction."""
from typing import Any, Dict, Optional, Union
import tensorflow as tf
def frame_pooling(frames, method):
"""Pools over the frames of a video.
Args:
frames: tensor of shape [batch_size, num_frames, feature_size].
    method: string indicating pooling method, one of: "average", "max", or
      "none".
  Returns:
    tensor of shape [batch_size, feature_size] for average or max pooling,
    and shape [batch_size*num_frames, feature_size] for none pooling.
  Raises:
    ValueError: if method is other than "average", "max", or "none".
"""
if method == "average":
reduced = tf.reduce_mean(frames, 1)
elif method == "max":
reduced = tf.reduce_max(frames, 1)
elif method == "none":
    feature_size = frames.shape.as_list()[2]
reduced = tf.reshape(frames, [-1, feature_size])
else:
raise ValueError("Unrecognized pooling method: %s" % method)
return reduced
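# Example (hedged sketch, not part of the original module): pooling a toy
# batch of frame features. The shapes below are illustrative only.
if __name__ == "__main__":
  _frames = tf.random.uniform([2, 16, 1152])  # [batch, num_frames, features]
  print(frame_pooling(_frames, "average").shape)  # (2, 1152)
  print(frame_pooling(_frames, "none").shape)  # (2 * 16, 1152)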
def context_gate(
input_features,
normalizer_fn=None,
normalizer_params: Optional[Dict[str, Any]] = None,
    kernel_initializer: Union[
        str, tf.keras.initializers.Initializer] = "glorot_uniform",
    kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
    bias_initializer: Union[str, tf.keras.initializers.Initializer] = "zeros",
hidden_layer_size: int = 0,
pooling_method: Optional[str] = None,
additive_residual: bool = False):
"""Context Gating.
More details: https://arxiv.org/pdf/1706.06905.pdf.
Args:
input_features: a tensor of at least rank 2.
    normalizer_fn: Normalization layer to use instead of `biases` (e.g.
      tf.keras.layers.BatchNormalization). If None, bias is added.
    normalizer_params: Normalization function parameters.
    kernel_initializer: Weight initializer to use instead of Xavier (e.g.
      tf.keras.initializers.VarianceScaling).
    kernel_regularizer: Weight regularizer to use instead of None (e.g.,
      tf.keras.regularizers.l2(l2_penalty)).
    bias_initializer: Biases initializer to use (default tf.zeros_initializer)
    hidden_layer_size: Dimensionality of the context gating hidden layer size,
      if any. If 0 (the default), will apply a fully-connected context gating
      layer with shape [input_size x input_size]. If set to an int N >= 2,
      will factorize the context gating layer into [input_size x N] x [N x
      input_size] as in the squeeze-and-excitation block from
      https://arxiv.org/pdf/1709.01507.pdf.
pooling_method: Whether to perform global pooling of the local features
before applying the context gating layer. This is relevant only if the
input_features tensor has rank > 2, e.g., it's a sequence of frame
features, [batch_size, num_frames, feature_dim], or spatial convolution
features, [batch_size*num_frames, h, w, feature_dim]. If the inputs are a
set of local features and pooling_method is not None, will pool features
across all but the batch_size dimension using the specified pooling
method, and pass the aggregated features as context to the gating layer.
For a list of pooling methods, see the frame_pooling() function.
    additive_residual: If true, will use ReLU6-activated (additive) residual
      connections instead of sigmoid-activated (multiplicative) connections
      when combining the input_features with the context gating branch.
Returns:
A tensor with the same shape as input_features.
"""
if normalizer_params is None:
normalizer_params = {}
with tf.name_scope("ContextGating"):
num_dimensions = len(input_features.shape.as_list())
feature_size = input_features.shape.as_list()[-1]
if pooling_method:
assert num_dimensions > 2
# Collapse the inner axes of the original features shape into a 3D tensor
original_shape = tf.shape(input_features)
# The last dimension will change after concatenating the context
new_shape = tf.concat(
[original_shape[:-1],
tf.constant([2 * feature_size])], 0)
batch_size = original_shape[0]
reshaped_features = tf.reshape(input_features,
[batch_size, -1, feature_size])
num_features = tf.shape(reshaped_features)[1]
# Pool the feature channels across the inner axes to get global context
context_features = frame_pooling(reshaped_features, pooling_method)
context_features = tf.expand_dims(context_features, 1)
# Replicate the global context features and concat to the local features.
context_features = tf.tile(context_features, [1, num_features, 1])
context_features = tf.concat([reshaped_features, context_features], 2)
context_features = tf.reshape(context_features, shape=new_shape)
else:
context_features = input_features
if hidden_layer_size >= 2:
gates_bottleneck = tf.keras.layers.Dense(
hidden_layer_size,
activation="relu6",
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
)(context_features)
if normalizer_fn:
gates_bottleneck = normalizer_fn(**normalizer_params)(gates_bottleneck)
else:
gates_bottleneck = context_features
activation_fn = (tf.nn.relu6 if additive_residual else tf.nn.sigmoid)
gates = tf.keras.layers.Dense(
feature_size,
activation=activation_fn,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
)(gates_bottleneck)
if normalizer_fn:
gates = normalizer_fn(**normalizer_params)(gates)
if additive_residual:
input_features += tf.cast(gates, input_features.dtype)
else:
input_features *= tf.cast(gates, input_features.dtype)
return input_features
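# Example (hedged sketch): gating a batch of pooled video-level features. The
# feature size, batch size, and bottleneck width are illustrative assumptions.
if __name__ == "__main__":
  _features = tf.random.uniform([4, 1024])
  _gated = context_gate(
      _features,
      normalizer_fn=tf.keras.layers.BatchNormalization,
      hidden_layer_size=64)  # SE-style factorized bottleneck
  print(_gated.shape)  # (4, 1024)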
| 6,547 | 41.519481 | 80 | py |
models | models-master/official/projects/yt8m/modeling/yt8m_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yt8m network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.modeling import yt8m_model
class YT8MNetworkTest(parameterized.TestCase, tf.test.TestCase):
"""Class for testing yt8m network."""
# test_yt8m_network_creation arbitrary params
@parameterized.parameters((32, 1152), (24, 1152)) # 1152 = 1024 + 128
def test_yt8m_network_creation(self, num_frames, feature_dims):
"""Test for creation of a YT8M Model.
Args:
num_frames: number of frames.
feature_dims: indicates total dimension size of the features.
"""
input_specs = tf.keras.layers.InputSpec(shape=[None, None, feature_dims])
num_classes = 3862
model = yt8m_model.VideoClassificationModel(
params=yt8m_cfg.YT8MTask().model,
num_classes=num_classes,
input_specs=input_specs,
)
# batch = 2 -> arbitrary value for test.
inputs = np.random.rand(2, num_frames, feature_dims)
predictions = model(inputs)['predictions']
self.assertAllEqual([2, num_classes], predictions.numpy().shape)
def test_serialize_deserialize(self):
model = yt8m_model.VideoClassificationModel(
params=yt8m_cfg.YT8MTask().model
)
config = model.get_config()
new_model = yt8m_model.VideoClassificationModel.from_config(config)
# If the serialization was successful,
# the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,225 | 32.727273 | 77 | py |
models | models-master/official/projects/yt8m/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yt8m/modeling/backbones/dbof.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dbof model definitions."""
import functools
from typing import Optional
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.modeling import yt8m_model_utils as utils
from official.vision.configs import common
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
class Dbof(tf.keras.Model):
"""A YT8M model class builder.
Creates a Deep Bag of Frames model.
The model projects the features for each frame into a higher dimensional
'clustering' space, pools across frames in that space, and then
uses a configurable video-level model to classify the now aggregated features.
The model will randomly sample either frames or sequences of frames during
training to speed up convergence.
"""
def __init__(
self,
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, 1152]
),
params: yt8m_cfg.DbofModel = yt8m_cfg.DbofModel(),
norm_activation: common.NormActivation = common.NormActivation(),
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs,
):
"""YT8M initialization function.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
[batch_size x num_frames x num_features].
params: model configuration parameters.
norm_activation: Model normalization and activation configs.
l2_regularizer: An optional kernel weight regularizer.
**kwargs: keyword arguments to be passed.
"""
self._self_setattr_tracking = False
self._input_specs = input_specs
self._params = params
self._norm_activation = norm_activation
self._act_fn = tf_utils.get_activation(self._norm_activation.activation)
self._norm = functools.partial(
layers.BatchNormalization,
momentum=self._norm_activation.norm_momentum,
epsilon=self._norm_activation.norm_epsilon,
synchronized=self._norm_activation.use_sync_bn,
)
# [batch_size x num_frames x num_features]
feature_size = input_specs.shape[-1]
# shape 'excluding' batch_size
model_input = tf.keras.Input(shape=self._input_specs.shape[1:])
# normalize input features
input_data = tf.nn.l2_normalize(model_input, -1)
tf.summary.histogram("input_hist", input_data)
# configure model
if params.add_batch_norm:
input_data = self._norm(name="input_bn")(input_data)
# activation = reshaped input * cluster weights
if params.cluster_size > 0:
activation = layers.Dense(
params.cluster_size,
kernel_regularizer=l2_regularizer,
kernel_initializer=tf.random_normal_initializer(
stddev=1 / tf.sqrt(tf.cast(feature_size, tf.float32))
),
)(input_data)
else:
activation = input_data
if params.add_batch_norm:
activation = self._norm(name="cluster_bn")(activation)
else:
cluster_biases = tf.Variable(
          tf.random_normal_initializer(
              stddev=1 / tf.sqrt(tf.cast(feature_size, tf.float32)))(
shape=[params.cluster_size]),
name="cluster_biases")
tf.summary.histogram("cluster_biases", cluster_biases)
activation += cluster_biases
activation = self._act_fn(activation)
tf.summary.histogram("cluster_output", activation)
if params.use_context_gate_cluster_layer:
pooling_method = None
norm_args = dict(name="context_gate_bn")
activation = utils.context_gate(
activation,
normalizer_fn=self._norm,
normalizer_params=norm_args,
pooling_method=pooling_method,
hidden_layer_size=params.context_gate_cluster_bottleneck_size,
kernel_regularizer=l2_regularizer)
activation = utils.frame_pooling(activation, params.pooling_method)
# activation = activation * hidden1_weights
activation = layers.Dense(
params.hidden_size,
kernel_regularizer=l2_regularizer,
kernel_initializer=tf.random_normal_initializer(
stddev=1 / tf.sqrt(tf.cast(params.cluster_size, tf.float32))))(
activation)
if params.add_batch_norm:
activation = self._norm(name="hidden1_bn")(activation)
else:
hidden1_biases = tf.Variable(
tf.random_normal_initializer(stddev=0.01)(shape=[params.hidden_size]),
name="hidden1_biases")
tf.summary.histogram("hidden1_biases", hidden1_biases)
activation += hidden1_biases
activation = self._act_fn(activation)
tf.summary.histogram("hidden1_output", activation)
super().__init__(inputs=model_input, outputs=activation, **kwargs)
@factory.register_backbone_builder("dbof")
def build_dbof(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs,
) -> tf.keras.Model:
"""Builds a dbof backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == "dbof", f"Inconsistent backbone type {backbone_type}"
return Dbof(
input_specs=input_specs,
params=backbone_cfg,
norm_activation=norm_activation_config,
l2_regularizer=l2_regularizer,
**kwargs,
)
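# Example (hedged sketch): building the DBoF backbone directly. The
# 1152 = 1024 (video) + 128 (audio) feature size and the batch/frame sizes
# below are illustrative.
if __name__ == "__main__":
  _backbone = Dbof(
      input_specs=layers.InputSpec(shape=[None, None, 1152]),
      params=yt8m_cfg.DbofModel())
  print(_backbone(tf.random.uniform([2, 24, 1152])).shape)  # (2, hidden_size)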
| 6,032 | 34.280702 | 80 | py |
models | models-master/official/projects/yt8m/modeling/backbones/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbones package definition."""
from official.projects.yt8m.modeling.backbones import dbof
| 705 | 38.222222 | 74 | py |
models | models-master/official/projects/yt8m/modeling/heads/logistic.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logistic model definitions."""
from typing import Optional
import tensorflow as tf
layers = tf.keras.layers
class LogisticModel(tf.keras.Model):
"""Logistic prediction head model with L2 regularization."""
def __init__(
self,
input_specs: layers.InputSpec = layers.InputSpec(shape=[None, 128]),
vocab_size: int = 3862,
return_logits: bool = False,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs,
):
"""Creates a logistic model.
Args:
input_specs: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
return_logits: if True also return logits.
l2_regularizer: An optional L2 weight regularizer.
**kwargs: extra key word args.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:])
logits = layers.Dense(vocab_size, kernel_regularizer=l2_regularizer)(inputs)
outputs = {"predictions": tf.nn.sigmoid(logits)}
if return_logits:
outputs.update({"logits": logits})
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
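# Example (hedged sketch): mapping pooled features to per-class probabilities.
# The vocabulary and batch sizes below are illustrative.
if __name__ == "__main__":
  _head = LogisticModel(
      input_specs=layers.InputSpec(shape=[None, 128]), vocab_size=10)
  print(_head(tf.random.uniform([4, 128]))["predictions"].shape)  # (4, 10)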
| 1,926 | 32.224138 | 80 | py |
models | models-master/official/projects/yt8m/modeling/heads/moe.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MoE model definitions."""
from typing import Any, Optional
import tensorflow as tf
from official.projects.yt8m.modeling import yt8m_model_utils as utils
layers = tf.keras.layers
class MoeModel(tf.keras.Model):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def __init__(
self,
input_specs: layers.InputSpec = layers.InputSpec(shape=[None, 128]),
vocab_size: int = 3862,
num_mixtures: int = 2,
use_input_context_gate: bool = False,
use_output_context_gate: bool = False,
normalizer_params: Optional[dict[str, Any]] = None,
vocab_as_last_dim: bool = False,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs,
):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers
in the mixture is not trained, and always predicts 0.
Args:
input_specs: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
use_input_context_gate: if True apply context gate layer to the input.
use_output_context_gate: if True apply context gate layer to the output.
normalizer_params: parameters of the batch normalization.
vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as
the last dimension to avoid small `num_mixtures` as the last dimension.
XLA pads up the dimensions of tensors: typically the last dimension will
be padded to 128, and the second to last will be padded to 8.
l2_regularizer: An optional L2 weight regularizer.
**kwargs: extra key word args.
Returns:
A dictionary with a tensor containing the probability predictions
of the model in the 'predictions' key. The dimensions of the tensor
are batch_size x num_classes.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:])
model_input = inputs
if use_input_context_gate:
model_input = utils.context_gate(
model_input,
normalizer_fn=layers.BatchNormalization,
normalizer_params=normalizer_params,
)
gate_activations = layers.Dense(
vocab_size * (num_mixtures + 1),
activation=None,
bias_initializer=None,
kernel_regularizer=l2_regularizer)(
model_input)
expert_activations = layers.Dense(
vocab_size * num_mixtures,
activation=None,
kernel_regularizer=l2_regularizer)(
model_input)
if vocab_as_last_dim:
# Batch x (num_mixtures + 1) x #Labels
gate_activations = tf.reshape(
gate_activations, [-1, num_mixtures + 1, vocab_size])
# Batch x num_mixtures x #Labels
expert_activations = tf.reshape(
expert_activations, [-1, num_mixtures, vocab_size])
else:
# (Batch * #Labels) x (num_mixtures + 1)
gate_activations = tf.reshape(gate_activations, [-1, num_mixtures + 1])
# (Batch * #Labels) x num_mixtures
expert_activations = tf.reshape(expert_activations, [-1, num_mixtures])
gating_distribution = tf.nn.softmax(gate_activations, axis=1)
expert_distribution = tf.nn.sigmoid(expert_activations)
final_probabilities = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, axis=1)
if not vocab_as_last_dim:
final_probabilities = tf.reshape(final_probabilities, [-1, vocab_size])
if use_output_context_gate:
final_probabilities = utils.context_gate(
final_probabilities,
normalizer_fn=layers.BatchNormalization,
normalizer_params=normalizer_params,
)
outputs = {"predictions": final_probabilities}
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
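# Example (hedged sketch): a two-expert mixture head over a small vocabulary.
# The sizes below are illustrative.
if __name__ == "__main__":
  _head = MoeModel(
      input_specs=layers.InputSpec(shape=[None, 128]),
      vocab_size=10,
      num_mixtures=2)
  print(_head(tf.random.uniform([4, 128]))["predictions"].shape)  # (4, 10)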
| 4,618 | 37.491667 | 80 | py |
models | models-master/official/projects/yt8m/modeling/heads/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aggregation heads package definition."""
from official.projects.yt8m.modeling.heads.logistic import LogisticModel
from official.projects.yt8m.modeling.heads.moe import MoeModel
| 790 | 40.631579 | 74 | py |
models | models-master/official/projects/yt8m/tasks/yt8m_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification task definition."""
from typing import Dict, List, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.modeling import tf_utils
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.dataloaders import yt8m_input
from official.projects.yt8m.eval_utils import eval_util
from official.projects.yt8m.modeling import yt8m_model
@task_factory.register_task_cls(yt8m_cfg.YT8MTask)
class YT8MTask(base_task.Task):
"""A task for video classification."""
def build_model(self):
"""Builds model for YT8M Task."""
train_cfg = self.task_config.train_data
common_input_shape = [None, sum(train_cfg.feature_sizes)]
# [batch_size x num_frames x num_features]
input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape)
logging.info('Build model input %r', common_input_shape)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Model configuration.
model_config = self.task_config.model
model = yt8m_model.VideoClassificationModel(
params=model_config,
input_specs=input_specs,
num_classes=train_cfg.num_classes,
l2_weight_decay=l2_weight_decay)
non_trainable_batch_norm_variables = []
non_trainable_extra_variables = []
for var in model.non_trainable_variables:
if 'moving_mean' in var.name or 'moving_variance' in var.name:
non_trainable_batch_norm_variables.append(var)
else:
non_trainable_extra_variables.append(var)
logging.info(
'Trainable model variables:\n%s',
'\n'.join(
[f'{var.name}\t{var.shape}' for var in model.trainable_variables]
),
)
logging.info(
(
'Non-trainable batch norm variables (get updated in training'
' mode):\n%s'
),
'\n'.join(
[
f'{var.name}\t{var.shape}'
for var in non_trainable_batch_norm_variables
]
),
)
logging.info(
'Non-trainable frozen model variables:\n%s',
'\n'.join(
[
f'{var.name}\t{var.shape}'
for var in non_trainable_extra_variables
]
),
)
return model
def build_inputs(self, params: yt8m_cfg.DataConfig, input_context=None):
"""Builds input.
Args:
params: configuration for input data
input_context: indicates information about the compute replicas and input
pipelines
Returns:
dataset: dataset fetched from reader
"""
decoder = yt8m_input.Decoder(input_params=params)
decoder_fn = decoder.decode
parser = yt8m_input.Parser(input_params=params)
parser_fn = parser.parse_fn(params.is_training)
postprocess = yt8m_input.PostBatchProcessor(input_params=params)
postprocess_fn = postprocess.post_fn
transform_batch = yt8m_input.TransformBatcher(input_params=params)
batch_fn = transform_batch.batch_fn
reader = input_reader.InputReader(
params,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn=decoder_fn,
parser_fn=parser_fn,
postprocess_fn=postprocess_fn,
transform_and_batch_fn=batch_fn)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels,
model_outputs,
label_weights=None,
aux_losses=None):
"""Sigmoid Cross Entropy.
Args:
labels: tensor containing truth labels.
model_outputs: output probabilities of the classifier.
label_weights: optional tensor of label weights.
      aux_losses: tensor containing auxiliary loss tensors, i.e. `losses` in
        keras.Model.
Returns:
      A dict of tensors containing the total loss and model loss tensors.
"""
losses_config = self.task_config.losses
model_loss = tf.keras.losses.binary_crossentropy(
tf.expand_dims(labels, axis=-1),
tf.expand_dims(model_outputs, axis=-1),
from_logits=losses_config.from_logits,
label_smoothing=losses_config.label_smoothing,
axis=-1)
if label_weights is None:
model_loss = tf_utils.safe_mean(model_loss)
else:
model_loss = model_loss * label_weights
      # Manually compute the weighted mean loss.
total_loss = tf.reduce_sum(model_loss)
total_weight = tf.cast(
tf.reduce_sum(label_weights), dtype=total_loss.dtype)
model_loss = tf.math.divide_no_nan(total_loss, total_weight)
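      # For example (illustrative numbers, not from the original file): with
      # per-entry losses [0.2, 0.4] and label_weights [1.0, 0.0], this yields
      # (0.2 * 1.0 + 0.4 * 0.0) / 1.0 = 0.2, so zero-weight (unrated) labels
      # do not contribute to the loss.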
total_loss = model_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
return {'total_loss': total_loss, 'model_loss': model_loss}
def build_metrics(self, training=True):
"""Gets streaming metrics for training/validation.
metric: mAP/gAP
top_k: A positive integer specifying how many predictions are considered
per video.
    top_n: A positive integer specifying the average precision at n, or None
to use all provided data points.
Args:
training: Bool value, true for training mode, false for eval/validation.
Returns:
A list of metrics to be used.
"""
metrics = []
metric_names = ['total_loss', 'model_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if (
self.task_config.evaluation.average_precision is not None
and not training
):
# Cannot run in train step.
num_classes = self.task_config.validation_data.num_classes
top_k = self.task_config.evaluation.average_precision.top_k
top_n = self.task_config.evaluation.average_precision.top_n
self.avg_prec_metric = eval_util.EvaluationMetrics(
num_classes, top_k=top_k, top_n=top_n)
return metrics
def process_metrics(
self,
metrics: List[tf.keras.metrics.Metric],
labels: tf.Tensor,
outputs: tf.Tensor,
model_losses: Optional[Dict[str, tf.Tensor]] = None,
label_weights: Optional[tf.Tensor] = None,
training: bool = True,
**kwargs,
) -> Dict[str, Tuple[tf.Tensor, ...]]:
"""Updates metrics.
Args:
metrics: Evaluation metrics to be updated.
labels: A tensor containing truth labels.
outputs: Model output logits of the classifier.
model_losses: An optional dict of model losses.
label_weights: Optional label weights, can be broadcast into shape of
outputs/labels.
training: Bool indicates if in training mode.
**kwargs: Additional input arguments.
Returns:
Updated dict of metrics log.
"""
if model_losses is None:
model_losses = {}
logs = {}
if (
self.task_config.evaluation.average_precision is not None
and not training
):
logs.update({self.avg_prec_metric.name: (labels, outputs)})
for m in metrics:
if m.name in model_losses:
m.update_state(model_losses[m.name])
logs[m.name] = m.result()
return logs
def _preprocess_model_inputs(self,
inputs: dict[str, tf.Tensor],
training: bool = True):
"""Preprocesses input tensors before model on device."""
del training
return inputs['video_matrix']
def _preprocess_labels(self,
inputs: dict[str, tf.Tensor],
training: bool = True):
"""Preprocesses labels."""
del training # training is unused in _preprocess_labels in YT8M.
labels = inputs['labels']
label_weights = inputs.get('label_weights', None)
return labels, label_weights
def _postprocess_outputs(self,
outputs,
labels,
label_weights,
training: bool = True):
"""Postprocess model outputs (inputs / labels / label_weights)."""
if not training and self.task_config.validation_data.segment_labels:
# workaround to ignore the unrated labels.
outputs *= label_weights
# remove padding
outputs = outputs[~tf.reduce_all(labels == -1, axis=1)]
labels = labels[~tf.reduce_all(labels == -1, axis=1)]
return outputs, labels, label_weights
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors. output_dict = { "video_ids":
batch_video_ids, "video_matrix": batch_video_matrix, "labels":
batch_labels, "num_frames": batch_frames, }
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
a dictionary of logs.
"""
model_inputs = self._preprocess_model_inputs(inputs, training=True)
labels, label_weights = self._preprocess_labels(inputs, training=True)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(model_inputs, training=True)['predictions']
# Casting output layer as float32 is necessary when mixed_precision is
# mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Post-process model / label outputs.
outputs, labels, label_weights = self._postprocess_outputs(
outputs, labels, label_weights, training=True)
# Computes per-replica loss
all_losses = self.build_losses(
model_outputs=outputs,
labels=labels,
label_weights=label_weights,
aux_losses=model.losses)
loss = all_losses['total_loss']
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
# Apply gradient clipping.
if self.task_config.gradient_clip_norm > 0:
grads, _ = tf.clip_by_global_norm(grads,
self.task_config.gradient_clip_norm)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
logs.update(
self.process_metrics(
metrics,
labels=labels,
outputs=outputs,
model_losses=all_losses,
label_weights=label_weights,
training=True))
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors. output_dict = { "video_ids":
batch_video_ids, "video_matrix": batch_video_matrix, "labels":
batch_labels, "num_frames": batch_frames}.
model: the model, forward definition.
metrics: a nested structure of metrics objects.
Returns:
a dictionary of logs.
"""
model_inputs = self._preprocess_model_inputs(inputs, training=False)
labels, label_weights = self._preprocess_labels(inputs, training=False)
outputs = self.inference_step(model_inputs, model)['predictions']
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
outputs, labels, label_weights = self._postprocess_outputs(
outputs, labels, label_weights, training=False)
all_losses = self.build_losses(
labels=labels,
model_outputs=outputs,
label_weights=label_weights,
aux_losses=model.losses)
logs = {self.loss: all_losses['total_loss']}
logs.update(
self.process_metrics(
metrics,
labels=labels,
outputs=outputs,
model_losses=all_losses,
label_weights=inputs.get('label_weights', None),
training=False))
return logs
def inference_step(self, inputs, model):
"""Performs the forward step."""
return model(inputs, training=False)
def aggregate_logs(self, state=None, step_logs=None):
if self.task_config.evaluation.average_precision is not None:
if state is None:
state = self.avg_prec_metric
self.avg_prec_metric.accumulate(
labels=step_logs[self.avg_prec_metric.name][0],
predictions=step_logs[self.avg_prec_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.task_config.evaluation.average_precision is not None:
avg_prec_metrics = self.avg_prec_metric.get(
self.task_config.evaluation.average_precision.return_per_class_ap)
self.avg_prec_metric.clear()
return avg_prec_metrics
return None
| 13,880 | 34.141772 | 80 | py |
models | models-master/official/projects/yt8m/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks package definition."""
from official.projects.yt8m.tasks import yt8m_task
| 692 | 39.764706 | 74 | py |
models | models-master/official/projects/s3d/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver for S3D."""
from absl import app
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.projects.s3d.configs.google import s3d as s3d_config
from official.projects.s3d.modeling import s3d
from official.projects.s3d.tasks.google import automl_video_classification
from official.vision import registry_imports
# pylint: enable=unused-import
from official.vision import train
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,149 | 36.096774 | 74 | py |
models | models-master/official/projects/s3d/configs/s3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""S3D model configurations."""
import dataclasses
from typing import Text
from official.modeling import hyperparams
from official.vision.configs import backbones_3d
from official.vision.configs import video_classification
@dataclasses.dataclass
class S3D(hyperparams.Config):
"""S3D backbone config.
Attributes:
final_endpoint: Specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
first_temporal_kernel_size: Specifies the temporal kernel size for the first
conv3d filter. A larger value slows down the model but provides little
accuracy improvement. Must be set to one of 1, 3, 5 or 7.
temporal_conv_start_at: Specifies the first conv block to use separable 3D
convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is
used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the
first valid block to use separable 3D convs. If provided block name is
not present, all valid blocks will use separable 3D convs.
gating_start_at: Specifies the first conv block to use self gating.
'Conv2d_2c_3x3' is the first valid block to use self gating.
swap_pool_and_1x1x1: If True, in Branch_3 1x1x1 convolution is performed
first, then followed by max pooling. 1x1x1 convolution is used to reduce
the number of filters. Thus, max pooling is performed on less filters.
gating_style: Self gating can be applied after each branch and/or after each
inception cell. It can be one of ['BRANCH', 'CELL', 'BRANCH_AND_CELL'].
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
temporal_conv_type: It can be one of ['3d', '2+1d', '1+2d', '1+1+1d'] where
'3d' is SPATIOTEMPORAL 3d convolution, '2+1d' is SPATIAL_TEMPORAL_SEPARATE
with 2D convolution on the spatial dimensions followed by 1D convolution
on the temporal dimension, '1+2d' is TEMPORAL_SPATIAL_SEPARATE with 1D
convolution on the temporal dimension followed by 2D convolution on the
spatial dimensions, and '1+1+1d' is FULLY_SEPARATE with 1D convolutions on
the horizontal, vertical, and temporal dimensions, respectively.
depth_multiplier: Float multiplier for the depth (number of channels) for
all convolution ops. The value must be greater than zero. Typical usage
will be to set this value in (0, 1) to reduce the number of parameters or
computation cost of the model.
"""
final_endpoint: Text = 'Mixed_5c'
first_temporal_kernel_size: int = 3
temporal_conv_start_at: Text = 'Conv2d_2c_3x3'
gating_start_at: Text = 'Conv2d_2c_3x3'
swap_pool_and_1x1x1: bool = True
gating_style: Text = 'CELL'
use_sync_bn: bool = False
norm_momentum: float = 0.999
norm_epsilon: float = 0.001
temporal_conv_type: Text = '2+1d'
depth_multiplier: float = 1.0
@dataclasses.dataclass
class Backbone3D(backbones_3d.Backbone3D):
"""Configuration for backbones.
Attributes:
type: 'str', type of backbone be used, on the of fields below.
s3d: s3d backbone config.
"""
type: str = 's3d'
s3d: S3D = dataclasses.field(default_factory=S3D)
@dataclasses.dataclass
class S3DModel(video_classification.VideoClassificationModel):
"""The S3D model config.
Attributes:
type: 'str', type of backbone be used, on the of fields below.
backbone: backbone config.
"""
model_type: str = 's3d'
backbone: Backbone3D = dataclasses.field(default_factory=Backbone3D)
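# Example (hedged sketch): overriding backbone fields when assembling a model
# config. The depth multiplier value below is illustrative.
if __name__ == "__main__":
  _model_cfg = S3DModel(
      backbone=Backbone3D(type="s3d", s3d=S3D(depth_multiplier=0.5)))
  print(_model_cfg.backbone.s3d.depth_multiplier)  # 0.5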
| 4,457 | 44.030303 | 80 | py |
models | models-master/official/projects/s3d/modeling/inception_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from official.projects.s3d.modeling import inception_utils
class InceptionUtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((1.0, 3, {'Conv2d_1a_7x7', 'Conv2d_2c_3x3'}),
(0.5, 5, {'Conv2d_1a_7x7', 'Conv2d_2c_3x3'}),
(0.25, 7, {'Conv2d_1a_7x7', 'Conv2d_2c_3x3'}))
def test_s3d_stem_cells(self, depth_multiplier, first_temporal_kernel_size,
temporal_conv_endpoints):
batch_size = 1
num_frames = 64
height, width = 224, 224
inputs = tf.keras.layers.Input(
shape=(num_frames, height, width, 3), batch_size=batch_size)
outputs, output_endpoints = inception_utils.inception_v1_stem_cells(
inputs,
depth_multiplier,
'Mixed_5c',
temporal_conv_endpoints=temporal_conv_endpoints,
self_gating_endpoints={'Conv2d_2c_3x3'},
first_temporal_kernel_size=first_temporal_kernel_size)
self.assertListEqual(outputs.shape.as_list(),
[batch_size, 32, 28, 28, int(192 * depth_multiplier)])
expected_endpoints = {
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3'
}
self.assertSetEqual(expected_endpoints, set(output_endpoints.keys()))
@parameterized.parameters(
('3d', True, True, True),
('2d', False, False, True),
('1+2d', True, False, False),
('2+1d', False, True, False),
)
def test_inception_v1_cell_endpoint_match(self, conv_type,
swap_pool_and_1x1x1,
use_self_gating_on_branch,
use_self_gating_on_cell):
batch_size = 5
num_frames = 32
channels = 128
height, width = 28, 28
inputs = tf.keras.layers.Input(
shape=(num_frames, height, width, channels), batch_size=batch_size)
inception_v1_cell_layer = inception_utils.InceptionV1CellLayer(
[[64], [96, 128], [16, 32], [32]],
conv_type=conv_type,
swap_pool_and_1x1x1=swap_pool_and_1x1x1,
use_self_gating_on_branch=use_self_gating_on_branch,
use_self_gating_on_cell=use_self_gating_on_cell,
name='test')
outputs = inception_v1_cell_layer(inputs)
# self.assertTrue(net.op.name.startswith('test'))
self.assertListEqual(outputs.shape.as_list(),
[batch_size, 32, 28, 28, 256])
if __name__ == '__main__':
tf.test.main()
| 3,194 | 36.588235 | 79 | py |
models | models-master/official/projects/s3d/modeling/s3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for S3D model."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.s3d.modeling import s3d
class S3dTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(7, 224, 224, 3),
(7, 128, 128, 3),
(7, 256, 256, 3),
(7, 192, 192, 3),
(64, 224, 224, 3),
(32, 224, 224, 3),
(64, 224, 224, 11),
(32, 224, 224, 11),
)
def test_build(self, num_frames, height, width, first_temporal_kernel_size):
batch_size = 5
input_shape = [batch_size, num_frames, height, width, 3]
input_specs = tf.keras.layers.InputSpec(shape=input_shape)
network = s3d.S3D(
input_specs=input_specs
)
inputs = tf.keras.Input(shape=input_shape[1:], batch_size=input_shape[0])
endpoints = network(inputs)
temporal_1a = (num_frames - 1)//2 + 1
expected_shapes = {
'Conv2d_1a_7x7': [5, temporal_1a, height//2, width//2, 64],
'Conv2d_2b_1x1': [5, temporal_1a, height//4, width//4, 64],
        'Conv2d_2c_3x3': [5, temporal_1a, height//4, width//4, 192],
        'MaxPool_2a_3x3': [5, temporal_1a, height//4, width//4, 64],
'MaxPool_3a_3x3': [5, temporal_1a, height//8, width//8, 192],
'Mixed_3b': [5, temporal_1a, height//8, width//8, 256],
'Mixed_3c': [5, temporal_1a, height//8, width//8, 480],
'MaxPool_4a_3x3': [5, temporal_1a//2, height//16, width//16, 480],
'Mixed_4b': [5, temporal_1a//2, height//16, width//16, 512],
'Mixed_4c': [5, temporal_1a//2, height//16, width//16, 512],
'Mixed_4d': [5, temporal_1a//2, height//16, width//16, 512],
'Mixed_4e': [5, temporal_1a//2, height//16, width//16, 528],
'Mixed_4f': [5, temporal_1a//2, height//16, width//16, 832],
'MaxPool_5a_2x2': [5, temporal_1a//4, height//32, width//32, 832],
'Mixed_5b': [5, temporal_1a//4, height//32, width//32, 832],
'Mixed_5c': [5, temporal_1a//4, height//32, width//32, 1024],
}
output_shapes = dict()
for end_point, output_tensor in endpoints.items():
output_shapes[end_point] = output_tensor.shape.as_list()
self.assertDictEqual(output_shapes, expected_shapes)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
input_specs=tf.keras.layers.InputSpec(shape=(5, 64, 224, 224, 3)),
final_endpoint='Mixed_5c',
first_temporal_kernel_size=3,
temporal_conv_start_at='Conv2d_2c_3x3',
gating_start_at='Conv2d_2c_3x3',
swap_pool_and_1x1x1=True,
gating_style='CELL',
use_sync_bn=False,
norm_momentum=0.999,
norm_epsilon=0.001,
temporal_conv_initializer=tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
temporal_conv_type='2+1d',
kernel_initializer='truncated_normal',
kernel_regularizer='l2',
depth_multiplier=1.0
)
network = s3d.S3D(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = s3d.S3D.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,127 | 37.579439 | 79 | py |
models | models-master/official/projects/s3d/modeling/s3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the Tensorflow 2 version definition of S3D model.
S3D model is described in the following paper:
https://arxiv.org/abs/1712.04851.
"""
from typing import Any, Dict, Mapping, Optional, Sequence, Text, Tuple, Union
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.s3d.configs import s3d as cfg
from official.projects.s3d.modeling import inception_utils
from official.projects.s3d.modeling import net_utils
from official.vision.modeling import factory_3d as model_factory
from official.vision.modeling.backbones import factory as backbone_factory
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
class S3D(tf.keras.Model):
"""Class to build S3D family model."""
def __init__(self,
input_specs: tf.keras.layers.InputSpec,
final_endpoint: Text = 'Mixed_5c',
first_temporal_kernel_size: int = 3,
temporal_conv_start_at: Text = 'Conv2d_2c_3x3',
gating_start_at: Text = 'Conv2d_2c_3x3',
swap_pool_and_1x1x1: bool = True,
gating_style: Text = 'CELL',
use_sync_bn: bool = False,
norm_momentum: float = 0.999,
norm_epsilon: float = 0.001,
temporal_conv_initializer: Union[
Text,
initializers.Initializer] = initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
temporal_conv_type: Text = '2+1d',
kernel_initializer: Union[
Text,
initializers.Initializer] = initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2',
depth_multiplier: float = 1.0,
**kwargs):
"""Constructor.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
final_endpoint: Specifies the endpoint to construct the network up to.
first_temporal_kernel_size: Temporal kernel size of the first convolution
layer.
      temporal_conv_start_at: Specifies the endpoint from which to start
        performing temporal convolution.
      gating_start_at: Specifies the endpoint from which to start performing
        self gating.
      swap_pool_and_1x1x1: A boolean flag indicating whether to swap the order
        of convolution and max pooling in Branch_3 of the inception v1 cell.
gating_style: A string that specifies self gating to be applied after each
branch and/or after each cell. It can be one of ['BRANCH', 'CELL',
'BRANCH_AND_CELL'].
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
temporal_conv_initializer: Weight initializer for temporal convolutional
layers.
temporal_conv_type: The type of parameterized convolution. Currently, we
support '2d', '3d', '2+1d', '1+2d'.
kernel_initializer: Weight initializer for convolutional layers other than
temporal convolution.
kernel_regularizer: Weight regularizer for all convolutional layers.
depth_multiplier: A float to reduce/increase number of channels.
**kwargs: keyword arguments to be passed.
"""
self._input_specs = input_specs
self._final_endpoint = final_endpoint
self._first_temporal_kernel_size = first_temporal_kernel_size
self._temporal_conv_start_at = temporal_conv_start_at
self._gating_start_at = gating_start_at
self._swap_pool_and_1x1x1 = swap_pool_and_1x1x1
self._gating_style = gating_style
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._temporal_conv_initializer = temporal_conv_initializer
self._temporal_conv_type = temporal_conv_type
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._depth_multiplier = depth_multiplier
self._temporal_conv_endpoints = net_utils.make_set_from_start_endpoint(
temporal_conv_start_at, inception_utils.INCEPTION_V1_CONV_ENDPOINTS)
self._self_gating_endpoints = net_utils.make_set_from_start_endpoint(
gating_start_at, inception_utils.INCEPTION_V1_CONV_ENDPOINTS)
inputs = tf.keras.Input(shape=input_specs.shape[1:])
net, end_points = inception_utils.inception_v1_stem_cells(
inputs,
depth_multiplier,
final_endpoint,
temporal_conv_endpoints=self._temporal_conv_endpoints,
self_gating_endpoints=self._self_gating_endpoints,
temporal_conv_type=self._temporal_conv_type,
first_temporal_kernel_size=self._first_temporal_kernel_size,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
temporal_conv_initializer=self._temporal_conv_initializer,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
parameterized_conv_layer=self._get_parameterized_conv_layer_impl(),
layer_naming_fn=self._get_layer_naming_fn(),
)
for end_point, filters in inception_utils.INCEPTION_V1_ARCH_SKELETON:
net, end_points = self._s3d_cell(net, end_point, end_points, filters)
if end_point == final_endpoint:
break
if final_endpoint not in end_points:
raise ValueError(
'Unrecognized final endpoint %s (available endpoints: %s).' %
(final_endpoint, end_points.keys()))
    super(S3D, self).__init__(inputs=inputs, outputs=end_points, **kwargs)
    # Record endpoint shapes so the `output_specs` property below resolves.
    self._output_specs = {k: v.get_shape() for k, v in end_points.items()}
def _s3d_cell(
self,
net: tf.Tensor,
end_point: Text,
end_points: Dict[Text, tf.Tensor],
filters: Union[int, Sequence[Any]],
non_local_block: Optional[tf.keras.layers.Layer] = None,
attention_cell: Optional[tf.keras.layers.Layer] = None,
attention_cell_super_graph: Optional[tf.keras.layers.Layer] = None
) -> Tuple[tf.Tensor, Dict[Text, tf.Tensor]]:
if end_point.startswith('Mixed'):
conv_type = (
self._temporal_conv_type
if end_point in self._temporal_conv_endpoints else '2d')
use_self_gating_on_branch = (
end_point in self._self_gating_endpoints and
(self._gating_style == 'BRANCH' or
self._gating_style == 'BRANCH_AND_CELL'))
use_self_gating_on_cell = (
end_point in self._self_gating_endpoints and
(self._gating_style == 'CELL' or
self._gating_style == 'BRANCH_AND_CELL'))
net = self._get_inception_v1_cell_layer_impl()(
branch_filters=net_utils.apply_depth_multiplier(
filters, self._depth_multiplier),
conv_type=conv_type,
temporal_dilation_rate=1,
swap_pool_and_1x1x1=self._swap_pool_and_1x1x1,
use_self_gating_on_branch=use_self_gating_on_branch,
use_self_gating_on_cell=use_self_gating_on_cell,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
kernel_initializer=self._kernel_initializer,
temporal_conv_initializer=self._temporal_conv_initializer,
kernel_regularizer=self._kernel_regularizer,
parameterized_conv_layer=self._get_parameterized_conv_layer_impl(),
name=self._get_layer_naming_fn()(end_point))(
net)
else:
net = tf.keras.layers.MaxPool3D(
pool_size=filters[0],
strides=filters[1],
padding='same',
name=self._get_layer_naming_fn()(end_point))(
net)
end_points[end_point] = net
if non_local_block:
# TODO(b/182299420): Implement non local block in TF2.
raise NotImplementedError('Non local block is not implemented yet.')
if attention_cell:
# TODO(b/182299420): Implement attention cell in TF2.
raise NotImplementedError('Attention cell is not implemented yet.')
if attention_cell_super_graph:
# TODO(b/182299420): Implement attention cell super graph in TF2.
raise NotImplementedError('Attention cell super graph is not implemented'
' yet.')
return net, end_points
def get_config(self):
config_dict = {
'input_specs': self._input_specs,
'final_endpoint': self._final_endpoint,
'first_temporal_kernel_size': self._first_temporal_kernel_size,
'temporal_conv_start_at': self._temporal_conv_start_at,
'gating_start_at': self._gating_start_at,
'swap_pool_and_1x1x1': self._swap_pool_and_1x1x1,
'gating_style': self._gating_style,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'temporal_conv_initializer': self._temporal_conv_initializer,
'temporal_conv_type': self._temporal_conv_type,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'depth_multiplier': self._depth_multiplier
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
def _get_inception_v1_cell_layer_impl(self):
return inception_utils.InceptionV1CellLayer
def _get_parameterized_conv_layer_impl(self):
return net_utils.ParameterizedConvLayer
def _get_layer_naming_fn(self):
return lambda end_point: None
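# Example (hedged sketch): the backbone maps a clip to a dict of endpoint
# tensors. The clip shape below is illustrative.
if __name__ == "__main__":
  _backbone = S3D(
      input_specs=tf.keras.layers.InputSpec(shape=[None, 32, 224, 224, 3]))
  _endpoints = _backbone(tf.random.uniform([1, 32, 224, 224, 3]))
  print(_endpoints["Mixed_5c"].shape)  # (1, 4, 7, 7, 1024)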
class S3DModel(tf.keras.Model):
"""An S3D model builder."""
def __init__(self,
backbone: tf.keras.Model,
num_classes: int,
input_specs: Mapping[Text, tf.keras.layers.InputSpec],
final_endpoint: Text = 'Mixed_5c',
dropout_rate: float = 0.0,
**kwargs):
"""Constructor.
Args:
backbone: S3D backbone Keras Model.
num_classes: `int` number of possible classes for video classification.
      input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
final_endpoint: Specifies the endpoint to construct the network up to.
dropout_rate: `float` between 0 and 1. Fraction of the input units to
drop. Note that dropout_rate = 1.0 - dropout_keep_prob.
**kwargs: keyword arguments to be passed.
"""
self._self_setattr_tracking = False
self._backbone = backbone
self._num_classes = num_classes
self._input_specs = input_specs
self._final_endpoint = final_endpoint
self._dropout_rate = dropout_rate
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'input_specs': input_specs,
'final_endpoint': final_endpoint,
'dropout_rate': dropout_rate,
}
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
streams = self._backbone(inputs['image'])
pool = tf.math.reduce_mean(streams[self._final_endpoint], axis=[1, 2, 3])
fc = tf.keras.layers.Dropout(dropout_rate)(pool)
logits = tf.keras.layers.Dense(**self._build_dense_layer_params())(fc)
super(S3DModel, self).__init__(inputs=inputs, outputs=logits, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def _build_dense_layer_params(self):
return dict(units=self._num_classes, kernel_regularizer='l2')
@backbone_factory.register_backbone_builder('s3d')
def build_s3d(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds S3D backbone."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 's3d'
del norm_activation_config
backbone = S3D(
input_specs=input_specs,
final_endpoint=backbone_cfg.final_endpoint,
first_temporal_kernel_size=backbone_cfg.first_temporal_kernel_size,
temporal_conv_start_at=backbone_cfg.temporal_conv_start_at,
gating_start_at=backbone_cfg.gating_start_at,
swap_pool_and_1x1x1=backbone_cfg.swap_pool_and_1x1x1,
gating_style=backbone_cfg.gating_style,
use_sync_bn=backbone_cfg.use_sync_bn,
norm_momentum=backbone_cfg.norm_momentum,
norm_epsilon=backbone_cfg.norm_epsilon,
temporal_conv_type=backbone_cfg.temporal_conv_type,
kernel_regularizer=l2_regularizer,
depth_multiplier=backbone_cfg.depth_multiplier)
return backbone
@model_factory.register_model_builder('s3d')
def build_s3d_model(
input_specs: tf.keras.layers.InputSpec,
model_config: cfg.S3DModel,
num_classes: int,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds S3D model with classification layer."""
input_specs_dict = {'image': input_specs}
backbone = build_s3d(input_specs, model_config.backbone,
model_config.norm_activation, l2_regularizer)
model = S3DModel(
backbone,
num_classes=num_classes,
input_specs=input_specs_dict,
dropout_rate=model_config.dropout_rate)
return model
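# --- Hedged usage sketch (added for illustration; not part of the original
# file). It assumes S3D's remaining constructor arguments take the defaults
# implied by get_config above, and a 64-frame 224x224 RGB input clip.
def _example_build_s3d_classifier():  # hypothetical helper
  input_specs = tf.keras.layers.InputSpec(shape=[None, 64, 224, 224, 3])
  backbone = S3D(input_specs=input_specs)
  model = S3DModel(
      backbone=backbone,
      num_classes=400,
      input_specs={'image': input_specs},
      dropout_rate=0.5)
  logits = model({'image': tf.random.normal([2, 64, 224, 224, 3])})
  return logits  # Expected shape: [2, 400].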
| 14,486 | 39.579832 | 80 | py |
models | models-master/official/projects/s3d/modeling/inception_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains modules related to Inception networks."""
from typing import Callable, Dict, Optional, Sequence, Set, Text, Tuple, Type, Union
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.s3d.modeling import net_utils
from official.vision.modeling.layers import nn_blocks_3d
INCEPTION_V1_CONV_ENDPOINTS = [
'Conv2d_1a_7x7', 'Conv2d_2c_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'Mixed_5b', 'Mixed_5c'
]
# Mapping from endpoint to branch filters. The endpoint shapes below are
# specific for input 64x224x224.
INCEPTION_V1_ARCH_SKELETON = [
('Mixed_3b', [[64], [96, 128], [16, 32], [32]]), # 32x28x28x256
('Mixed_3c', [[128], [128, 192], [32, 96], [64]]), # 32x28x28x480
('MaxPool_4a_3x3', [[3, 3, 3], [2, 2, 2]]), # 16x14x14x480
('Mixed_4b', [[192], [96, 208], [16, 48], [64]]), # 16x14x14x512
('Mixed_4c', [[160], [112, 224], [24, 64], [64]]), # 16x14x14x512
('Mixed_4d', [[128], [128, 256], [24, 64], [64]]), # 16x14x14x512
('Mixed_4e', [[112], [144, 288], [32, 64], [64]]), # 16x14x14x528
('Mixed_4f', [[256], [160, 320], [32, 128], [128]]), # 16x14x14x832
('MaxPool_5a_2x2', [[2, 2, 2], [2, 2, 2]]), # 8x7x7x832
('Mixed_5b', [[256], [160, 320], [32, 128], [128]]), # 8x7x7x832
('Mixed_5c', [[384], [192, 384], [48, 128], [128]]), # 8x7x7x1024
]
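# Reading one entry: ('Mixed_3b', [[64], [96, 128], [16, 32], [32]]) gives
# Branch_0 a 64-filter 1x1x1 conv; Branch_1 a 96-filter 1x1x1 conv followed by
# a 128-filter 3x3x3 conv; Branch_2 a 16-filter 1x1x1 conv followed by a
# 32-filter 3x3x3 conv; and Branch_3 a max pool followed by a 32-filter 1x1x1
# conv. Concatenating the branches yields 64 + 128 + 32 + 32 = 256 output
# channels, which matches the 32x28x28x256 shape comment above.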
INCEPTION_V1_LOCAL_SKELETON = [
('MaxPool_5a_2x2_local', [[2, 2, 2], [2, 2, 2]]), # 8x7x7x832
('Mixed_5b_local', [[256], [160, 320], [32, 128], [128]]), # 8x7x7x832
('Mixed_5c_local', [[384], [192, 384], [48, 128], [128]]), # 8x7x7x1024
]
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
def inception_v1_stem_cells(
inputs: tf.Tensor,
depth_multiplier: float,
final_endpoint: Text,
temporal_conv_endpoints: Optional[Set[Text]] = None,
self_gating_endpoints: Optional[Set[Text]] = None,
temporal_conv_type: Text = '3d',
first_temporal_kernel_size: int = 7,
use_sync_bn: bool = False,
norm_momentum: float = 0.999,
norm_epsilon: float = 0.001,
temporal_conv_initializer: Union[
Text, initializers.Initializer] = initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
kernel_initializer: Union[Text,
initializers.Initializer] = 'truncated_normal',
kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2',
parameterized_conv_layer: Type[
net_utils.ParameterizedConvLayer] = net_utils.ParameterizedConvLayer,
layer_naming_fn: Callable[[Text], Text] = lambda end_point: None,
) -> Tuple[tf.Tensor, Dict[Text, tf.Tensor]]:
"""Stem cells used in the original I3D/S3D model.
Args:
inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
channels].
depth_multiplier: A float to reduce/increase number of channels.
final_endpoint: Specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3'].
    temporal_conv_endpoints: Specifies the endpoints at which to perform
      temporal convolution.
    self_gating_endpoints: Specifies the endpoints at which to perform
      self gating.
temporal_conv_type: '3d' for I3D model and '2+1d' for S3D model.
first_temporal_kernel_size: temporal kernel size of the first convolution
layer.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
temporal_conv_initializer: Weight initializer for temporal convolution
inside the cell. It only applies to 2+1d and 1+2d cases.
kernel_initializer: Weight initializer for convolutional layers other than
temporal convolution.
kernel_regularizer: Weight regularizer for all convolutional layers.
parameterized_conv_layer: class for parameterized conv layer.
    layer_naming_fn: function to customize conv / pooling layer names given
      endpoint name of the block. This is mainly used to create models that
      are compatible with TF1 checkpoints.
Returns:
    A tuple of the final endpoint tensor and a dictionary from components of
    the network to the corresponding activations.
"""
if temporal_conv_endpoints is None:
temporal_conv_endpoints = set()
if self_gating_endpoints is None:
self_gating_endpoints = set()
if use_sync_bn:
batch_norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
batch_norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
end_points = {}
# batch_size x 32 x 112 x 112 x 64
end_point = 'Conv2d_1a_7x7'
net = tf.keras.layers.Conv3D(
filters=net_utils.apply_depth_multiplier(64, depth_multiplier),
kernel_size=[first_temporal_kernel_size, 7, 7],
strides=[2, 2, 2],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
kernel_regularizer=kernel_regularizer,
name=layer_naming_fn(end_point))(
inputs)
net = batch_norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
scale=False,
gamma_initializer='ones',
name=layer_naming_fn(end_point + '/BatchNorm'))(
net)
net = tf.nn.relu(net)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'MaxPool_2a_3x3'
net = tf.keras.layers.MaxPool3D(
pool_size=[1, 3, 3],
strides=[1, 2, 2],
padding='same',
name=layer_naming_fn(end_point))(
net)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = tf.keras.layers.Conv3D(
filters=net_utils.apply_depth_multiplier(64, depth_multiplier),
strides=[1, 1, 1],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
kernel_regularizer=kernel_regularizer,
name=layer_naming_fn(end_point))(
net)
net = batch_norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
scale=False,
gamma_initializer='ones',
name=layer_naming_fn(end_point + '/BatchNorm'))(
net)
net = tf.nn.relu(net)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 192
end_point = 'Conv2d_2c_3x3'
if end_point not in temporal_conv_endpoints:
temporal_conv_type = '2d'
net = parameterized_conv_layer(
conv_type=temporal_conv_type,
kernel_size=3,
filters=net_utils.apply_depth_multiplier(192, depth_multiplier),
strides=[1, 1, 1],
rates=[1, 1, 1],
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
temporal_conv_initializer=temporal_conv_initializer,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
kernel_regularizer=kernel_regularizer,
name=layer_naming_fn(end_point))(
net)
if end_point in self_gating_endpoints:
net = nn_blocks_3d.SelfGating(
filters=net_utils.apply_depth_multiplier(192, depth_multiplier),
name=layer_naming_fn(end_point + '/self_gating'))(
net)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 28 x 28 x 192
end_point = 'MaxPool_3a_3x3'
net = tf.keras.layers.MaxPool3D(
pool_size=[1, 3, 3],
strides=[1, 2, 2],
padding='same',
name=layer_naming_fn(end_point))(
net)
end_points[end_point] = net
return net, end_points
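# --- Hedged usage sketch (added for illustration; not part of the original
# file). Endpoint names and shapes follow the comments in
# inception_v1_stem_cells above.
def _example_stem():  # hypothetical helper
  videos = tf.keras.Input(shape=(64, 224, 224, 3))
  net, end_points = inception_v1_stem_cells(
      videos, depth_multiplier=1.0, final_endpoint='MaxPool_3a_3x3')
  # Per the shape comments above, net is the 'MaxPool_3a_3x3' activation of
  # shape [None, 32, 28, 28, 192]; end_points maps each stem endpoint name to
  # its activation tensor.
  return net, end_points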
def _construct_branch_3_layers(
channels: int,
swap_pool_and_1x1x1: bool,
pool_type: Text,
batch_norm_layer: tf.keras.layers.Layer,
kernel_initializer: Union[Text, initializers.Initializer],
kernel_regularizer: Union[Text, regularizers.Regularizer],
):
"""Helper function for Branch 3 inside Inception module."""
kernel_size = [1, 3, 3] if pool_type == '2d' else [3] * 3
conv = tf.keras.layers.Conv3D(
filters=channels,
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)
activation = tf.keras.layers.Activation('relu')
pool = tf.keras.layers.MaxPool3D(
pool_size=kernel_size, strides=[1, 1, 1], padding='same')
if swap_pool_and_1x1x1:
branch_3_layers = [conv, batch_norm_layer, activation, pool]
else:
branch_3_layers = [pool, conv, batch_norm_layer, activation]
return branch_3_layers
class InceptionV1CellLayer(tf.keras.layers.Layer):
"""A single Tensorflow 2 cell used in the original I3D/S3D model."""
def __init__(
self,
branch_filters: Sequence[Sequence[int]],
conv_type: Text = '3d',
temporal_dilation_rate: int = 1,
swap_pool_and_1x1x1: bool = False,
use_self_gating_on_branch: bool = False,
use_self_gating_on_cell: bool = False,
use_sync_bn: bool = False,
norm_momentum: float = 0.999,
norm_epsilon: float = 0.001,
temporal_conv_initializer: Union[
Text, initializers.Initializer] = initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
kernel_initializer: Union[Text,
initializers.Initializer] = 'truncated_normal',
kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2',
parameterized_conv_layer: Type[
net_utils.ParameterizedConvLayer] = net_utils.ParameterizedConvLayer,
**kwargs):
"""A cell structure inspired by Inception V1.
Args:
      branch_filters: Specifies the number of filters in four branches
        (Branch_0, Branch_1, Branch_2, Branch_3). A single number for
        Branch_0 and Branch_3. Branch_1 and Branch_2 each need two numbers,
        one for the 1x1x1 conv and one for the 3x3x3 conv.
conv_type: The type of parameterized convolution. Currently, we support
'2d', '3d', '2+1d', '1+2d'.
temporal_dilation_rate: The dilation rate for temporal convolution.
      swap_pool_and_1x1x1: A boolean flag indicating whether to swap the
        order of convolution and max pooling in Branch_3.
use_self_gating_on_branch: Whether or not to apply self gating on each
branch of the inception cell.
use_self_gating_on_cell: Whether or not to apply self gating on each cell
after the concatenation of all branches.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
temporal_conv_initializer: Weight initializer for temporal convolution
inside the cell. It only applies to 2+1d and 1+2d cases.
kernel_initializer: Weight initializer for convolutional layers other than
temporal convolution.
kernel_regularizer: Weight regularizer for all convolutional layers.
parameterized_conv_layer: class for parameterized conv layer.
**kwargs: keyword arguments to be passed.
"""
super(InceptionV1CellLayer, self).__init__(**kwargs)
self._branch_filters = branch_filters
self._conv_type = conv_type
self._temporal_dilation_rate = temporal_dilation_rate
self._swap_pool_and_1x1x1 = swap_pool_and_1x1x1
self._use_self_gating_on_branch = use_self_gating_on_branch
self._use_self_gating_on_cell = use_self_gating_on_cell
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._temporal_conv_initializer = temporal_conv_initializer
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._parameterized_conv_layer = parameterized_conv_layer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._channel_axis = -1
else:
self._channel_axis = 1
def _build_branch_params(self):
branch_0_params = [
# Conv3D
dict(
filters=self._branch_filters[0][0],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
# norm
dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones'),
# relu
dict(),
]
branch_1_params = [
# Conv3D
dict(
filters=self._branch_filters[1][0],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
# norm
dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones'),
# relu
dict(),
# ParameterizedConvLayer
dict(
conv_type=self._conv_type,
kernel_size=3,
filters=self._branch_filters[1][1],
strides=[1, 1, 1],
rates=[self._temporal_dilation_rate, 1, 1],
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
temporal_conv_initializer=self._temporal_conv_initializer,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
]
branch_2_params = [
# Conv3D
dict(
filters=self._branch_filters[2][0],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
# norm
dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones'),
# relu
dict(),
# ParameterizedConvLayer
dict(
conv_type=self._conv_type,
kernel_size=3,
filters=self._branch_filters[2][1],
strides=[1, 1, 1],
rates=[self._temporal_dilation_rate, 1, 1],
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
temporal_conv_initializer=self._temporal_conv_initializer,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer)
]
branch_3_params = [
# Conv3D
dict(
filters=self._branch_filters[3][0],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
# norm
dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones'),
# relu
dict(),
# pool
dict(
pool_size=([1, 3, 3] if self._conv_type == '2d' else [3] * 3),
strides=[1, 1, 1],
padding='same')
]
if self._use_self_gating_on_branch:
branch_0_params.append(dict(filters=self._branch_filters[0][0]))
branch_1_params.append(dict(filters=self._branch_filters[1][1]))
branch_2_params.append(dict(filters=self._branch_filters[2][1]))
branch_3_params.append(dict(filters=self._branch_filters[3][0]))
out_gating_params = []
if self._use_self_gating_on_cell:
out_channels = (
self._branch_filters[0][0] + self._branch_filters[1][1] +
self._branch_filters[2][1] + self._branch_filters[3][0])
out_gating_params.append(dict(filters=out_channels))
return [
branch_0_params, branch_1_params, branch_2_params, branch_3_params,
out_gating_params
]
def build(self, input_shape):
branch_params = self._build_branch_params()
self._branch_0_layers = [
tf.keras.layers.Conv3D(**branch_params[0][0]),
self._norm(**branch_params[0][1]),
tf.keras.layers.Activation('relu', **branch_params[0][2]),
]
self._branch_1_layers = [
tf.keras.layers.Conv3D(**branch_params[1][0]),
self._norm(**branch_params[1][1]),
tf.keras.layers.Activation('relu', **branch_params[1][2]),
self._parameterized_conv_layer(**branch_params[1][3]),
]
self._branch_2_layers = [
tf.keras.layers.Conv3D(**branch_params[2][0]),
self._norm(**branch_params[2][1]),
tf.keras.layers.Activation('relu', **branch_params[2][2]),
self._parameterized_conv_layer(**branch_params[2][3])
]
if self._swap_pool_and_1x1x1:
self._branch_3_layers = [
tf.keras.layers.Conv3D(**branch_params[3][0]),
self._norm(**branch_params[3][1]),
tf.keras.layers.Activation('relu', **branch_params[3][2]),
tf.keras.layers.MaxPool3D(**branch_params[3][3]),
]
else:
self._branch_3_layers = [
tf.keras.layers.MaxPool3D(**branch_params[3][3]),
tf.keras.layers.Conv3D(**branch_params[3][0]),
self._norm(**branch_params[3][1]),
tf.keras.layers.Activation('relu', **branch_params[3][2]),
]
if self._use_self_gating_on_branch:
self._branch_0_layers.append(
nn_blocks_3d.SelfGating(**branch_params[0][-1]))
self._branch_1_layers.append(
nn_blocks_3d.SelfGating(**branch_params[1][-1]))
self._branch_2_layers.append(
nn_blocks_3d.SelfGating(**branch_params[2][-1]))
self._branch_3_layers.append(
nn_blocks_3d.SelfGating(**branch_params[3][-1]))
if self._use_self_gating_on_cell:
self.cell_self_gating = nn_blocks_3d.SelfGating(**branch_params[4][0])
super(InceptionV1CellLayer, self).build(input_shape)
def call(self, inputs):
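    """Applies the four branches to `inputs` and concatenates their outputs.

    Args:
      inputs: A 5-D float tensor of size [batch_size, num_frames, height,
        width, channels].

    Returns:
      out_tensor: A 5-D float tensor of size [batch_size, num_frames, height,
        width, channels].
    """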
x = inputs
for layer in self._branch_0_layers:
x = layer(x)
branch_0 = x
x = inputs
for layer in self._branch_1_layers:
x = layer(x)
branch_1 = x
x = inputs
for layer in self._branch_2_layers:
x = layer(x)
branch_2 = x
x = inputs
for layer in self._branch_3_layers:
x = layer(x)
branch_3 = x
out_tensor = tf.concat([branch_0, branch_1, branch_2, branch_3],
axis=self._channel_axis)
if self._use_self_gating_on_cell:
out_tensor = self.cell_self_gating(out_tensor)
return out_tensor
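# --- Hedged usage sketch (added for illustration; not part of the original
# file). It builds one Mixed_3b-style cell from the skeleton above.
def _example_cell():  # hypothetical helper
  cell = InceptionV1CellLayer(
      branch_filters=[[64], [96, 128], [16, 32], [32]], conv_type='2+1d')
  features = tf.random.normal([2, 32, 28, 28, 192])
  outputs = cell(features)
  return outputs  # [2, 32, 28, 28, 256]: 64 + 128 + 32 + 32 channels.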
| 20,360 | 36.916201 | 84 | py |
models | models-master/official/projects/s3d/modeling/net_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
from absl.testing import parameterized
import tensorflow as tf
from official.projects.s3d.modeling import net_utils
class Tf2NetUtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('3d', [2, 1, 1], [5, 16, 28, 28, 256]),
('3d', [2, 2, 2], [5, 16, 14, 14, 256]),
('3d', [1, 2, 1], [5, 32, 14, 28, 256]),
('2d', [2, 2, 2], [5, 32, 14, 14, 256]),
('2d', [1, 1, 2], [5, 32, 28, 14, 256]),
('1+2d', [2, 2, 2], [5, 16, 14, 14, 256]),
('1+2d', [2, 1, 1], [5, 16, 28, 28, 256]),
('1+2d', [1, 1, 1], [5, 32, 28, 28, 256]),
('1+2d', [1, 1, 2], [5, 32, 28, 14, 256]),
('2+1d', [2, 2, 2], [5, 16, 14, 14, 256]),
('2+1d', [1, 1, 1], [5, 32, 28, 28, 256]),
('2+1d', [2, 1, 2], [5, 16, 28, 14, 256]),
('1+1+1d', [2, 2, 2], [5, 16, 14, 14, 256]),
('1+1+1d', [1, 1, 1], [5, 32, 28, 28, 256]),
('1+1+1d', [2, 1, 2], [5, 16, 28, 14, 256]),
)
def test_parameterized_conv_layer_creation(self, conv_type, strides,
expected_shape):
batch_size = 5
temporal_size = 32
spatial_size = 28
channels = 128
kernel_size = 3
filters = 256
rates = [1, 1, 1]
name = 'ParameterizedConv'
inputs = tf.keras.Input(
shape=(temporal_size, spatial_size, spatial_size, channels),
batch_size=batch_size)
parameterized_conv_layer = net_utils.ParameterizedConvLayer(
conv_type, kernel_size, filters, strides, rates, name=name)
features = parameterized_conv_layer(inputs)
logging.info(features.shape.as_list())
logging.info([w.name for w in parameterized_conv_layer.weights])
self.assertAllEqual(features.shape.as_list(), expected_shape)
if __name__ == '__main__':
tf.test.main()
| 2,429 | 34.217391 | 74 | py |
models | models-master/official/projects/s3d/modeling/net_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commonly used TensorFlow 2 network blocks."""
from typing import Any, Text, Sequence, Union
import tensorflow as tf
from official.modeling import tf_utils
WEIGHT_INITIALIZER = {
'Xavier': tf.keras.initializers.GlorotUniform,
'Gaussian': lambda: tf.keras.initializers.RandomNormal(stddev=0.01),
}
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
def make_set_from_start_endpoint(start_endpoint: Text,
endpoints: Sequence[Text]):
"""Makes a subset of endpoints from the given starting position."""
if start_endpoint not in endpoints:
return set()
start_index = endpoints.index(start_endpoint)
return set(endpoints[start_index:])
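# Example: make_set_from_start_endpoint(
#     'Mixed_4b', ['Mixed_3b', 'Mixed_4b', 'Mixed_4c'])
# returns {'Mixed_4b', 'Mixed_4c'}.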
def apply_depth_multiplier(d: Union[int, Sequence[Any]],
depth_multiplier: float):
"""Applies depth_multiplier recursively to ints."""
if isinstance(d, int):
return int(d * depth_multiplier)
else:
return [apply_depth_multiplier(x, depth_multiplier) for x in d]
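# Example: apply_depth_multiplier([[64], [96, 128]], 0.5) returns
# [[32], [48, 64]].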
class ParameterizedConvLayer(tf.keras.layers.Layer):
"""Convolution layer based on the input conv_type."""
def __init__(
self,
conv_type: Text,
kernel_size: int,
filters: int,
strides: Sequence[int],
rates: Sequence[int],
use_sync_bn: bool = False,
norm_momentum: float = 0.999,
norm_epsilon: float = 0.001,
temporal_conv_initializer: Union[
Text, initializers.Initializer] = 'glorot_uniform',
kernel_initializer: Union[Text,
initializers.Initializer] = 'truncated_normal',
kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2',
**kwargs):
super(ParameterizedConvLayer, self).__init__(**kwargs)
self._conv_type = conv_type
self._kernel_size = kernel_size
self._filters = filters
self._strides = strides
self._rates = rates
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._channel_axis = -1
else:
self._channel_axis = 1
self._temporal_conv_initializer = temporal_conv_initializer
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
def _build_conv_layer_params(self, input_shape):
"""Builds params for conv layers."""
conv_layer_params = []
if self._conv_type == '3d':
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[self._kernel_size] * 3,
strides=self._strides,
dilation_rate=self._rates,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
elif self._conv_type == '2d':
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, self._kernel_size, self._kernel_size],
strides=[1, self._strides[1], self._strides[2]],
dilation_rate=[1, self._rates[1], self._rates[2]],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
elif self._conv_type == '1+2d':
channels_in = input_shape[self._channel_axis]
conv_layer_params.append(
dict(
filters=channels_in,
kernel_size=[self._kernel_size, 1, 1],
strides=[self._strides[0], 1, 1],
dilation_rate=[self._rates[0], 1, 1],
kernel_initializer=tf_utils.clone_initializer(
self._temporal_conv_initializer),
))
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, self._kernel_size, self._kernel_size],
strides=[1, self._strides[1], self._strides[2]],
dilation_rate=[1, self._rates[1], self._rates[2]],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
elif self._conv_type == '2+1d':
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, self._kernel_size, self._kernel_size],
strides=[1, self._strides[1], self._strides[2]],
dilation_rate=[1, self._rates[1], self._rates[2]],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[self._kernel_size, 1, 1],
strides=[self._strides[0], 1, 1],
dilation_rate=[self._rates[0], 1, 1],
kernel_initializer=tf_utils.clone_initializer(
self._temporal_conv_initializer),
))
elif self._conv_type == '1+1+1d':
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, 1, self._kernel_size],
strides=[1, 1, self._strides[2]],
dilation_rate=[1, 1, self._rates[2]],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, self._kernel_size, 1],
strides=[1, self._strides[1], 1],
dilation_rate=[1, self._rates[1], 1],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[self._kernel_size, 1, 1],
strides=[self._strides[0], 1, 1],
dilation_rate=[self._rates[0], 1, 1],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
else:
raise ValueError('Unsupported conv_type: {}'.format(self._conv_type))
return conv_layer_params
def _build_norm_layer_params(self, conv_param):
"""Builds params for the norm layer after one conv layer."""
return dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones')
def _build_activation_layer_params(self, conv_param):
"""Builds params for the activation layer after one conv layer."""
return {}
def _append_conv_layer(self, param):
"""Appends conv, normalization and activation layers."""
self._parameterized_conv_layers.append(
tf.keras.layers.Conv3D(
padding='same',
use_bias=False,
kernel_regularizer=self._kernel_regularizer,
**param,
))
norm_layer_params = self._build_norm_layer_params(param)
self._parameterized_conv_layers.append(self._norm(**norm_layer_params))
relu_layer_params = self._build_activation_layer_params(param)
self._parameterized_conv_layers.append(
tf.keras.layers.Activation('relu', **relu_layer_params))
def build(self, input_shape):
self._parameterized_conv_layers = []
for conv_layer_param in self._build_conv_layer_params(input_shape):
self._append_conv_layer(conv_layer_param)
super(ParameterizedConvLayer, self).build(input_shape)
def call(self, inputs):
x = inputs
for layer in self._parameterized_conv_layers:
x = layer(x)
return x
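# --- Hedged note (added for illustration; not part of the original file).
# Parameter counts implied by _build_conv_layer_params above, for
# kernel_size=3 with C_in input and C_out output channels: '2+1d' uses a
# [1, 3, 3] conv (9 * C_in * C_out weights) followed by a [3, 1, 1] conv
# (3 * C_out * C_out weights), whereas a full '3d' conv uses
# 27 * C_in * C_out weights.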
| 8,323 | 36.495495 | 79 | py |
models | models-master/official/projects/pointpillars/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration.
Custom models, tasks, configs, etc. need to be imported into the registry so
they can be picked up by the trainer. They can be included in this file so you
do not need to handle each file separately.
"""
# pylint: disable=unused-import
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.tasks import pointpillars as task
| 1,023 | 39.96 | 80 | py |
models | models-master/official/projects/pointpillars/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars trainer."""
import os
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.pointpillars import registry_imports # pylint: disable=unused-import
from official.projects.pointpillars.utils import model_exporter
FLAGS = flags.FLAGS
def _check_if_resumed_job(model_dir: str, manual_checkpoint_path: str):
"""Check if the job is a resumed job."""
logging.info('Check if the job is resumed from %s', model_dir)
if not tf.io.gfile.exists(model_dir):
logging.info('%s not found, this is a new job.', model_dir)
return
try:
tf.train.load_checkpoint(model_dir)
except ValueError:
logging.info('No checkpoints found in %s, this is a new job.', model_dir)
return
else:
logging.info('The job is resuming from %s', model_dir)
if manual_checkpoint_path:
logging.warning('Found manually indicated checkpoint path %s for a '
'resuming job, the manual checkpoint path will be '
'ignored because the model must restore from '
'checkpoints in %s.', manual_checkpoint_path, model_dir)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
# A training job could be terminated and resumed at any time by machine
# scheduler. A resuming job will automatically restore states from the
# model_dir, like loading checkpoints. It will skip checkpointed training
# steps and start from there for subsequent training. This function simply
# checks if the job is a resumed job or not and logs info for that.
_check_if_resumed_job(model_dir, params.task.init_checkpoint)
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. 'loss_scale' takes effect only when
# dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
model_exporter.export_inference_graph(
batch_size=1,
params=params,
checkpoint_path=model_dir,
export_dir=os.path.join(model_dir, 'saved_model'))
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 4,022 | 36.95283 | 92 | py |
models | models-master/official/projects/pointpillars/tools/process_wod.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to run waymo open dataset preprocessing."""
import os
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
from apache_beam.io import tfrecordio
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.pointpillars.configs import pointpillars
from official.projects.pointpillars.utils.wod_processor import WodProcessor
from waymo_open_dataset import dataset_pb2
_SRC_DIR = flags.DEFINE_string(
'src_dir', None,
    'The directory to read official WOD tfrecords.')
_DST_DIR = flags.DEFINE_string(
'dst_dir', None,
    'The directory to write processed tfrecords.')
_CONFIG_FILE = flags.DEFINE_string(
'config_file', None,
'YAML file to specify configurations.')
_PIPELINE_OPTIONS = flags.DEFINE_string(
'pipeline_options', None,
'Command line flags to use in constructing the Beam pipeline options. '
'See https://beam.apache.org/documentation/#runners for available runners.')
# The --src_dir must contain these two sub-folders.
_SRC_FOLDERS = ['training', 'validation']
def read_dataset(pipeline: beam.Pipeline,
src_file_pattern: str) -> beam.PCollection:
reader = tfrecordio.ReadFromTFRecord(
src_file_pattern,
coder=beam.coders.ProtoCoder(dataset_pb2.Frame))
raw_frames = pipeline | f'Read frames: {src_file_pattern}' >> reader
return raw_frames
def count_examples(examples: beam.PCollection, dst_path: str):
writer = beam.io.WriteToText(
dst_path,
file_name_suffix='.stats.txt',
num_shards=1)
_ = (examples
| 'Count examples' >> beam.combiners.Count.Globally()
| 'Write statistics' >> writer)
def write_dataset(examples: beam.PCollection, dst_path: str):
writer = tfrecordio.WriteToTFRecord(
dst_path,
coder=beam.coders.ProtoCoder(tf.train.Example),
file_name_suffix='.tfrecord',
compression_type='gzip')
_ = examples | f'Write examples: {dst_path}' >> writer
def process_wod(pipeline: beam.Pipeline,
src_file_pattern: str,
dst_path: str,
wod_processor: WodProcessor):
"""Creates the process WOD dataset pipeline."""
raw_frames = read_dataset(pipeline, src_file_pattern)
examples = (
raw_frames
| 'Reshuffle post read' >> beam.Reshuffle()
| 'Process one frame' >> beam.Map(
wod_processor.process_and_convert_to_tf_example)
| 'Reshuffle post decode' >> beam.Reshuffle())
count_examples(examples, dst_path)
write_dataset(examples, dst_path)
def main(_):
pipeline_options = beam.options.pipeline_options.PipelineOptions(
_PIPELINE_OPTIONS.value.split(','))
if _CONFIG_FILE.value:
cfg = hyperparams.read_yaml_to_params_dict(_CONFIG_FILE.value)
image_config = cfg.task.model.image
pillars_config = cfg.task.model.pillars
else:
cfg = pointpillars
image_config = cfg.ImageConfig()
pillars_config = cfg.PillarsConfig()
wod_processor = WodProcessor(image_config, pillars_config)
for folder in _SRC_FOLDERS:
src_file_pattern = os.path.join(_SRC_DIR.value, folder, '*.tfrecord')
dst_path = os.path.join(_DST_DIR.value, folder)
logging.info('Processing %s, writing to %s', src_file_pattern, dst_path)
pipeline = beam.Pipeline(options=pipeline_options)
process_wod(pipeline, src_file_pattern, dst_path, wod_processor)
pipeline.run().wait_until_finish()
if __name__ == '__main__':
app.run(main)
| 4,087 | 33.066667 | 80 | py |
models | models-master/official/projects/pointpillars/tools/export_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to export PointPillars model."""
from absl import app
from absl import flags
from absl import logging
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.pointpillars import registry_imports # pylint: disable=unused-import
from official.projects.pointpillars.utils import model_exporter
_EXPERIMENT = flags.DEFINE_string(
'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco')
_EXPORT_DIR = flags.DEFINE_string('export_dir', None, 'The export directory.')
_CHECKPOINT_PATH = flags.DEFINE_string('checkpoint_path', None,
'Checkpoint path.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', None, 'Batch size.')
_CONFIG_FILE = flags.DEFINE_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides.')
_TEST_INFERENCE = flags.DEFINE_boolean(
'test_inference',
default=False,
help='True if want to load saved model and run inference.')
def main(_):
params = exp_factory.get_exp_config(_EXPERIMENT.value)
if _CONFIG_FILE.value:
params = hyperparams.override_params_dict(
params, _CONFIG_FILE.value, is_strict=True)
params.validate()
params.lock()
model_exporter.export_inference_graph(
batch_size=_BATCH_SIZE.value,
params=params,
checkpoint_path=_CHECKPOINT_PATH.value,
export_dir=_EXPORT_DIR.value)
logging.info('Successfully exported model to %s', _EXPORT_DIR.value)
if _TEST_INFERENCE.value:
predict_fn = model_exporter.load_model_predict_fn(_EXPORT_DIR.value)
pillars, indices = model_exporter.random_input_tensors(
batch_size=_BATCH_SIZE.value, params=params,
)
_ = predict_fn(pillars=pillars, indices=indices)
logging.info('Successfully test model inference')
if __name__ == '__main__':
app.run(main)
| 2,473 | 35.925373 | 92 | py |
models | models-master/official/projects/pointpillars/configs/pointpillars.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pointpillars experiment configuration definition."""
import dataclasses
from typing import List, Optional, Tuple, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import common
@dataclasses.dataclass
class ImageConfig(hyperparams.Config):
"""Bird-eye-view pseudo image config."""
# The range should be large enough to cover a 64-channels Lidar points.
# The default values are chosen empirically.
x_range: Tuple[float, float] = (-76.8, 76.8)
y_range: Tuple[float, float] = (-76.8, 76.8)
z_range: Tuple[float, float] = (-3.0, 3.0)
resolution: float = 0.3
height: int = dataclasses.field(init=False)
width: int = dataclasses.field(init=False)
# Image height and width should be auto computed.
def __post_init__(self, height: int, width: int):
self.height = int((-self.x_range[0] + self.x_range[1]) / self.resolution)
self.width = int((-self.y_range[0] + self.y_range[1]) / self.resolution)
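    # With the default ranges and resolution this yields a 512x512 grid:
    # int((76.8 + 76.8) / 0.3) = 512 cells along each axis.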
@dataclasses.dataclass
class PillarsConfig(hyperparams.Config):
"""Pillars config."""
num_pillars: int = 24000
num_points_per_pillar: int = 100
num_features_per_point: int = 10
@dataclasses.dataclass
class DataDecoder(hyperparams.Config):
"""Data decoder config."""
@dataclasses.dataclass
class DataParser(hyperparams.Config):
"""Data parser config."""
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
input_path: str = ''
global_batch_size: int = 0
is_training: bool = False
dtype: str = 'float32'
decoder: DataDecoder = dataclasses.field(default_factory=DataDecoder)
parser: DataParser = dataclasses.field(default_factory=DataParser)
shuffle_buffer_size: int = 256
prefetch_buffer_size: int = 256
file_type: str = 'tfrecord_compressed'
@dataclasses.dataclass
class Anchor(hyperparams.Config):
length: float = 1.0
width: float = 1.0
@dataclasses.dataclass
class AnchorLabeler(hyperparams.Config):
"""Data parser config."""
match_threshold: float = 0.5
unmatched_threshold: float = 0.5
@dataclasses.dataclass
class Featurizer(hyperparams.Config):
num_blocks: int = 1
num_channels: int = 64
@dataclasses.dataclass
class Backbone(hyperparams.Config):
min_level: int = 1
max_level: int = 3
num_convs: int = 6
@dataclasses.dataclass
class Decoder(hyperparams.Config):
"""Feature decoder."""
# No fields yet, just a placeholder.
@dataclasses.dataclass
class AttributeHead(hyperparams.Config):
name: str = ''
type: str = 'regression'
size: int = 1
def _default_heads():
return [
AttributeHead(name='heading', type='regression', size=1),
AttributeHead(name='height', type='regression', size=1),
AttributeHead(name='z', type='regression', size=1)
]
@dataclasses.dataclass
class SSDHead(hyperparams.Config):
attribute_heads: List[AttributeHead] = dataclasses.field(
default_factory=_default_heads)
@dataclasses.dataclass
class DetectionGenerator(hyperparams.Config):
"""Generator."""
apply_nms: bool = True
pre_nms_top_k: int = 5000
pre_nms_score_threshold: float = 0.05
nms_iou_threshold: float = 0.5
max_num_detections: int = 100
nms_version: str = 'v1' # `v2`, `v1`, `batched`
use_cpu_nms: bool = False
@dataclasses.dataclass
class PointPillarsModel(hyperparams.Config):
"""The model config. Used by build_example_model function."""
classes: str = 'all'
num_classes: int = 4
image: ImageConfig = dataclasses.field(default_factory=ImageConfig)
pillars: PillarsConfig = dataclasses.field(default_factory=PillarsConfig)
anchors: List[Anchor] = dataclasses.field(default_factory=list)
anchor_labeler: AnchorLabeler = dataclasses.field(
default_factory=AnchorLabeler
)
min_level: int = 1
max_level: int = 3
featurizer: Featurizer = dataclasses.field(default_factory=Featurizer)
backbone: Backbone = dataclasses.field(default_factory=Backbone)
decoder: Decoder = dataclasses.field(default_factory=Decoder)
head: SSDHead = dataclasses.field(default_factory=SSDHead)
detection_generator: DetectionGenerator = dataclasses.field(
default_factory=DetectionGenerator
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=common.NormActivation
)
@dataclasses.dataclass
class Losses(hyperparams.Config):
loss_weight: float = 1.0
box_loss_weight: int = 100
attribute_loss_weight: int = 10
focal_loss_alpha: float = 0.25
focal_loss_gamma: float = 1.5
huber_loss_delta: float = 0.1
l2_weight_decay: float = 0
@dataclasses.dataclass
class PointPillarsTask(cfg.TaskConfig):
"""The task config."""
model: PointPillarsModel = dataclasses.field(
default_factory=PointPillarsModel
)
use_raw_data: bool = False
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[str, List[str]] = 'all'
use_wod_metrics: bool = True
@exp_factory.register_config_factory('pointpillars_baseline')
def pointpillars_baseline() -> cfg.ExperimentConfig:
"""PointPillars baseline config."""
return cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=PointPillarsTask(
model=PointPillarsModel(
classes='vehicle',
num_classes=2,
min_level=1,
max_level=1,
anchors=[Anchor(length=1.0, width=1.0)],
featurizer=Featurizer(),
backbone=Backbone(),
decoder=Decoder(),
head=SSDHead()
),
train_data=DataConfig(is_training=True),
validation_data=DataConfig(is_training=False),
losses=Losses()
),
trainer=cfg.TrainerConfig(
train_steps=100,
validation_steps=100,
validation_interval=10,
steps_per_loop=10,
summary_interval=10,
checkpoint_interval=10,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'decay_steps': 100,
'initial_learning_rate': 0.16,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 10,
'warmup_learning_rate': 0.016
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
])
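# --- Hedged usage sketch (added for illustration; not part of the original
# file). Field names follow the dataclasses above.
def _example_override_baseline():  # hypothetical helper
  config = exp_factory.get_exp_config('pointpillars_baseline')
  config.task.train_data.global_batch_size = 2
  config.task.validation_data.global_batch_size = 2
  config.validate()
  config.lock()
  return config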
| 7,703 | 29.693227 | 77 | py |
models | models-master/official/projects/pointpillars/configs/pointpillars_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pointpillars."""
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.pointpillars.configs import pointpillars as exp_cfg
class PointPillarsConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('pointpillars_baseline',),
)
def test_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.PointPillarsTask)
self.assertIsInstance(config.task.model, exp_cfg.PointPillarsModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
self.assertIsInstance(config.task.validation_data, exp_cfg.DataConfig)
self.assertIsInstance(config.task.losses, exp_cfg.Losses)
self.assertGreater(config.task.model.image.height, 0)
self.assertGreater(config.task.model.image.width, 0)
self.assertLen(config.task.model.head.attribute_heads, 3)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,834 | 37.229167 | 74 | py |
models | models-master/official/projects/pointpillars/dataloaders/decoders_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoders."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.dataloaders import decoders
from official.vision.data.tfrecord_lib import convert_to_feature
def _mock_serialized_example(num_pillars, num_points_per_pillar,
num_features_per_point, num_boxes):
frame_id = np.random.randint(0, 10, dtype=np.int64)
pillars = np.random.rand(num_pillars, num_points_per_pillar,
num_features_per_point).astype(np.float32)
indices = np.random.randint(0, 10, size=[num_pillars, 2], dtype=np.int32)
classes = np.random.randint(0, 10, size=[num_boxes], dtype=np.int32)
ymin = np.random.rand(num_boxes).astype(np.float32)
xmin = np.random.rand(num_boxes).astype(np.float32)
ymax = np.random.rand(num_boxes).astype(np.float32)
xmax = np.random.rand(num_boxes).astype(np.float32)
heading = np.random.rand(num_boxes).astype(np.float32)
z = np.random.rand(num_boxes).astype(np.float32)
height = np.random.rand(num_boxes).astype(np.float32)
difficulty = np.random.randint(0, 10, size=[num_boxes], dtype=np.int32)
feature = {
'frame_id': convert_to_feature(frame_id, 'int64'),
'pillars': convert_to_feature(pillars.tobytes(), 'bytes'),
'indices': convert_to_feature(indices.tobytes(), 'bytes'),
'bbox/class': convert_to_feature(classes, 'int64_list'),
'bbox/ymin': convert_to_feature(ymin, 'float_list'),
'bbox/xmin': convert_to_feature(xmin, 'float_list'),
'bbox/ymax': convert_to_feature(ymax, 'float_list'),
'bbox/xmax': convert_to_feature(xmax, 'float_list'),
'bbox/heading': convert_to_feature(heading, 'float_list'),
'bbox/z': convert_to_feature(z, 'float_list'),
'bbox/height': convert_to_feature(height, 'float_list'),
'bbox/difficulty': convert_to_feature(difficulty, 'int64_list'),
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
serialized_example = example.SerializeToString()
return serialized_example
class ExampleDecoderTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(2, 10, 1, 1),
(3, 2, 10, 10),
)
def test_shape(self, num_pillars, num_points_per_pillar,
num_features_per_point, num_boxes):
image_config = cfg.ImageConfig()
pillar_config = cfg.PillarsConfig()
pillar_config.num_pillars = num_pillars
pillar_config.num_points_per_pillar = num_points_per_pillar
pillar_config.num_features_per_point = num_features_per_point
decoder = decoders.ExampleDecoder(image_config, pillar_config)
serialized_example = _mock_serialized_example(num_pillars,
num_points_per_pillar,
num_features_per_point,
num_boxes)
decoded_example = decoder.decode(
tf.convert_to_tensor(value=serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_example)
self.assertAllEqual(
(num_pillars, num_points_per_pillar, num_features_per_point),
results['pillars'].shape)
self.assertAllEqual(
(num_pillars, 2), results['indices'].shape)
self.assertAllEqual(
(num_boxes,), results['gt_classes'].shape)
self.assertAllEqual(
(num_boxes, 4), results['gt_boxes'].shape)
self.assertAllEqual(
(num_boxes, 1), results['gt_attributes']['heading'].shape)
self.assertAllEqual(
(num_boxes, 1), results['gt_attributes']['z'].shape)
self.assertAllEqual(
(num_boxes, 1), results['gt_attributes']['height'].shape)
self.assertAllEqual(
(num_boxes,), results['gt_difficulty'].shape)
if __name__ == '__main__':
tf.test.main()
| 4,519 | 42.047619 | 75 | py |
models | models-master/official/projects/pointpillars/dataloaders/parsers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data decoder and parser for Pointpillars."""
from typing import Any, Dict, List, Tuple
import tensorflow as tf
from official.projects.pointpillars.utils import utils
from official.vision.dataloaders import parser
from official.vision.ops import anchor
from official.vision.ops import preprocess_ops
class Parser(parser.Parser):
"""The class to parse decoded tensors to features and labels.
Notations:
N: number of pillars in an example
P: number of points in a pillar
D: number of features in a point
M: number of labeled boxes in an example
L: number of anchor boxes per pixel/location
"""
def __init__(self,
classes: str,
min_level: int,
max_level: int,
image_size: Tuple[int, int],
anchor_sizes: List[Tuple[float, float]],
match_threshold: float,
unmatched_threshold: float,
max_num_detections: int,
dtype: str):
"""Initialize the parser.
Args:
classes: A str to indicate which classes should be predicted.
min_level: An `int` minimum level of multiscale outputs.
max_level: An `int` maximum level of multiscale outputs.
image_size: A tuple (height, width) of image size.
anchor_sizes: A list of tuple (length, width) of anchor boxes.
match_threshold: A float number for positive anchor boxes.
unmatched_threshold: A float number for negative anchor boxes.
max_num_detections: An `int` number of maximum number of instances in an
image. The groundtruth data will be clipped/padded to the number.
dtype: `str`, data type. One of {`bfloat16`, `float32`, `float16`}.
"""
self._classes = classes
self._image_size = image_size
self._match_threshold = match_threshold
self._unmatched_threshold = unmatched_threshold
self._max_num_detections = max_num_detections
self._dtype = dtype
# Generate anchors,
    # multi-level anchor dict, {level: [h_l, w_l, anchors_per_location * 4]}.
self._anchor_boxes = utils.generate_anchors(min_level,
max_level,
image_size,
anchor_sizes)
def _fix_groundtruths_size(self, groundtruths: Dict[str, Any],
size: int) -> Dict[str, Any]:
"""Clips or pads the first dimension of groundtruths to the fixed size.
Args:
groundtruths: A dictionary of {`str`: `tf.Tensor`} that contains
groundtruth annotations of `classes`, `boxes`, `attributes` and
`difficulty`.
size: An `int` that specifies the expected size of the first dimension of
padded tensors.
Returns:
A dictionary of the same keys as input and padded tensors as values.
"""
groundtruths['classes'] = preprocess_ops.clip_or_pad_to_fixed_size(
groundtruths['classes'], size, -1)
groundtruths['boxes'] = preprocess_ops.clip_or_pad_to_fixed_size(
groundtruths['boxes'], size, -1)
if 'attributes' in groundtruths:
for k, v in groundtruths['attributes'].items():
groundtruths['attributes'][
k] = preprocess_ops.clip_or_pad_to_fixed_size(v, size, -1)
groundtruths['difficulty'] = preprocess_ops.clip_or_pad_to_fixed_size(
groundtruths['difficulty'], size, -1)
return groundtruths
def _filter_level_2_labels(
self, data: Dict[str, Any]) -> Dict[str, Any]:
"""Filter labels whose level is 2 [only for training]."""
mask = tf.where(data['gt_difficulty'] < 2)
data['gt_classes'] = tf.gather_nd(data['gt_classes'], mask)
data['gt_boxes'] = tf.gather_nd(data['gt_boxes'], mask)
for k, v in data['gt_attributes'].items():
data['gt_attributes'][k] = tf.gather_nd(v, mask)
data['gt_difficulty'] = tf.gather_nd(data['gt_difficulty'], mask)
return data
def _filter_non_class_labels(
self, data: Dict[str, Any]) -> Dict[str, Any]:
"""Filter labels whose class is not self._classes."""
if self._classes == 'all':
return data
mask = tf.where(data['gt_classes'] == utils.CLASSES[self._classes])
data['gt_classes'] = tf.gather_nd(data['gt_classes'], mask)
data['gt_boxes'] = tf.gather_nd(data['gt_boxes'], mask)
for k, v in data['gt_attributes'].items():
data['gt_attributes'][k] = tf.gather_nd(v, mask)
data['gt_difficulty'] = tf.gather_nd(data['gt_difficulty'], mask)
# Reset 'bbox/class' to 1 to be a binary classification.
data['gt_classes'] = tf.ones_like(data['gt_classes'], dtype=tf.int32)
return data
def _parse_feature_and_label(
self, data: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Parse decoded tensors to features and labels.
Args:
data: A {name: tensor} dict of decoded tensors.
Returns:
features:
- pillars: A tensor, shape: [P, N, D], type: self._dtype
- indices: A tensor with shape: [P, 2], type: int32
labels:
- cls_targets: A {level_i: [h_i, w_i, L]} dict, type: float32
- box_targets: A {level_i: [h_i, w_i, L * 4]} dict, type: float32
- attribute_targets: A {name: {level_i: [h_i, w_i, L * 1]}} dict,
type: float32
- cls_weights: A flattened tensor with shape [total_num_anchors],
total_num_anchors is anchors across all levels, type: float32
- box_weights: A flattened tensor with shape [total_num_anchors],
total_num_anchors is anchors across all levels, type: float32
"""
data = self._filter_non_class_labels(data)
pillars = data['pillars']
indices = data['indices']
classes = data['gt_classes']
boxes = data['gt_boxes']
attributes = data['gt_attributes']
# Label anchors,
# multi-level labels, {level: [h_l, w_l, ...]}.
anchor_labeler = anchor.AnchorLabeler(self._match_threshold,
self._unmatched_threshold)
(cls_targets, box_targets, att_targets, cls_weights,
box_weights) = anchor_labeler.label_anchors(
self._anchor_boxes, boxes, tf.expand_dims(classes, axis=1), attributes)
# Casts input to desired data type.
pillars = tf.cast(pillars, dtype=self._dtype)
# Packs features and labels for model_fn outputs.
features = {
'pillars': pillars,
'indices': indices,
}
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'attribute_targets': att_targets,
'cls_weights': cls_weights,
'box_weights': box_weights,
}
return features, labels
def _parse_train_data(
self, data: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Parse data for training."""
# Skip level 2 boxes for training.
data = self._filter_level_2_labels(data)
return self._parse_feature_and_label(data)
def _parse_eval_data(
self, data: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Parse data for evaluation.
Args:
data: A {name: tensor} dict of decoded tensors.
Returns:
The same features and labels as in training, plus groundtruths needed
to compute evaluation metrics.
groundtruths:
- frame_id: An int64 tensor to identify an example.
- num_detections: An `int` tensor representing the real number of boxes
used for computing metrics.
- classes: A [max_num_detections] int32 tensor
- boxes: A [max_num_detections, 4] float32 tensor
- attributes: A {name: [max_num_detections, 1]} float32 dict
- difficulty: A [max_num_detections] int32 tensor
"""
features, labels = self._parse_feature_and_label(data)
# Add for detection generator.
labels.update({
'anchor_boxes': self._anchor_boxes,
'image_shape': tf.convert_to_tensor(self._image_size),
})
# Add groundtruth for metric evaluator.
# The number of boxes to calculate evaluation metrics, will be used to
# remove padding in evaluator.
num_detections = tf.minimum(
tf.shape(data['gt_classes'])[0], self._max_num_detections)
groundtruths = {
'frame_id': data['frame_id'],
'num_detections': num_detections,
'classes': data['gt_classes'],
'boxes': data['gt_boxes'],
'attributes': data['gt_attributes'],
'difficulty': data['gt_difficulty'],
}
# Fix the size for batching
groundtruths = self._fix_groundtruths_size(groundtruths,
self._max_num_detections)
labels['groundtruths'] = groundtruths
return features, labels
| 9,308 | 38.113445 | 80 | py |
models | models-master/official/projects/pointpillars/dataloaders/decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data decoder and parser for Pointpillars."""
from typing import Any, Mapping, Tuple
import tensorflow as tf
from official.projects.pointpillars.configs import pointpillars as cfg
from official.vision.dataloaders import decoder
class ExampleDecoder(decoder.Decoder):
"""The class to decode preprocessed tf.example to tensors.
Notations:
P: number of pillars in an example
N: number of points in a pillar
D: number of features in a point
M: number of labeled boxes in an example
"""
def __init__(self,
image_config: cfg.ImageConfig,
pillars_config: cfg.PillarsConfig):
"""Initialize the decoder."""
self._feature_description = {
'frame_id': tf.io.FixedLenFeature([], tf.int64),
'pillars': tf.io.FixedLenFeature([], tf.string),
'indices': tf.io.FixedLenFeature([], tf.string),
'bbox/ymin': tf.io.VarLenFeature(tf.float32),
'bbox/xmin': tf.io.VarLenFeature(tf.float32),
'bbox/ymax': tf.io.VarLenFeature(tf.float32),
'bbox/xmax': tf.io.VarLenFeature(tf.float32),
'bbox/class': tf.io.VarLenFeature(tf.int64),
'bbox/heading': tf.io.VarLenFeature(tf.float32),
'bbox/z': tf.io.VarLenFeature(tf.float32),
'bbox/height': tf.io.VarLenFeature(tf.float32),
'bbox/difficulty': tf.io.VarLenFeature(tf.int64),
}
self._pillars_config = pillars_config
def _decode_pillars(
self, parsed_tensors: Mapping[str, tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Decode pillars from parsed tensors.
Args:
parsed_tensors: A {name: tensor} dict of parsed tensors.
Returns:
pillars: A tensor with shape [P, N, D]
indices: A tensor with shape [P, 2]
"""
pillars = tf.io.decode_raw(parsed_tensors['pillars'], tf.float32)
pillars = tf.reshape(pillars, [
self._pillars_config.num_pillars,
self._pillars_config.num_points_per_pillar,
self._pillars_config.num_features_per_point
])
indices = tf.io.decode_raw(parsed_tensors['indices'], tf.int32)
indices = tf.reshape(indices, [self._pillars_config.num_pillars, 2])
return pillars, indices
def _decode_boxes(self, parsed_tensors: Mapping[str, tf.Tensor]) -> tf.Tensor:
"""Decode boxes from parsed tensors.
Args:
parsed_tensors: A {name: tensor} dict of parsed tensors.
Returns:
boxes: A tensor with shape [M, 4], the last dim represents box yxyx
"""
ymin = parsed_tensors['bbox/ymin']
xmin = parsed_tensors['bbox/xmin']
ymax = parsed_tensors['bbox/ymax']
xmax = parsed_tensors['bbox/xmax']
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=-1)
return boxes
def decode(self, serialized_example: Any) -> Mapping[str, Any]:
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- frame_id: an int64 scalar tensor to identify an example.
- pillars: a float32 tensor of shape [P, N, D].
- indices: an int32 tensor of shape [P, 2].
- gt_classes: an int32 tensor of shape [M].
- gt_boxes: a float32 tensor of shape [M, 4].
- gt_attributes: a dict of (name, [M, 1]) float32 pairs.
- gt_difficulty: an int32 tensor of shape [M].
"""
parsed_tensors = tf.io.parse_single_example(
serialized=serialized_example, features=self._feature_description)
# Convert sparse tensor to dense tensor.
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=0)
# Decode features and labels.
frame_id = parsed_tensors['frame_id']
pillars, indices = self._decode_pillars(parsed_tensors)
classes = tf.cast(parsed_tensors['bbox/class'], tf.int32)
boxes = self._decode_boxes(parsed_tensors)
attr_heading = tf.expand_dims(parsed_tensors['bbox/heading'], axis=1)
attr_z = tf.expand_dims(parsed_tensors['bbox/z'], axis=1)
attr_height = tf.expand_dims(parsed_tensors['bbox/height'], axis=1)
difficulty = tf.cast(parsed_tensors['bbox/difficulty'], tf.int32)
decoded_tensors = {
'frame_id': frame_id,
'pillars': pillars,
'indices': indices,
'gt_classes': classes,
'gt_boxes': boxes,
'gt_attributes': {
'heading': attr_heading,
'z': attr_z,
'height': attr_height,
},
'gt_difficulty': difficulty,
}
return decoded_tensors
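# Typical pipeline sketch (illustrative; the parser name is assumed from the
# companion dataloaders/parsers.py module):
#   decoder = ExampleDecoder(image_config, pillars_config)
#   parser = parsers.Parser(...)
#   ds = tf.data.TFRecordDataset(filenames)
#   ds = ds.map(decoder.decode).map(parser.parse_fn(is_training=True))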
| 5,234 | 35.866197 | 80 | py |
models | models-master/official/projects/pointpillars/dataloaders/parsers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parsers."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.dataloaders import parsers
def _mock_decoded_example(num_pillars, num_points_per_pillar,
num_features_per_point, num_boxes):
frame_id = np.random.randint(0, 10, dtype=np.int64)
pillars = np.random.rand(num_pillars, num_points_per_pillar,
num_features_per_point).astype(np.float32)
indices = np.random.randint(0, 10, size=[num_pillars, 2], dtype=np.int32)
classes = np.random.randint(0, 10, size=[num_boxes], dtype=np.int32)
boxes = np.random.rand(num_boxes, 4).astype(np.float32)
heading = np.random.rand(num_boxes, 1).astype(np.float32)
z = np.random.rand(num_boxes, 1).astype(np.float32)
height = np.random.rand(num_boxes, 1).astype(np.float32)
difficulty = np.random.randint(0, 10, size=[num_boxes], dtype=np.int32)
decoded_example = {
'frame_id': tf.convert_to_tensor(frame_id, dtype=tf.int64),
'pillars': tf.convert_to_tensor(pillars, dtype=tf.float32),
'indices': tf.convert_to_tensor(indices, dtype=tf.int32),
'gt_classes': tf.convert_to_tensor(classes, dtype=tf.int32),
'gt_boxes': tf.convert_to_tensor(boxes, dtype=tf.float32),
'gt_attributes': {
'heading': tf.convert_to_tensor(heading, dtype=tf.float32),
'z': tf.convert_to_tensor(z, dtype=tf.float32),
'height': tf.convert_to_tensor(height, dtype=tf.float32),
},
'gt_difficulty': tf.convert_to_tensor(difficulty, dtype=tf.int32),
}
return decoded_example
class ParserTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('all', 1, 10, True),
('vehicle', 10, 2, True),
('pedestrian', 1, 10, False),
('cyclist', 10, 2, False),
)
def test_shape(self, classes, num_boxes, max_num_boxes, is_training):
min_level = 1
max_level = 3
image_size = (32, 32)
anchor_sizes = [(1.1, 2.2)]
num_anchors_per_location = len(anchor_sizes)
match_threshold = 0.5
unmatched_threshold = 0.5
parser = parsers.Parser(classes, min_level, max_level, image_size,
anchor_sizes, match_threshold, unmatched_threshold,
max_num_boxes, 'float32')
num_pillars = 2
num_points_per_pillar = 3
num_features_per_point = 4
decoded_example = _mock_decoded_example(num_pillars, num_points_per_pillar,
num_features_per_point, num_boxes)
features, labels = parser.parse_fn(is_training=is_training)(
decoded_tensors=decoded_example)
features = tf.nest.map_structure(lambda x: x.numpy(), features)
labels = tf.nest.map_structure(lambda x: x.numpy(), labels)
self.assertAllEqual(
(num_pillars, num_points_per_pillar, num_features_per_point),
features['pillars'].shape)
self.assertAllEqual(
(num_pillars, 2), features['indices'].shape)
total_num_anchors = 0
for level in range(min_level, max_level + 1):
stride = 2**level
h_i = image_size[0] / stride
w_i = image_size[1] / stride
total_num_anchors += h_i * w_i * num_anchors_per_location
self.assertAllEqual((h_i, w_i, num_anchors_per_location),
labels['cls_targets'][str(level)].shape)
self.assertAllEqual((h_i, w_i, num_anchors_per_location * 4),
labels['box_targets'][str(level)].shape)
self.assertAllEqual(
(h_i, w_i, num_anchors_per_location),
labels['attribute_targets']['heading'][str(level)].shape)
self.assertAllEqual(
(h_i, w_i, num_anchors_per_location),
labels['attribute_targets']['height'][str(level)].shape)
self.assertAllEqual(
(h_i, w_i, num_anchors_per_location),
labels['attribute_targets']['z'][str(level)].shape)
if not is_training:
self.assertAllEqual((h_i, w_i, num_anchors_per_location * 4),
labels['anchor_boxes'][str(level)].shape)
self.assertAllEqual((total_num_anchors,),
labels['cls_weights'].shape)
self.assertAllEqual((total_num_anchors,),
labels['box_weights'].shape)
if not is_training:
self.assertAllEqual((2,), labels['image_shape'].shape)
groundtruths = labels['groundtruths']
self.assertEmpty(groundtruths['frame_id'].shape)
self.assertEmpty(groundtruths['num_detections'].shape)
self.assertAllEqual(
(max_num_boxes,), groundtruths['classes'].shape)
self.assertAllEqual(
(max_num_boxes, 4), groundtruths['boxes'].shape)
self.assertAllEqual(
(max_num_boxes, 1), groundtruths['attributes']['heading'].shape)
self.assertAllEqual(
(max_num_boxes, 1), groundtruths['attributes']['height'].shape)
self.assertAllEqual(
(max_num_boxes, 1), groundtruths['attributes']['z'].shape)
self.assertAllEqual(
(max_num_boxes,), groundtruths['difficulty'].shape)
if __name__ == '__main__':
tf.test.main()
| 5,757 | 41.029197 | 79 | py |
models | models-master/official/projects/pointpillars/utils/wod_processor.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to process waymo open dataset."""
from typing import Any, List, Mapping, Optional, Sequence, Tuple
import zlib
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.utils import utils
from official.vision.data.tfrecord_lib import convert_to_feature
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset import label_pb2
from waymo_open_dataset.utils import frame_utils
# The minimum length of required labeling boxes.
_MIN_BOX_LENGTH = 1e-2
# The seed for random generator.
_RANDOM_SEED = 42
class WodProcessor:
"""The class to process waymo-open-dataset-tf-2-6-0.
https://github.com/waymo-research/waymo-open-dataset
"""
def __init__(self,
image_config: cfg.ImageConfig,
pillars_config: cfg.PillarsConfig):
self._x_range = image_config.x_range
self._y_range = image_config.y_range
self._z_range = image_config.z_range
self._resolution = image_config.resolution
self._one_over_resolution = 1.0 / self._resolution
self._image_height = image_config.height
self._image_width = image_config.width
self._vehicle_xy = utils.get_vehicle_xy(image_height=image_config.height,
image_width=image_config.width,
x_range=image_config.x_range,
y_range=image_config.y_range)
self._num_pillars = pillars_config.num_pillars
self._num_points_per_pillar = pillars_config.num_points_per_pillar
self._num_features_per_point = pillars_config.num_features_per_point
self._rng = np.random.default_rng(seed=_RANDOM_SEED)
def _parse_range_image_and_top_pose(
self, frame: dataset_pb2.Frame
) -> Tuple[Mapping[int, List[dataset_pb2.MatrixFloat]],
Optional[dataset_pb2.MatrixFloat]]:
"""Parse range images and top pose given a frame.
Args:
frame: A frame message in wod dataset.proto.
Returns:
range_images: A dict of {laser_name: [range_image_return]},
each range_image_return is a MatrixFloat with shape (H, W, 4).
range_image_top_pose: Range image pixel pose for top lidar,
a MatrixFloat with shape (H, W, 6).
"""
range_images = {}
range_image_top_pose = None
# Parse lidar laser data from two returns, ri_return1 is the first return,
# ri_return2 is the second return. Also get the top lidar pose from the
# first return of the top lidar.
for laser in frame.lasers:
if laser.ri_return1.range_image_compressed:
ri_str = zlib.decompress(laser.ri_return1.range_image_compressed)
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(ri_str)
range_images[int(laser.name)] = [ri]
if laser.name == dataset_pb2.LaserName.TOP:
pos_str = zlib.decompress(
laser.ri_return1.range_image_pose_compressed)
range_image_top_pose = dataset_pb2.MatrixFloat()
range_image_top_pose.ParseFromString(pos_str)
if laser.ri_return2.range_image_compressed:
ri_str = zlib.decompress(laser.ri_return2.range_image_compressed)
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(ri_str)
range_images[int(laser.name)].append(ri)
return range_images, range_image_top_pose
def _convert_range_image_to_point_cloud(
self,
frame: dataset_pb2.Frame,
range_images: Mapping[int, List[dataset_pb2.MatrixFloat]],
range_image_top_pose: dataset_pb2.MatrixFloat,
ri_index: int) -> np.ndarray:
"""Convert range images (polar) to point cloud (Cartesian).
Args:
frame: A frame message in wod dataset.proto.
range_images: A dict of {laser_name: [range_image_return]}.
range_image_top_pose: Range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
Returns:
point_cloud: a np array with shape (M, F),
each point has F attributes [x, y, z, intensity, elongation].
"""
calibrations = sorted(
frame.context.laser_calibrations, key=lambda c: c.name)
point_cloud = []
cartesian_tensor = frame_utils.convert_range_image_to_cartesian(
frame, range_images, range_image_top_pose, ri_index, False)
for calibration in calibrations:
# Get range_image for this lidar calibration.
range_image = range_images[calibration.name][ri_index]
range_image_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image.data), range_image.shape.dims)
# Stack xyz, intensity, elongation together.
xyz_tensor = cartesian_tensor[calibration.name]
intensity_tensor = range_image_tensor[..., 1:2]
elongation_tensor = range_image_tensor[..., 2:3]
points_tensor = tf.concat(
[xyz_tensor, intensity_tensor, elongation_tensor], axis=-1)
# Only select points if:
# 1. its range is greater than 0m, and
# 2. it is not in any no-label-zone
distance_mask = range_image_tensor[..., 0] > 0
nlz_mask = range_image_tensor[..., 3] == -1.0
mask = tf.logical_and(distance_mask, nlz_mask)
points_tensor = tf.gather_nd(points_tensor, tf.where(mask))
point_cloud.append(points_tensor.numpy())
point_cloud = np.concatenate(point_cloud, axis=0)
# Shuffle points to make the order independent to the range image.
# Otherwise, the pillars close to the auto vehicle would be empty if distant
# pillars have exceeded the maximum number.
self._rng.shuffle(point_cloud)
return point_cloud
def extract_point_cloud(
self, frame: dataset_pb2.Frame) -> Tuple[np.ndarray, np.ndarray]:
"""Extract point cloud from frame proto.
Args:
frame: A frame message in wod dataset.proto.
Returns:
points: The point cloud, a float array with shape (M, F).
points_location: The pseudo image col/row of points, an array (M, 2),
col/row, int32.
"""
# Get point cloud from range images
range_images, range_image_top_pose = self._parse_range_image_and_top_pose(
frame)
points_r1 = self._convert_range_image_to_point_cloud(
frame, range_images, range_image_top_pose, 0)
points_r2 = self._convert_range_image_to_point_cloud(
frame, range_images, range_image_top_pose, 1)
points = np.concatenate([points_r1, points_r2], axis=0)
# Get image col/row of points
points_location = utils.frame_to_image_coord(
points[:, 0:2], self._vehicle_xy, self._one_over_resolution)
# Select points locating inside the range.
selection = np.where((points_location[:, 0] >= 0) &
(points_location[:, 0] < self._image_width) &
(points_location[:, 1] >= 0) &
(points_location[:, 1] < self._image_height) &
(points[:, 2] >= self._z_range[0]) &
(points[:, 2] <= self._z_range[1]))
points = points[selection]
points_location = points_location[selection]
return points, points_location
def compute_pillars(
self,
points: np.ndarray,
points_location: np.ndarray) -> Tuple[tf.Tensor, tf.Tensor, int]:
"""Compute pillars from point cloud.
Args:
points: The point cloud, a np array with shape (M, F).
points_location: The pseudo image col/row of points, a np array (M, 2).
Returns:
pillar_features: A tensor with shape (P, N, D).
pillar_indices: A tensor with shape (P, 2), row/col, int32.
pillars_count: The number of computed pillars before pad/trim.
Notations:
h: image height
w: image width
p: number of pillars per example after trimming or padding
n: number of points per pillar
d: number of features per point after processing
f: number of features per point before processing
k: number of pillars before trimming or padding
"""
h, w = self._image_height, self._image_width
p, n, d = (self._num_pillars, self._num_points_per_pillar,
self._num_features_per_point)
f = points.shape[-1]
grid_num_points = np.zeros((h, w), dtype=np.int32)
grid_locations = np.zeros((h, w, 2), dtype=np.int32)
grid_points = np.zeros((h, w, n, f), dtype=np.float32)
# Fill points into 2D grid.
for point, (c, r) in zip(points, points_location):
point_count = grid_num_points[r][c]
if point_count == n:
continue
grid_num_points[r][c] += 1
grid_locations[r][c] = [c, r]
grid_points[r][c][point_count][:] = point[:]
# Select k non-empty pillars randomly.
selection = np.where(grid_num_points > 0)
selection = [(i, j) for i, j in zip(selection[0], selection[1])]
self._rng.shuffle(selection)
selection = ([i[0] for i in selection], [i[1] for i in selection])
k = len(selection[0])
# (k,)
pillar_num_points = grid_num_points[selection]
# (k, 2)
pillar_locations = grid_locations[selection]
# (k, n, f)
pillar_points = grid_points[selection]
# Pad or trim to p pillars.
# (p,)
pillar_num_points = utils.pad_or_trim_to_shape(pillar_num_points, [p])
# (p, 2)
pillar_locations = utils.pad_or_trim_to_shape(pillar_locations, [p, 2])
# (p, n, f)
pillar_points = utils.pad_or_trim_to_shape(pillar_points, [p, n, f])
# Compute pillar features.
# (p, n, 3)
pillar_xyz = pillar_points[..., 0:3]
# (p, n, f-3)
pillar_others = pillar_points[..., 3:]
# (p, 1, 3)
pillar_sum_xyz = np.sum(pillar_xyz, axis=1, keepdims=True)
num_points = np.maximum(
pillar_num_points, 1.0, dtype=np.float32).reshape(p, 1, 1)
pillar_mean_xyz = pillar_sum_xyz / num_points
# (p, n, 3)
pillar_dxyz = pillar_xyz - pillar_mean_xyz
# (p, 1, 2)
pillar_center_xy = utils.image_to_frame_coord(
pillar_locations, self._vehicle_xy, self._resolution).reshape(p, 1, 2)
# Concat all features together, (p, n, d).
pillar_features = np.concatenate([
pillar_dxyz,
pillar_others,
np.tile(pillar_mean_xyz, (1, n, 1)),
np.tile(pillar_center_xy, (1, n, 1))], axis=-1)
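# Note: the concatenation above yields d = 3 (xyz offsets from the pillar
# mean) + (f - 3) raw extras + 3 (tiled pillar mean xyz) + 2 (tiled pillar
# center xy) = f + 5 features per point.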
# Swap columns to get pillar indices as [row, col], (p, 2).
pillar_locations[:, [0, 1]] = pillar_locations[:, [1, 0]]
utils.assert_shape(pillar_features, [p, n, d])
utils.assert_shape(pillar_locations, [p, 2])
pillar_features = tf.convert_to_tensor(pillar_features, dtype=tf.float32)
pillar_locations = tf.convert_to_tensor(pillar_locations, dtype=tf.int32)
return pillar_features, pillar_locations, k
def _adjust_label_type(self, label: label_pb2.Label) -> int:
# Only care about (vehicle, pedestrian, cyclist) types, override sign type
# with cyclist. After this, the types mapping would be:
# 0: unknown, 1: vehicle, 2: pedestrian, 3: cyclist
if label.type == label_pb2.Label.TYPE_CYCLIST:
return 3
return int(label.type)
def _adjust_difficulty_level(self, label: label_pb2.Label) -> int:
# Extend level-2 difficulty labels with boxes which have very little lidar
# points, since the model is a single modality (lidar) model.
if (label.num_lidar_points_in_box <= 5 or
label.detection_difficulty_level == label_pb2.Label.LEVEL_2):
return 2
return 1
def extract_labels(self, frame: dataset_pb2.Frame) -> Sequence[tf.Tensor]:
"""Extract bounding box labels from frame proto.
Args:
frame: A frame message in wod dataset.proto.
Returns:
labels: A sequence of processed tensors.
"""
xmin = []
xmax = []
ymin = []
ymax = []
classes = []
heading = []
z = []
height = []
difficulty = []
for label in frame.laser_labels:
box = label.box
# Skip boxes that don't contain any lidar points.
# WARNING: Do not enable this filter when using v.1.0.0 data.
if label.num_lidar_points_in_box == 0:
continue
# Skip boxes whose type is SIGN.
if label.type == label_pb2.Label.TYPE_SIGN:
continue
# Skip boxes whose z is out of range.
half_height = box.height * 0.5
if (box.center_z - half_height < self._z_range[0] or
box.center_z + half_height > self._z_range[1]):
continue
# Get boxes in image coordinate.
frame_box = np.array([[box.center_x, box.center_y, box.length,
box.width]])
image_box = utils.frame_to_image_boxes(frame_box, self._vehicle_xy,
self._one_over_resolution)
# Skip empty boxes.
image_box = utils.clip_boxes(image_box, self._image_height,
self._image_width)[0]
y0, x0, y1, x1 = image_box
if np.abs(y0 - y1) < _MIN_BOX_LENGTH or np.abs(x0 - x1) < _MIN_BOX_LENGTH:
continue
label_cls = self._adjust_label_type(label)
level = self._adjust_difficulty_level(label)
classes.append(label_cls)
ymin.append(y0)
xmin.append(x0)
ymax.append(y1)
xmax.append(x1)
heading.append(box.heading)
z.append(box.center_z)
height.append(box.height)
difficulty.append(level)
classes = tf.convert_to_tensor(classes, dtype=tf.int32)
ymin = tf.convert_to_tensor(ymin, dtype=tf.float32)
xmin = tf.convert_to_tensor(xmin, dtype=tf.float32)
ymax = tf.convert_to_tensor(ymax, dtype=tf.float32)
xmax = tf.convert_to_tensor(xmax, dtype=tf.float32)
heading = tf.convert_to_tensor(heading, dtype=tf.float32)
z = tf.convert_to_tensor(z, dtype=tf.float32)
height = tf.convert_to_tensor(height, dtype=tf.float32)
difficulty = tf.convert_to_tensor(difficulty, dtype=tf.int32)
# NOTE: This function might be called by an online data loader in a
# tf.py_function wrapping fashion. But tf.py_function doesn't support
# dict return type, so we have to return a sequence of unpacked tensors.
return classes, ymin, xmin, ymax, xmax, heading, z, height, difficulty
def process_one_frame(self, frame: dataset_pb2.Frame) -> Sequence[Any]:
"""Compute features and labels.
Args:
frame: A frame message in wod dataset.proto.
Returns:
labels: A sequence of processed tensors.
"""
timestamp = frame.timestamp_micros
timestamp = tf.convert_to_tensor(timestamp, dtype=tf.int64)
points, points_location = self.extract_point_cloud(frame)
pillars, indices, _ = self.compute_pillars(points, points_location)
(classes, ymin, xmin, ymax, xmax, heading, z, height,
difficulty) = self.extract_labels(frame)
# NOTE: This function might be called by an online data loader in a
# tf.py_function wrapping fashion. But tf.py_function doesn't support
# dict return type, so we have to return a sequence of unpacked tensors.
return (timestamp, pillars, indices, classes, ymin, xmin, ymax, xmax,
heading, z, height, difficulty)
def process_and_convert_to_tf_example(
self, frame: dataset_pb2.Frame) -> tf.train.Example:
"""Processes one wod source tfrecord.
Args:
frame: The parsed wod frame proto.
Returns:
example: The tf example converted from frame.
"""
(timestamp, pillars, indices, classes, ymin, xmin, ymax, xmax,
heading, z, height, difficulty) = self.process_one_frame(frame)
feature = {
'frame_id': convert_to_feature(timestamp.numpy(), 'int64'),
'pillars': convert_to_feature(pillars.numpy().tobytes(), 'bytes'),
'indices': convert_to_feature(indices.numpy().tobytes(), 'bytes'),
'bbox/class': convert_to_feature(classes.numpy(), 'int64_list'),
'bbox/ymin': convert_to_feature(ymin.numpy(), 'float_list'),
'bbox/xmin': convert_to_feature(xmin.numpy(), 'float_list'),
'bbox/ymax': convert_to_feature(ymax.numpy(), 'float_list'),
'bbox/xmax': convert_to_feature(xmax.numpy(), 'float_list'),
'bbox/heading': convert_to_feature(heading.numpy(), 'float_list'),
'bbox/z': convert_to_feature(z.numpy(), 'float_list'),
'bbox/height': convert_to_feature(height.numpy(), 'float_list'),
'bbox/difficulty': convert_to_feature(difficulty.numpy(), 'int64_list'),
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
return example
| 17,005 | 38.094253 | 80 | py |
models | models-master/official/projects/pointpillars/utils/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utility functions for pointpillars."""
import collections
from typing import Any, List, Mapping, Tuple
import numpy as np
import tensorflow as tf
CLASSES = {'vehicle': 1, 'pedestrian': 2, 'cyclist': 3}
def assert_shape(x: np.ndarray, shape: List[int]):
if tuple(x.shape) != tuple(shape):
raise ValueError('Shape of array should be {}, but {} found'.format(
shape, x.shape))
def assert_channels_last():
if tf.keras.backend.image_data_format() != 'channels_last':
raise ValueError('Only "channels_last" mode is supported')
def pad_or_trim_to_shape(x: np.ndarray, shape: List[int]) -> np.ndarray:
"""Pad and trim x to the specified shape, x should have same rank as shape.
Args:
x: An np array.
shape: A list of int indicating a array shape.
Returns:
y: An np array with padded/trimmed shape.
"""
shape = np.array(shape)
# Try to pad from end
pad_end = shape - np.minimum(x.shape, shape)
pad_begin = np.zeros_like(pad_end)
padder = np.stack([pad_begin, pad_end], axis=1)
x = np.pad(x, padder)
# Try to trim from end.
slice_end = shape
slice_begin = np.zeros_like(slice_end)
slicer = tuple(map(slice, slice_begin, slice_end))
y = x[slicer].reshape(shape)
return y
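# Example (illustrative): a (2, 3) array with target shape [3, 2] is padded
# to (3, 3) along axis 0, then trimmed to (3, 2) along axis 1:
#   pad_or_trim_to_shape(np.ones((2, 3)), [3, 2]).shape  # -> (3, 2)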
def clip_boxes(boxes: np.ndarray, image_height: int,
image_width: int) -> np.ndarray:
"""Clip boxes to image boundaries.
Args:
boxes: An np array of boxes, [y0, x0, y1, x1].
image_height: An int of image height.
image_width: An int of image width.
Returns:
clipped_boxes: An np array of boxes, [y0, x0, y1, x1].
"""
max_length = [image_height, image_width, image_height, image_width]
clipped_boxes = np.maximum(np.minimum(boxes, max_length), 0.0)
return clipped_boxes
def get_vehicle_xy(image_height: int, image_width: int,
x_range: Tuple[float, float],
y_range: Tuple[float, float]) -> Tuple[int, int]:
"""Get vehicle x/y in image coordinate.
Args:
image_height: A float of image height.
image_width: A float of image width.
x_range: A float tuple of (-x, +x).
y_range: A float tuple of (-y, +y).
Returns:
vehicle_xy: An int tuple of (col, row).
"""
vehicle_col = (image_width * (-x_range[0] / (-x_range[0] + x_range[1])))
vehicle_row = (image_height * (-y_range[0] / (-y_range[0] + y_range[1])))
vehicle_xy = (int(vehicle_col), int(vehicle_row))
return vehicle_xy
def frame_to_image_coord(frame_xy: np.ndarray, vehicle_xy: Tuple[int, int],
one_over_resolution: float) -> np.ndarray:
"""Convert float frame (x, y) to int image (x, y).
Args:
frame_xy: An np array of frame xy coordinates.
vehicle_xy: An int tuple of (vehicle_x, vehicle_y) in image.
one_over_resolution: A float of one over image resolution.
Returns:
image_xy: An np array of image xy cooridnates.
"""
image_xy = np.floor(frame_xy * one_over_resolution).astype(np.int32)
image_xy[..., 0] += vehicle_xy[0]
image_xy[..., 1] = vehicle_xy[1] - 1 - image_xy[..., 1]
return image_xy
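# Round-trip sketch (illustrative): with vehicle_xy=(5, 5) and a 1m
# resolution, the frame point (1.0, 1.0) maps to image (6, 3); image x grows
# with frame x, while image y (rows) grows opposite to frame y:
#   frame_to_image_coord(np.array([[1.0, 1.0]]), (5, 5), 1.0)  # -> [[6, 3]]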
def image_to_frame_coord(image_xy: np.ndarray, vehicle_xy: Tuple[int, int],
resolution: float) -> np.ndarray:
"""Convert int image (x, y) to float frame (x, y).
Args:
image_xy: An np array of image xy cooridnates.
vehicle_xy: An int tuple of (vehicle_x, vehicle_y) in image.
resolution: A float of image resolution.
Returns:
frame_xy: An np array of frame xy coordinates.
"""
frame_xy = image_xy.astype(np.float32)
frame_xy[..., 0] = (frame_xy[..., 0] - vehicle_xy[0]) * resolution
frame_xy[..., 1] = (vehicle_xy[1] - 1 - frame_xy[..., 1]) * resolution
return frame_xy
def frame_to_image_boxes(frame_boxes: Any, vehicle_xy: Tuple[int, int],
one_over_resolution: float) -> Any:
"""Convert boxes from frame coordinate to image coordinate.
Args:
frame_boxes: A [N, 4] array or tensor, [center_x, center_y, length, width]
in frame coordinate.
vehicle_xy: An int tuple of (vehicle_x, vehicle_y) in image.
one_over_resolution: A float number, 1.0 / resolution.
Returns:
image_boxes: A [N, 4] array or tensor, [ymin, xmin, ymax, xmax] in image
coordinate.
"""
center_x = frame_boxes[..., 0]
center_y = frame_boxes[..., 1]
box_length = frame_boxes[..., 2]
box_width = frame_boxes[..., 3]
image_box_length = box_length * one_over_resolution
image_box_width = box_width * one_over_resolution
image_box_center_x = (center_x * one_over_resolution + vehicle_xy[0])
image_box_center_y = (vehicle_xy[1] - 1 - center_y * one_over_resolution)
ymin = image_box_center_y - image_box_width * 0.5
xmin = image_box_center_x - image_box_length * 0.5
ymax = image_box_center_y + image_box_width * 0.5
xmax = image_box_center_x + image_box_length * 0.5
image_boxes = np.stack([ymin, xmin, ymax, xmax], axis=-1)
return image_boxes
def image_to_frame_boxes(image_boxes: Any, vehicle_xy: Tuple[int, int],
resolution: float) -> Any:
"""Convert boxes from image coordinate to frame coordinate.
Args:
image_boxes: A [N, 4] array or tensor, [ymin, xmin, ymax, xmax] in image
coordinate.
vehicle_xy: An int tuple of (vehicle_x, vehicle_y) in image.
resolution: A float number representing pillar grid resolution.
Returns:
frame_boxes: A [N, 4] array or tensor, [center_x, center_y, length, width]
in frame coordinate.
"""
ymin = image_boxes[..., 0]
xmin = image_boxes[..., 1]
ymax = image_boxes[..., 2]
xmax = image_boxes[..., 3]
image_box_length = xmax - xmin
image_box_width = ymax - ymin
image_box_center_x = xmin + image_box_length * 0.5
image_box_center_y = ymin + image_box_width * 0.5
center_x = (image_box_center_x - vehicle_xy[0]) * resolution
center_y = (vehicle_xy[1] - 1 - image_box_center_y) * resolution
box_length = image_box_length * resolution
box_width = image_box_width * resolution
frame_boxes = np.stack([center_x, center_y, box_length, box_width], axis=-1)
return frame_boxes
def clip_heading(heading: Any) -> Any:
"""Clip heading to the range [-pi, pi]."""
heading = tf.nest.map_structure(lambda x: np.pi * tf.tanh(x), heading)
return heading
def wrap_angle_rad(angles_rad: Any,
min_val: float = -np.pi,
max_val: float = np.pi) -> Any:
"""Wrap the value of `angles_rad` to the range [min_val, max_val]."""
max_min_diff = max_val - min_val
# Shift into [0, max_min_diff) relative to min_val, then shift back, so the
# wrap is correct even when the range is not symmetric around zero.
return min_val + tf.math.floormod(angles_rad - min_val, max_min_diff)
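# Example (illustrative): wrap_angle_rad(tf.constant(3 * np.pi / 2))
# evaluates to -pi / 2 under the default [-pi, pi] range.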
def generate_anchors(min_level: int, max_level: int, image_size: Tuple[int],
anchor_sizes: List[Tuple[float]]) -> Mapping[str, Any]:
"""Generate anchor boxes without scale to level stride.
Args:
min_level: integer number of minimum level of the output.
max_level: integer number of maximum level of the output.
image_size: a tuple (image_height, image_width).
anchor_sizes: a list of tuples, each tuple is (anchor_length, anchor_width).
Returns:
boxes_all: a {level: boxes_i} dict, each boxes_i is a [h_i, w_i, 4] tensor
for boxes at level i, each box is (ymin, xmin, ymax, xmax).
Notations:
k: length of anchor_sizes, the number of indicated anchors.
w: the image width at a specific level.
h: the image height at a specific level.
"""
# Prepare k anchors' lengths and widths
k = len(anchor_sizes)
# (k,)
anchor_lengths = []
anchor_widths = []
for anchor_size in anchor_sizes:
anchor_lengths.append(anchor_size[0])
anchor_widths.append(anchor_size[1])
anchor_lengths = tf.convert_to_tensor(anchor_lengths, dtype=tf.float32)
anchor_widths = tf.convert_to_tensor(anchor_widths, dtype=tf.float32)
# (1, 1, k)
half_anchor_lengths = tf.reshape(0.5 * anchor_lengths, [1, 1, k])
half_anchor_widths = tf.reshape(0.5 * anchor_widths, [1, 1, k])
boxes_all = collections.OrderedDict()
for level in range(min_level, max_level + 1):
# Generate anchor boxes for this level with stride.
stride = 2 ** level
# (w,)
x = tf.range(stride / 2, image_size[1], stride, dtype=tf.float32)
# (h,)
y = tf.range(stride / 2, image_size[0], stride, dtype=tf.float32)
# (h, w)
xv, yv = tf.meshgrid(x, y)
# (h, w, 1)
xv = tf.expand_dims(xv, axis=-1)
yv = tf.expand_dims(yv, axis=-1)
# (h, w, k, 1)
y_min = tf.expand_dims(yv - half_anchor_widths, axis=-1)
y_max = tf.expand_dims(yv + half_anchor_widths, axis=-1)
x_min = tf.expand_dims(xv - half_anchor_lengths, axis=-1)
x_max = tf.expand_dims(xv + half_anchor_lengths, axis=-1)
# (h, w, k, 4)
boxes_i = tf.concat([y_min, x_min, y_max, x_max], axis=-1)
# [h, w, k * 4]
shape = boxes_i.shape.as_list()
boxes_i = tf.reshape(boxes_i, [shape[0], shape[1], shape[2] * shape[3]])
boxes_all[str(level)] = boxes_i
return boxes_all
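# Usage sketch (illustrative): for a 16x16 image, levels 1..2 and a single
# (2.0, 1.0) anchor size:
#   anchors = generate_anchors(1, 2, (16, 16), [(2.0, 1.0)])
#   anchors['1'].shape  # -> (8, 8, 4)
#   anchors['2'].shape  # -> (4, 4, 4)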
| 9,549 | 33.981685 | 80 | py |
models | models-master/official/projects/pointpillars/utils/wod_detection_evaluator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detection evaluator for the Waymo Open Dataset."""
import abc
from typing import Any, Mapping
from absl import logging
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.utils import utils
from waymo_open_dataset import label_pb2
from waymo_open_dataset.metrics.python import wod_detection_evaluator
np.set_printoptions(precision=4, suppress=True)
class _AbstractEvaluator(
wod_detection_evaluator.WODDetectionEvaluator, metaclass=abc.ABCMeta):
"""WOD detection evaluation metric base class."""
def __init__(self, model_config, config=None):
super().__init__(config=config)
image_config = model_config.image
self._resolution = image_config.resolution
self._vehicle_xy = utils.get_vehicle_xy(image_config.height,
image_config.width,
image_config.x_range,
image_config.y_range)
self._classes = model_config.classes
def _remove_padding(self, tensor_dict: Mapping[str, Any],
num_valid: int) -> Mapping[str, Any]:
"""Remove the paddings of the prediction/groundtruth data."""
result_tensor_dict = {}
gather_indices = tf.range(num_valid)
for k, v in tensor_dict.items():
if v.shape[0] < num_valid:
raise ValueError(
'{} does not have enough elements to gather, {} < {}'.format(
k, v.shape[0], num_valid))
result_tensor_dict[k] = tf.gather(v, gather_indices)
return result_tensor_dict
def _compact_tensors(self,
tensor_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Compact tensors by concatenating them in tuples."""
compact_tensor_dict = {}
for k, v in tensor_dict.items():
if isinstance(v, tuple):
compact_tensor_dict[k] = tf.concat(v, axis=0)
elif isinstance(v, dict):
compact_tensor_dict[k] = v
for dk, dv in v.items():
if isinstance(dv, tuple):
compact_tensor_dict[k][dk] = tf.concat(dv, axis=0)
else:
compact_tensor_dict[k] = v
return compact_tensor_dict
def _adjust_class(self, tensor_dict: Mapping[str, Any]) -> tf.Tensor:
"""Change predicted class to what defiend by label.proto."""
original_type = tf.cast(tensor_dict['classes'], tf.uint8)
if self._classes == 'all':
adjusted_type = tf.where(
tf.equal(original_type, 3),
tf.ones_like(original_type) * 4,
original_type)
else:
adjusted_type = tf.where(
tf.equal(original_type, 1),
tf.ones_like(original_type) * utils.CLASSES[self._classes],
original_type)
return adjusted_type
@abc.abstractmethod
def _get_box(self, box2d: tf.Tensor, attributes: Mapping[str, tf.Tensor]):
"""Get box from yxyx and attributes.
Args:
box2d: a [N, 4] tensor encoding as (ymin, xmin, ymax, xmax)
attributes: a {name: [N, 1]} dict
Returns:
box: a tensor representing a 2d or 3d box
"""
def update_state(self,
groundtruths: Mapping[str, tf.Tensor],
predictions: Mapping[str, tf.Tensor]):
"""Update the metrics state with prediction and groundtruth data.
Notations:
B: batch size.
N: number of ground truth boxes.
M: number of predicted boxes.
T: attribute size.
Args:
groundtruths: a dictionary of Tensors including the fields below.
Required fields:
- frame_id: a tensor of int64 of shape [B].
- num_detections: a tensor of int32 of shape [B].
- boxes: a tensor of float32 of shape [B, N, 4],
(ymin, xmin, ymax, xmax).
- classes: a tensor of int32 of shape [B, N].
- attributes: a dict of tensor of float32 of shape [B, N, T].
- difficulty: a tensor of int32 of shape [B, N].
predictions: a dictionary of tensors including the fields below.
Required fields:
- num_detections: a tensor of int32 of shape [B].
- boxes: a tensor of float32 of shape [B, M, 4],
(ymin, xmin, ymax, xmax).
- scores: a tensor of float32 of shape [B, M].
- classes: a tensor of int32 of shape [B, M].
- attributes: a dict of tensor of float32 of shape [B, M, T].
"""
# Remove tuples from dataset.
groundtruths = self._compact_tensors(groundtruths)
predictions = self._compact_tensors(predictions)
# Adjust type.
gt_type = self._adjust_class(groundtruths)
pred_type = self._adjust_class(predictions)
batch_size = tf.shape(groundtruths['frame_id'])[0]
for i in tf.range(batch_size):
# Set ground truths
gt_num_detections = groundtruths['num_detections'][i]
gt_attributes = {}
for k, v in groundtruths['attributes'].items():
gt_attributes[k] = v[i]
frame_groundtruths = {
'ground_truth_frame_id':
tf.tile([groundtruths['frame_id'][i]], [gt_num_detections]),
'ground_truth_bbox':
self._get_box(groundtruths['boxes'][i], gt_attributes),
'ground_truth_type':
gt_type[i],
'ground_truth_difficulty':
tf.cast(groundtruths['difficulty'][i], tf.uint8),
}
frame_groundtruths = self._remove_padding(
frame_groundtruths, gt_num_detections)
# Set predictions
pred_num_detections = predictions['num_detections'][i]
pred_attributes = {}
for k, v in predictions['attributes'].items():
pred_attributes[k] = v[i]
frame_predictions = {
'prediction_frame_id':
tf.tile([groundtruths['frame_id'][i]], [pred_num_detections]),
'prediction_bbox':
self._get_box(predictions['boxes'][i], pred_attributes),
'prediction_type':
pred_type[i],
'prediction_score':
predictions['scores'][i],
'prediction_overlap_nlz':
tf.zeros_like(predictions['scores'][i], dtype=tf.bool)
}
frame_predictions = self._remove_padding(
frame_predictions, pred_num_detections)
# Update state for this frame.
super().update_state(frame_groundtruths, frame_predictions)
def evaluate(self) -> Mapping[str, Any]:
"""Compute the final metrics.
Returns:
metric_dict: A dict of metrics, contains following breakdown keys:
mAP/{class}_level_1
mAP/{class}_[0, 30)_level_1
mAP/{class}_[30, 50)_level_1
mAP/{class}_[50, +inf)_level_1
mAP/{class}_level_2
mAP/{class}_[0, 30)_level_2
mAP/{class}_[30, 50)_level_2
mAP/{class}_[50, +inf)_level_2
mAPH/{class}_level_1
mAPH/{class}_[0, 30)_level_1
mAPH/{class}_[30, 50)_level_1
mAPH/{class}_[50, +inf)_level_1
mAPH/{class}_level_2
mAPH/{class}_[0, 30)_level_2
mAPH/{class}_[30, 50)_level_2
mAPH/{class}_[50, +inf)_level_2
It also contains following keys used as public NAS rewards.
AP
APH
"""
ap, aph, _, _, _, _, _ = super().evaluate()
metric_dict = {}
for i, name in enumerate(self._breakdown_names):
# Skip sign metrics since we don't use this type.
if 'SIGN' in name:
continue
# Make metric name more readable.
name = name.lower()
for c in utils.CLASSES:
pos = name.find(c)
if pos != -1:
name = name[pos:]
if self._classes == 'all' or self._classes in name:
metric_dict['mAP/{}'.format(name)] = ap[i]
metric_dict['mAPH/{}'.format(name)] = aph[i]
# Set public metrics as AP and APH.
if self._classes == 'all':
ap, aph = 0, 0
for c in utils.CLASSES:
ap += metric_dict['mAP/{}_level_1'.format(c)]
aph += metric_dict['mAPH/{}_level_1'.format(c)]
metric_dict['AP'] = ap / len(utils.CLASSES)
metric_dict['APH'] = aph / len(utils.CLASSES)
else:
metric_dict['AP'] = metric_dict['mAP/{}_level_1'.format(self._classes)]
metric_dict['APH'] = metric_dict['mAPH/{}_level_1'.format(self._classes)]
return metric_dict
class Wod3dDetectionEvaluator(_AbstractEvaluator):
"""WOD 3D detection evaluation metric class."""
def _get_box(self, box2d: tf.Tensor,
attributes: Mapping[str, tf.Tensor]) -> tf.Tensor:
"""Get box from yxyx and attributes.
Args:
box2d: a float32 [N, 4] tensor encoding as (ymin, xmin, ymax, xmax)
attributes: a float32 {name: [N, 1]} dict
Returns:
box: a float32 [N, 7] tensor representing a 3d box
"""
box2d = utils.image_to_frame_boxes(box2d, self._vehicle_xy,
self._resolution)
values = []
values.append(box2d[:, 0]) # center_x
values.append(box2d[:, 1]) # center_y
values.append(attributes['z'][:, 0]) # center_z
values.append(box2d[:, 2]) # length
values.append(box2d[:, 3]) # width
values.append(attributes['height'][:, 0]) # height
values.append(attributes['heading'][:, 0]) # heading
box3d = tf.stack(values, axis=-1)
return box3d
class Wod2dDetectionEvaluator(_AbstractEvaluator):
"""WOD 2D detection evaluation metric class."""
def __init__(self, model_config: Any, config: Any = None):
if config is None:
config = self._get_default_config()
config.box_type = label_pb2.Label.Box.TYPE_2D
super().__init__(model_config, config)
def _get_box(self, box2d: tf.Tensor,
attributes: Mapping[str, tf.Tensor]) -> tf.Tensor:
"""Get box from yxyx and attributes.
Args:
box2d: a float32 [N, 4] tensor encoding as (ymin, xmin, ymax, xmax)
attributes: a float32 {name: [N, 1]} dict
Returns:
box: a float32 [N, 5] tensor representing a 2d box with heading
"""
box2d = utils.image_to_frame_boxes(box2d, self._vehicle_xy,
self._resolution)
values = []
values.append(box2d[:, 0]) # center_x
values.append(box2d[:, 1]) # center_y
values.append(box2d[:, 2]) # length
values.append(box2d[:, 3]) # width
values.append(attributes['heading'][:, 0]) # heading
box2d_h = tf.stack(values, axis=-1)
return box2d_h
def create_evaluator(model_config: cfg.PointPillarsModel) -> _AbstractEvaluator:
"""Create either 2d or 3d evaluator."""
attr_count = len(model_config.head.attribute_heads)
if attr_count == 1:
logging.info('Use 2D detection evaluator.')
return Wod2dDetectionEvaluator(model_config)
if attr_count == 3:
logging.info('Use 3D detection evaluator.')
return Wod3dDetectionEvaluator(model_config)
raise ValueError(
'The length of attribute_heads should be 1 or 3, found {}'.format(
attr_count))
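# Usage sketch (illustrative) inside an evaluation loop:
#   evaluator = create_evaluator(params.task.model)
#   evaluator.update_state(labels['groundtruths'], predictions)
#   metrics = evaluator.evaluate()   # includes 'AP' and 'APH' keys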
| 11,539 | 36.225806 | 80 | py |
models | models-master/official/projects/pointpillars/utils/utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pointpillars utils."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.utils import utils
class UtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([2, 1], [2, 1]),
([1, 1], [4, 3]),
([2, 2, 4], [2, 1, 5]),
)
def test_pad_or_trim_to_shape(self, original_shape, expected_shape):
x = np.ones(shape=original_shape)
x = utils.pad_or_trim_to_shape(x, expected_shape)
self.assertAllEqual(x.shape, expected_shape)
@parameterized.parameters(
([[1.1, 1.1, 2.2, 2.2]], 10.0, 5.0),
([[1.1, 10.1, 2.2, 10.2]], 10.0, 10.0),
([[-1.1, 10.1, -2.2, 10.2]], 5.0, 2.0),
)
def test_clip_boxes(self, boxes, height, width):
boxes = np.array(boxes)
boxes = utils.clip_boxes(boxes, height, width)
self.assertGreaterEqual(boxes[:, 0], 0.0)
self.assertGreaterEqual(boxes[:, 1], 0.0)
self.assertLessEqual(boxes[:, 2], height)
self.assertLessEqual(boxes[:, 3], width)
def test_get_vehicle_xy(self):
vehicle_xy = utils.get_vehicle_xy(10, 10, (-50, 50), (-50, 50))
self.assertEqual(vehicle_xy, (5, 5))
@parameterized.parameters(
([[1.0, 1.0]]),
([[-2.2, 4.2]]),
([[3.7, -10.3]]),
)
def test_frame_to_image_and_image_to_frame(self, frame_xy):
frame_xy = np.array(frame_xy)
vehicle_xy = (0, 0)
resolution = 1.0
image_xy = utils.frame_to_image_coord(frame_xy, vehicle_xy, 1 / resolution)
frame_xy_1 = utils.image_to_frame_coord(image_xy, vehicle_xy, resolution)
self.assertAllEqual(frame_xy_1, np.floor(frame_xy))
@parameterized.parameters(
([[1.0, 1.0, 2.0, 2.0]]),
([[-2.2, -4.2, 2.2, 4.2]]),
)
def test_frame_to_image_boxes_and_image_to_frame_boxes(self, frame_boxes):
frame_boxes = np.array(frame_boxes)
vehicle_xy = (0, 0)
resolution = 1.0
image_boxes = utils.frame_to_image_boxes(frame_boxes, vehicle_xy,
1 / resolution)
frame_boxes_1 = utils.image_to_frame_boxes(image_boxes, vehicle_xy,
resolution)
self.assertAllClose(frame_boxes_1, frame_boxes)
def test_generate_anchors(self):
min_level = 1
max_level = 3
image_size = [16, 16]
anchor_sizes = [(2.0, 1.0)]
all_anchors = utils.generate_anchors(min_level, max_level, image_size,
anchor_sizes)
for level in range(min_level, max_level + 1):
anchors = all_anchors[str(level)]
stride = 2**level
self.assertAllEqual(anchors.shape.as_list(),
[image_size[0] / stride, image_size[1] / stride, 4])
if __name__ == '__main__':
tf.test.main()
| 3,379 | 33.845361 | 79 | py |
models | models-master/official/projects/pointpillars/utils/model_exporter.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars model export utility function for serving/inference."""
import os
from typing import Any, Dict, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.core import train_utils
from official.projects.pointpillars.modeling import factory
from official.projects.pointpillars.utils import utils
def export_inference_graph(
batch_size: int,
params: cfg.ExperimentConfig,
checkpoint_path: str,
export_dir: str,
export_module: Optional[export_base.ExportModule] = None,
):
"""Exports inference graph for PointPillars model.
Saved model is stored at export_dir/saved_model, checkpoint is saved
at export_dir/checkpoint, and params is saved at export_dir/params.yaml.
Args:
batch_size: An int number specifying batch size for inference.
Saved PointPillars model doesn't support dynamic batch size.
Only three batch sizes are acceptable:
train batch size per replica, evaluation batch size per replica, and 1.
params: An instance of cfg.ExperimentConfig.
checkpoint_path: Trained checkpoint path or directory.
export_dir: Export directory path.
export_module: Optional export module to be used instead of using params
to create one.
"""
logging.info('Exporting model.')
if not export_module:
export_module = PointPillarsModule(
params=params,
batch_size=batch_size)
# Disable custom_gradients to make trt-converter be able to work.
# Consider to use tf.keras.models.save_model/load_model APIs to fix
# the custom gradients saving problem.
# https://github.com/tensorflow/tensorflow/issues/40166
save_options = tf.saved_model.SaveOptions(experimental_custom_gradients=False)
export_base.export(
export_module,
function_keys=['tensors'],
export_savedmodel_dir=export_dir,
checkpoint_path=checkpoint_path,
timestamped=False,
save_options=save_options)
logging.info('Saving checkpoint.')
ckpt = tf.train.Checkpoint(model=export_module.model)
ckpt.save(os.path.join(export_dir, 'checkpoint', 'ckpt'))
logging.info('Saving experiment params.')
train_utils.serialize_config(params, export_dir)
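# Typical call (illustrative; paths are hypothetical):
#   export_inference_graph(batch_size=1, params=params,
#                          checkpoint_path='/tmp/ckpt',
#                          export_dir='/tmp/export')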
def load_model_predict_fn(export_dir: str) -> Any:
"""Load PointPillars model from saved directory.
Args:
export_dir: Export directory path.
Returns:
predict_fn: A function can be run for model inference.
"""
logging.info('Loading model from %s.', export_dir)
model = tf.saved_model.load(export_dir)
predict_fn = model.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
return predict_fn
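# Usage sketch (illustrative, pairing with random_input_tensors below):
#   predict_fn = load_model_predict_fn(export_dir)
#   pillars, indices = random_input_tensors(batch_size, params)
#   outputs = predict_fn(pillars=pillars, indices=indices)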
def random_input_tensors(
batch_size: int,
params: cfg.ExperimentConfig) -> Tuple[tf.Tensor, tf.Tensor]:
"""Create random input tensors for PointPillars model.
Args:
batch_size: An int number specifying batch size to inference.
params: An instance of cfg.ExperimentConfig.
Returns:
pillars: A tensor for input.
indices: A tensor for input.
"""
model_config = params.task.model
pillars_config = model_config.pillars
pillars = tf.random.uniform(
shape=[batch_size,
pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point],
minval=0.0,
maxval=1.0,
dtype=tf.float32,
name='pillars')
indices = tf.random.uniform(
shape=[batch_size, pillars_config.num_pillars, 2],
minval=0,
maxval=model_config.image.height,
dtype=tf.int32,
name='indices')
return pillars, indices
class PointPillarsModule(export_base.ExportModule):
"""PointPillars model export module."""
def __init__(self, params: cfg.ExperimentConfig, batch_size: int):
"""Initialize the module.
Args:
params: Experiment params.
batch_size: The batch size of the model input.
"""
self._params = params
self._batch_size = batch_size
self._pillars_spec, self._indices_spec = self._build_input_specs()
model = self._build_model()
super().__init__(params=params, model=model)
def _build_input_specs(
self) -> Tuple[tf.keras.layers.InputSpec, tf.keras.layers.InputSpec]:
pillars_config = self._params.task.model.pillars
pillars_spec = tf.keras.layers.InputSpec(
shape=(self._batch_size,
pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point),
dtype='float32')
indices_spec = tf.keras.layers.InputSpec(
shape=(self._batch_size,
pillars_config.num_pillars,
2),
dtype='int32')
return pillars_spec, indices_spec
def _build_model(self) -> tf.keras.Model:
logging.info('Building PointPillars model.')
input_specs = {
'pillars': self._pillars_spec, 'indices': self._indices_spec
}
model = factory.build_pointpillars(
input_specs=input_specs,
model_config=self._params.task.model,
# Train and eval batch size will be ignored for inference.
train_batch_size=1,
eval_batch_size=1)
return model
def serve(self, pillars: tf.Tensor, indices: tf.Tensor) -> Mapping[str, Any]:
"""Run model inference.
Args:
pillars: A float32 tensor.
indices: An int32 tensor.
Returns:
outputs: A dict of detected results.
"""
# Build image_shape and anchor_boxes on CPU.
with tf.device('cpu'):
model_config = self._params.task.model
image_size = [model_config.image.height,
model_config.image.width]
image_shape = tf.tile(tf.expand_dims(
image_size, axis=0), [self._batch_size, 1])
anchor_sizes = [(a.length, a.width) for a in model_config.anchors]
anchor_boxes = utils.generate_anchors(
min_level=model_config.min_level,
max_level=model_config.max_level,
image_size=image_size,
anchor_sizes=anchor_sizes)
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0),
[self._batch_size, 1, 1, 1])
# Run model.
detections = self.model.call(
pillars=pillars,
indices=indices,
image_shape=image_shape,
anchor_boxes=anchor_boxes,
training=None
)
outputs = {
'detection_boxes': detections['boxes'],
'detection_scores': detections['scores'],
'detection_classes': detections['classes'],
'num_detections': detections['num_detections']
}
# NOTE: Need to flatten attributes, because outputs for functions used as
# signatures must be a single Tensor, a sequence of Tensors, or a dictionary
# from string to Tensor.
outputs.update(detections['attributes'])
return outputs
@tf.function
def inference_from_tensors(
self, pillars: tf.Tensor, indices: tf.Tensor) -> Mapping[str, Any]:
return self.serve(pillars, indices)
def get_inference_signatures(
self, function_keys: Dict[str, str]) -> Mapping[str, Any]:
"""Gets defined function signatures.
Args:
function_keys: A dictionary with keys as the function to create signature
for and values as the signature keys when returns.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for input_type, name in function_keys.items():
if input_type == 'tensors':
pillars = tf.TensorSpec(
shape=self._pillars_spec.shape,
dtype=self._pillars_spec.dtype,
name='pillars'
)
indices = tf.TensorSpec(
shape=self._indices_spec.shape,
dtype=self._indices_spec.dtype,
name='indices'
)
signatures[
name] = self.inference_from_tensors.get_concrete_function(
pillars, indices)
else:
raise ValueError('Unrecognized input_type: {}'.format(input_type))
return signatures
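# A minimal export sketch (illustrative only, not the project's official
# export binary): build the module from an experiment config and save it with
# a 'tensors' signature. `export_dir` and the signature key are assumptions.
def export_saved_model_sketch(params: cfg.ExperimentConfig,
                              export_dir: str,
                              batch_size: int = 1) -> None:
  module = PointPillarsModule(params, batch_size)
  signatures = module.get_inference_signatures(
      {'tensors': 'serving_default'})
  # ExportModule is a tf.Module, so it can be saved directly.
  tf.saved_model.save(module, export_dir, signatures=signatures)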
| 8,772 | 33.269531 | 80 | py |
models | models-master/official/projects/pointpillars/modeling/backbones_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backbones."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import backbones
class BackboneTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([1, 32, 32, 3], 1, 1),
([2, 32, 64, 4], 1, 3),
)
def test_network_creation(self, input_shape, min_level, max_level):
batch_size = input_shape[0]
inputs = tf.keras.Input(shape=input_shape[1:], batch_size=batch_size)
backbone = backbones.Backbone(input_shape, min_level, max_level)
endpoints = backbone(inputs)
_, h, w, c = input_shape
for level in range(min_level, max_level + 1):
self.assertAllEqual([
batch_size,
int(h / 2**level),
int(w / 2**level),
int(c * 2**(level - 1))
], endpoints[str(level)].shape.as_list())
def test_serialization(self):
kwargs = dict(
input_specs=[1, 64, 64, 3],
min_level=2,
max_level=4,
num_convs=3,
kernel_regularizer=None,
)
net = backbones.Backbone(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = backbones.Backbone.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
_ = new_net.to_json()
if __name__ == '__main__':
tf.test.main()
| 1,982 | 30.983871 | 74 | py |
models | models-master/official/projects/pointpillars/modeling/decoders_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoders."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import decoders
class DecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
({'1': [1, 32, 32, 3]},
1, 1),
({'1': [1, 32, 32, 3],
'2': [1, 16, 16, 6]},
1, 2)
)
def test_network_creation(self, input_shape, min_level, max_level):
"""Test if network could be created and infer with expected shapes."""
inputs = {}
for k, v in input_shape.items():
if k == str(min_level):
batch_size, height, width, _ = v
inputs[k] = tf.keras.Input(shape=v[1:], batch_size=batch_size)
decoder = decoders.Decoder(input_shape)
endpoints = decoder(inputs)
self.assertLen(endpoints, 1)
self.assertEqual(list(endpoints.keys())[0], str(min_level))
self.assertIn(str(min_level), endpoints)
expected_channels = input_shape[str(min_level)][-1] * 2 * (
max_level - min_level + 1)
self.assertAllEqual(endpoints[str(min_level)].shape.as_list(),
[batch_size, height, width, expected_channels])
def test_serialization(self):
kwargs = dict(
input_specs={'1': [1, 64, 64, 3]},
kernel_regularizer=None,
)
net = decoders.Decoder(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = decoders.Decoder.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
_ = new_net.to_json()
if __name__ == '__main__':
tf.test.main()
| 2,224 | 32.208955 | 74 | py |
models | models-master/official/projects/pointpillars/modeling/featurizers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Featurizer layers for Pointpillars."""
from typing import Any, List, Mapping, Optional, Tuple
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class Featurizer(tf.keras.layers.Layer):
"""The featurizer to convert pillars to a BEV pseudo image.
The implementation is from the network architecture of PointPillars
(https://arxiv.org/pdf/1812.05784.pdf). It extract features from pillar
tensors then scatter them back to bird-eye-view (BEV) image using indices.
Notations:
B: batch size
H: height of the BEV image
W: width of the BEV image
P: number of pillars in an example
N: number of points in a pillar
D: number of features in a point
C: channels of the BEV image
"""
def __init__(
self,
image_size: Tuple[int, int],
pillars_size: Tuple[int, int, int],
train_batch_size: int,
eval_batch_size: int,
num_blocks: int,
num_channels: int,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initialize the featurizer.
Args:
image_size: A [int, int] tuple to define the [H, W] of BEV image.
pillars_size: A [int, int, int] tuple to define the [P, N, D] of pillars.
train_batch_size: An `int` training batch size per replica.
eval_batch_size: An `int` evaluation batch size per replica.
num_blocks: An `int` number of blocks for extracting features.
      num_channels: An `int` number of channels of the BEV image.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
block layers. Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
super(Featurizer, self).__init__(**kwargs)
self._config_dict = {
'image_size': image_size,
'pillars_size': pillars_size,
'train_batch_size': train_batch_size,
'eval_batch_size': eval_batch_size,
'num_blocks': num_blocks,
'num_channels': num_channels,
'kernel_regularizer': kernel_regularizer,
}
self._image_shape = [image_size[0], image_size[1], num_channels]
utils.assert_channels_last()
def build(self, input_specs: List[tf.TensorShape]):
"""Creates variables for the featurizer."""
self._blocks = []
for _ in range(self._config_dict['num_blocks']):
self._blocks.append(
layers.ConvBlock(
filters=self._config_dict['num_channels'],
kernel_size=1,
strides=1,
kernel_regularizer=self._config_dict['kernel_regularizer']))
    # These batch_dims are [B, P, 1] tensors that can be created ahead of
    # call(). They will be used by tf.scatter_nd to convert pillars to BEV
    # images. Because tf.scatter_nd requires a concrete batch size, we need to
    # prepare batch_dims for every possible batch size: train, eval and test.
self._train_batch_dims = self._get_batch_dims(
self._config_dict['train_batch_size'])
self._eval_batch_dims = self._get_batch_dims(
self._config_dict['eval_batch_size'])
self._test_batch_dims = self._get_batch_dims(1)
super(Featurizer, self).build(input_specs)
def _get_batch_dims(self, batch_size: int) -> tf.Tensor:
p = self._config_dict['pillars_size'][0]
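    # np.indices([batch_size, p])[0] enumerates the batch index of each pillar
    # slot; e.g. for batch_size=2, p=3 it yields [[0, 0, 0], [1, 1, 1]].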
batch_dims = np.indices([batch_size, p])[0]
batch_dims = tf.convert_to_tensor(batch_dims, dtype=tf.int32)
batch_dims = tf.expand_dims(batch_dims, axis=-1)
return batch_dims
def _get_batch_size_and_dims(self,
training: bool = None) -> Tuple[int, tf.Tensor]:
# We use training as a ternary indicator, None for test mode.
# Test mode will be used for saving model and model inference.
if training is None:
batch_size = 1
batch_dims = self._test_batch_dims
else:
if training:
batch_size = self._config_dict['train_batch_size']
batch_dims = self._train_batch_dims
else:
batch_size = self._config_dict['eval_batch_size']
batch_dims = self._eval_batch_dims
return batch_size, batch_dims
def call(self,
pillars: tf.Tensor,
indices: tf.Tensor,
training: bool = None) -> tf.Tensor:
"""Forward pass of the featurizer."""
# Add batch index to pillar indices.
# (B, P, 1)
batch_size, batch_dims = self._get_batch_size_and_dims(training)
# (B, P, 3)
batch_indices = tf.concat([batch_dims, indices], axis=-1)
# Extract features from pillars.
# (B, P, N, D)
x = pillars
# (B, P, N, C)
for block in self._blocks:
x = block(x)
# (B, P, C)
x = tf.reduce_max(x, axis=2, keepdims=False)
# Scatter pillars back to form a BEV image.
# (B, H, W, C)
image = tf.scatter_nd(
batch_indices,
x,
shape=[batch_size] + self._image_shape)
self._output_specs = image.get_shape()
return image
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
@property
def output_specs(self) -> tf.TensorShape:
return self._output_specs
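# A minimal usage sketch (illustrative only, toy sizes assumed): build a
# Featurizer for a 4x4 BEV image from 2 pillars of 3 points with 5 features
# each, then run it in test mode (training=None implies batch size 1).
if __name__ == '__main__':
  example_featurizer = Featurizer(
      image_size=(4, 4),
      pillars_size=(2, 3, 5),
      train_batch_size=2,
      eval_batch_size=2,
      num_blocks=1,
      num_channels=8)
  example_pillars = tf.random.uniform([1, 2, 3, 5], dtype=tf.float32)
  example_indices = tf.random.uniform(
      [1, 2, 2], minval=0, maxval=4, dtype=tf.int32)
  bev_image = example_featurizer(
      example_pillars, example_indices, training=None)
  print(bev_image.shape)  # Expected: (1, 4, 4, 8).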
| 5,936 | 34.550898 | 79 | py |
models | models-master/official/projects/pointpillars/modeling/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.modeling import factory
from official.projects.pointpillars.modeling import models
class PointPillarsBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(4, 4),
(1, 2),
(2, 1),
)
def test_builder(self, train_batch_size, eval_batch_size):
model_config = cfg.PointPillarsModel()
model_config.anchors = [cfg.Anchor(length=1.0, width=1.0)]
pillars_config = model_config.pillars
input_specs = {
'pillars':
tf.keras.layers.InputSpec(
shape=(None, pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point)),
'indices':
tf.keras.layers.InputSpec(
shape=(None, pillars_config.num_pillars, 2), dtype='int32'),
}
model = factory.build_pointpillars(
input_specs, model_config, train_batch_size, eval_batch_size
)
config = model.get_config()
new_model = models.PointPillarsModel.from_config(config)
_ = new_model.to_json()
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,023 | 33.305085 | 76 | py |
models | models-master/official/projects/pointpillars/modeling/models_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PointPillars models."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.pointpillars.modeling import backbones
from official.projects.pointpillars.modeling import decoders
from official.projects.pointpillars.modeling import featurizers
from official.projects.pointpillars.modeling import heads
from official.projects.pointpillars.modeling import models
from official.projects.pointpillars.utils import utils
from official.vision.modeling.layers import detection_generator
class PointpillarsTest(parameterized.TestCase, tf.test.TestCase):
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_gpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
training=[True, False],
))
def test_all(self, strategy, training):
tf.keras.backend.set_image_data_format('channels_last')
num_classes = 2
h, w, c = 8, 8, 2
n, p, d = 2, 3, 4
image_size = [h, w]
pillars_size = [n, p, d]
indices_size = [n, 2]
attribute_heads = [{'name': 'heading', 'type': 'regression', 'size': 1}]
min_level = 1
max_level = 2
anchor_sizes = [(1.1, 1.1)]
num_anchors_per_location = len(anchor_sizes)
global_batch_size = 4
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
batch_size = int(global_batch_size / num_replicas)
pillars = tf.keras.Input(shape=pillars_size, batch_size=batch_size)
indices = tf.keras.Input(
shape=indices_size, batch_size=batch_size, dtype=tf.int32)
image_shape = tf.tile(tf.expand_dims([h, w], axis=0), [batch_size, 1])
max_num_detections = 4
# Test model creation.
with strategy.scope():
anchor_boxes = utils.generate_anchors(min_level,
max_level,
image_size,
anchor_sizes)
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0), [batch_size, 1, 1, 1])
featurizer = featurizers.Featurizer(
image_size=image_size,
pillars_size=pillars_size,
train_batch_size=batch_size,
eval_batch_size=batch_size,
num_blocks=3,
num_channels=c
)
image = featurizer(pillars, indices, training)
backbone = backbones.Backbone(
input_specs=featurizer.output_specs,
min_level=min_level,
max_level=max_level,
num_convs=3
)
encoded_feats = backbone(image)
decoder = decoders.Decoder(
input_specs=backbone.output_specs)
decoded_feats = decoder(encoded_feats)
head = heads.SSDHead(
num_classes=num_classes,
num_anchors_per_location=num_anchors_per_location,
num_params_per_anchor=4,
attribute_heads=attribute_heads,
min_level=min_level,
max_level=max_level
)
_ = head(decoded_feats)
generator = detection_generator.MultilevelDetectionGenerator(
max_num_detections=max_num_detections,
nms_version='v1',
use_cpu_nms=True,
soft_nms_sigma=0.1)
model = models.PointPillarsModel(
featurizer=featurizer,
backbone=backbone,
decoder=decoder,
head=head,
detection_generator=generator,
min_level=min_level,
max_level=max_level,
image_size=image_size,
anchor_sizes=anchor_sizes)
outputs = model(
pillars,
indices,
image_shape,
anchor_boxes,
training)
# Test training and evaluation.
if training:
cls_outputs = outputs['cls_outputs']
box_outputs = outputs['box_outputs']
for level in range(min_level, max_level+1):
self.assertIn(str(level), cls_outputs)
self.assertIn(str(level), box_outputs)
self.assertAllEqual([
batch_size,
h // 2**level,
w // 2**level,
num_classes * num_anchors_per_location
], cls_outputs[str(level)].shape)
self.assertAllEqual([
batch_size,
h // 2**level,
w // 2**level,
4 * num_anchors_per_location
], box_outputs[str(level)].shape)
att_outputs = outputs['attribute_outputs']
self.assertLen(att_outputs, 1)
self.assertIn('heading', att_outputs)
self.assertAllEqual([
batch_size,
h // 2**level,
w // 2**level,
1 * num_anchors_per_location
], att_outputs['heading'][str(level)].shape)
else:
self.assertIn('boxes', outputs)
self.assertIn('scores', outputs)
self.assertIn('classes', outputs)
self.assertIn('num_detections', outputs)
self.assertAllEqual([
batch_size,
], outputs['num_detections'].shape)
self.assertAllEqual([batch_size, max_num_detections, 4],
outputs['boxes'].shape)
self.assertAllEqual([batch_size, max_num_detections],
outputs['scores'].shape)
self.assertAllEqual([batch_size, max_num_detections],
outputs['classes'].shape)
self.assertIn('attributes', outputs)
self.assertAllEqual(
[batch_size, max_num_detections, 1],
outputs['attributes']['heading'].shape)
# Test serialization.
config = model.get_config()
new_model = models.PointPillarsModel.from_config(config)
_ = new_model.to_json()
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 6,623 | 34.805405 | 76 | py |
models | models-master/official/projects/pointpillars/modeling/layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Featurizer layers for Pointpillars."""
from typing import Any, Mapping, Optional
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class ConvBlock(tf.keras.layers.Layer):
"""A conv2d followed by a norm then an activation."""
def __init__(
self,
filters: int,
kernel_size: int,
strides: int,
use_transpose_conv: bool = False,
kernel_initializer: Optional[tf.keras.initializers.Initializer] = tf.keras
.initializers.VarianceScaling(),
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
use_bias: bool = False,
bias_initializer: Optional[tf.keras.initializers.Initializer] = tf.keras
.initializers.Zeros(),
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
use_sync_bn: bool = True,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
bn_trainable: bool = True,
activation: str = 'relu',
**kwargs):
"""Initialize a block with conv, bn and activation.
Args:
filters: An int number of filters of the conv layer.
kernel_size: An int number of kernel size of the conv layer.
strides: An int number of strides of the conv layer.
      use_transpose_conv: A bool for whether to use a transpose conv or not.
kernel_initializer: A tf Initializer object for the conv layer.
kernel_regularizer: A tf Regularizer object for the conv layer.
use_bias: A bool for whether to use bias for the conv layer.
bias_initializer: A tf Initializer object for the conv layer bias.
bias_regularizer: A tf Regularizer object for the conv layer bias.
      use_sync_bn: A bool for whether to use synchronized batch normalization.
norm_momentum: A float of normalization momentum for the moving average.
norm_epsilon: A float added to variance to avoid dividing by zero.
bn_trainable: A bool that indicates whether batch norm layers should be
trainable. Default to True.
activation: A str name of the activation function.
**kwargs: Additional keyword arguments to be passed.
"""
super(ConvBlock, self).__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._use_transpose_conv = use_transpose_conv
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._use_bias = use_bias
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._bn_trainable = bn_trainable
self._activation = activation
self._activation_fn = tf_utils.get_activation(activation)
utils.assert_channels_last()
def build(self, input_shape: tf.TensorShape):
"""Creates variables for the block."""
# Config conv
if self._use_transpose_conv:
conv_op = tf.keras.layers.Conv2DTranspose
else:
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'filters': self._filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'padding': 'same',
'use_bias': self._use_bias,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
self._conv = conv_op(**conv_kwargs)
# Config norm
if self._use_sync_bn:
bn_op = tf.keras.layers.experimental.SyncBatchNormalization
else:
bn_op = tf.keras.layers.BatchNormalization
bn_kwargs = {
'axis': -1,
'momentum': self._norm_momentum,
'epsilon': self._norm_epsilon,
'trainable': self._bn_trainable,
}
self._norm = bn_op(**bn_kwargs)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
"""Forward pass of the block."""
x = inputs
x = self._conv(x)
x = self._norm(x)
outputs = self._activation_fn(x)
return outputs
def get_config(self) -> Mapping[str, Any]:
config = {
'filters': self._filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'use_transpose_conv': self._use_transpose_conv,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'use_bias': self._use_bias,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'bn_trainable': self._bn_trainable,
'activation': self._activation,
}
return config
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
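# A minimal shape sketch (illustrative only, toy shapes assumed): a stride-2
# ConvBlock halves the spatial size, while a stride-2 transpose-conv ConvBlock
# doubles it.
if __name__ == '__main__':
  example_input = tf.ones([1, 8, 8, 3])
  down = ConvBlock(filters=4, kernel_size=3, strides=2)(example_input)
  print(down.shape)  # Expected: (1, 4, 4, 4).
  up = ConvBlock(
      filters=4, kernel_size=3, strides=2, use_transpose_conv=True)(down)
  print(up.shape)  # Expected: (1, 8, 8, 4).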
| 5,643 | 36.131579 | 80 | py |
models | models-master/official/projects/pointpillars/modeling/heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Head layers for Pointpillars."""
from typing import Any, Dict, List, Mapping, Optional, Tuple
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class SSDHead(tf.keras.layers.Layer):
"""A SSD head for PointPillars detection."""
def __init__(
self,
num_classes: int,
num_anchors_per_location: int,
num_params_per_anchor: int = 4,
attribute_heads: Optional[List[Dict[str, Any]]] = None,
min_level: int = 1,
max_level: int = 3,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initialize the SSD Head.
Args:
num_classes: An `int` number of classes to predict.
num_anchors_per_location: An `int` number of anchors per location.
num_params_per_anchor: An `int` number of parameters per anchor.
attribute_heads: If not None, a list that contains a dict for each
additional attribute head. Each dict consists of 3 key-value pairs:
`name`, `type` ('regression' or 'classification'), and `size` (number
of predicted values for each instance).
      min_level: An `int` of min level for output multiscale features.
      max_level: An `int` of max level for output multiscale features.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
super(SSDHead, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'num_anchors_per_location': num_anchors_per_location,
'num_params_per_anchor': num_params_per_anchor,
'attribute_heads': attribute_heads,
'min_level': min_level,
'max_level': max_level,
'kernel_regularizer': kernel_regularizer,
}
utils.assert_channels_last()
def build(self, input_specs: Mapping[str, tf.TensorShape]):
self._decoder_output_level = int(min(input_specs.keys()))
if self._config_dict['min_level'] < self._decoder_output_level:
raise ValueError('The min_level should be >= decoder output '
'level, but {} < {}'.format(
self._config_dict['min_level'],
self._decoder_output_level))
# Multi-level convs.
    # Set num_filters to the channel count of the decoder's output level.
num_filters = input_specs[str(self._decoder_output_level)].as_list()[-1]
self._convs = {}
for level in range(self._decoder_output_level + 1,
self._config_dict['max_level'] + 1):
self._convs[str(level)] = layers.ConvBlock(
filters=num_filters,
kernel_size=3,
strides=2,
kernel_regularizer=self._config_dict['kernel_regularizer'])
# Detection convs, share weights across multi levels.
self._classifier = tf.keras.layers.Conv2D(
filters=(self._config_dict['num_classes'] *
self._config_dict['num_anchors_per_location']),
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5),
kernel_regularizer=self._config_dict['kernel_regularizer'],
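        # Bias the classification logits so every anchor starts with a ~0.01
        # foreground probability, sigmoid(-log((1 - 0.01) / 0.01)) == 0.01,
        # the focal-loss prior that keeps early training stable.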
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)))
self._box_regressor = tf.keras.layers.Conv2D(
filters=(self._config_dict['num_params_per_anchor'] *
self._config_dict['num_anchors_per_location']),
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_initializer=tf.zeros_initializer())
if self._config_dict['attribute_heads']:
self._att_predictors = {}
for att_config in self._config_dict['attribute_heads']:
att_name = att_config['name']
att_type = att_config['type']
att_size = att_config['size']
if att_type != 'regression':
raise ValueError('Unsupported head type: {}'.format(att_type))
self._att_predictors[att_name] = tf.keras.layers.Conv2D(
filters=(att_size * self._config_dict['num_anchors_per_location']),
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_initializer=tf.zeros_initializer())
super(SSDHead, self).build(input_specs)
def call(
self, inputs: Mapping[str, tf.Tensor]
) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[Any, Dict[str, Any]]]:
# Build multi level features.
feats = {}
for level in range(self._decoder_output_level,
self._config_dict['max_level'] + 1):
if level == self._decoder_output_level:
x = inputs[str(level)]
else:
x = self._convs[str(level)](feats[level - 1])
feats[level] = x
# Get multi level detection.
scores = {}
boxes = {}
if self._config_dict['attribute_heads']:
attributes = {
att_config['name']: {}
for att_config in self._config_dict['attribute_heads']
}
else:
attributes = {}
for level in range(self._config_dict['min_level'],
self._config_dict['max_level'] + 1):
# The branch to predict box classes.
scores[str(level)] = self._classifier(feats[level])
# The branch to predict boxes.
boxes[str(level)] = self._box_regressor(feats[level])
# The branches to predict box attributes.
if self._config_dict['attribute_heads']:
for att_config in self._config_dict['attribute_heads']:
att_name = att_config['name']
attributes[att_name][str(level)] = self._att_predictors[att_name](
feats[level])
return scores, boxes, attributes
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.layers.Layer:
return cls(**config)
| 7,055 | 39.090909 | 79 | py |
models | models-master/official/projects/pointpillars/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
from typing import Mapping, Optional
from absl import logging
import tensorflow as tf
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.modeling import backbones
from official.projects.pointpillars.modeling import decoders
from official.projects.pointpillars.modeling import featurizers
from official.projects.pointpillars.modeling import heads
from official.projects.pointpillars.modeling import models
from official.vision.modeling.layers import detection_generator
def build_pointpillars(
input_specs: Mapping[str, tf.keras.layers.InputSpec],
model_config: cfg.PointPillarsModel,
train_batch_size: int,
eval_batch_size: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Build the PointPillars model.
Args:
input_specs: A {name: input_spec} dict used to construct inputs.
model_config: A PointPillarsModel config.
train_batch_size: An `int` of training batch size per replica.
eval_batch_size: An `int` of evaluation batch size per replica.
l2_regularizer: A L2 regularizer.
Returns:
model: A PointPillarsModel built from the config.
"""
# Build inputs
inputs = {}
for k, v in input_specs.items():
inputs[k] = tf.keras.Input(shape=v.shape[1:], dtype=v.dtype)
# Build featurizer
image_size = (model_config.image.height, model_config.image.width)
pillars_size = input_specs['pillars'].shape[1:]
featurizer_config = model_config.featurizer
featurizer = featurizers.Featurizer(
image_size=image_size,
pillars_size=pillars_size,
num_blocks=featurizer_config.num_blocks,
num_channels=featurizer_config.num_channels,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
kernel_regularizer=l2_regularizer)
image = featurizer(inputs['pillars'], inputs['indices'], training=True)
# Build backbone
backbone_config = model_config.backbone
backbone = backbones.Backbone(
input_specs=featurizer.output_specs,
min_level=backbone_config.min_level,
max_level=backbone_config.max_level,
num_convs=backbone_config.num_convs,
kernel_regularizer=l2_regularizer)
encoded_feats = backbone(image)
# Build decoder
decoder = decoders.Decoder(
input_specs=backbone.output_specs,
kernel_regularizer=l2_regularizer)
decoded_feats = decoder(encoded_feats)
# Build detection head
head_config = model_config.head
  num_anchors_per_location = len(model_config.anchors)
head = heads.SSDHead(
num_classes=model_config.num_classes,
num_anchors_per_location=num_anchors_per_location,
num_params_per_anchor=4,
attribute_heads=[
attr.as_dict() for attr in (head_config.attribute_heads or [])
],
min_level=model_config.min_level,
max_level=model_config.max_level,
kernel_regularizer=l2_regularizer)
scores, boxes, attrs = head(decoded_feats)
generator_config = model_config.detection_generator
detection_generator_obj = detection_generator.MultilevelDetectionGenerator(
apply_nms=generator_config.apply_nms,
pre_nms_top_k=generator_config.pre_nms_top_k,
pre_nms_score_threshold=generator_config.pre_nms_score_threshold,
nms_iou_threshold=generator_config.nms_iou_threshold,
max_num_detections=generator_config.max_num_detections,
nms_version=generator_config.nms_version,
use_cpu_nms=generator_config.use_cpu_nms)
image_size = [model_config.image.height, model_config.image.width]
anchor_sizes = [(a.length, a.width) for a in model_config.anchors]
model = models.PointPillarsModel(
featurizer=featurizer,
backbone=backbone,
decoder=decoder,
head=head,
detection_generator=detection_generator_obj,
min_level=model_config.min_level,
max_level=model_config.max_level,
image_size=image_size,
anchor_sizes=anchor_sizes)
logging.info('Train/Eval batch size per replica: %d/%d', train_batch_size,
eval_batch_size)
logging.info('Model inputs: %s', inputs)
logging.info('Outputs in training:')
logging.info('Featurizer output: %s', image)
logging.info('Backbone output: %s', encoded_feats)
logging.info('Decoder output: %s', decoded_feats)
  logging.info('Detection head outputs: scores %s, boxes %s, attributes %s',
scores, boxes, attrs)
return model
| 5,075 | 37.165414 | 77 | py |
models | models-master/official/projects/pointpillars/modeling/layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backbones."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
class ConvBlockTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([1, 8, 8, 3], 4, 1, False),
([1, 8, 8, 3], 4, 2, False),
([1, 8, 8, 3], 2, 1, True),
([1, 8, 8, 3], 2, 2, True),
)
def test_creation(self, input_shape, filters, strides,
use_transpose_conv):
kernel_size = 3
n, h, w, _ = input_shape
inputs = tf.keras.Input(shape=input_shape[1:], batch_size=n)
block = layers.ConvBlock(filters, kernel_size, strides, use_transpose_conv)
outputs = block(inputs)
if not use_transpose_conv:
if strides == 1:
self.assertAllEqual([n, h, w, filters], outputs.shape.as_list())
elif strides == 2:
self.assertAllEqual([n, h/2, w/2, filters], outputs.shape.as_list())
else:
if strides == 1:
self.assertAllEqual([n, h, w, filters], outputs.shape.as_list())
elif strides == 2:
self.assertAllEqual([n, h*2, w*2, filters], outputs.shape.as_list())
def test_serialization(self):
kwargs = dict(
filters=3,
kernel_size=3,
strides=1,
use_transpose_conv=False,
kernel_initializer=None,
kernel_regularizer=None,
use_bias=False,
bias_initializer=None,
bias_regularizer=None,
use_sync_bn=True,
norm_momentum=0.99,
norm_epsilon=0.001,
bn_trainable=True,
activation='relu',
)
net = layers.ConvBlock(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = layers.ConvBlock.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,489 | 31.763158 | 79 | py |
models | models-master/official/projects/pointpillars/modeling/models.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars Model."""
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import tensorflow as tf
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class PointPillarsModel(tf.keras.Model):
"""The PointPillars model class."""
def __init__(self,
featurizer: tf.keras.layers.Layer,
backbone: tf.keras.Model,
decoder: tf.keras.Model,
head: tf.keras.layers.Layer,
detection_generator: tf.keras.layers.Layer,
min_level: int,
max_level: int,
image_size: Tuple[int, int],
anchor_sizes: List[Tuple[float, float]],
**kwargs):
"""Initialize the model class.
Args:
featurizer: A `tf.keras.layers.Layer` to extract features from pillars.
backbone: A `tf.keras.Model` to downsample feature images.
decoder: A `tf.keras.Model` to upsample feature images.
head: A `tf.keras.layers.Layer` to predict targets.
detection_generator: A `tf.keras.layers.Layer` to generate detections.
min_level: An `int` minimum level of multiscale outputs.
max_level: An `int` maximum level of multiscale outputs.
image_size: A tuple (height, width) of image size.
anchor_sizes: A list of tuple (length, width) of anchor boxes.
**kwargs: Additional keyword arguments to be passed.
"""
super(PointPillarsModel, self).__init__(**kwargs)
self._featurizer = featurizer
self._backbone = backbone
self._decoder = decoder
self._head = head
self._detection_generator = detection_generator
self._min_level = min_level
self._max_level = max_level
self._image_size = image_size
self._anchor_sizes = anchor_sizes
def generate_outputs(
self,
raw_scores: Dict[str, tf.Tensor],
raw_boxes: Dict[str, tf.Tensor],
raw_attributes: Dict[str, Dict[str, tf.Tensor]],
image_shape: Optional[tf.Tensor] = None,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
generate_detections: bool = False) -> Mapping[str, Any]:
if not raw_attributes:
raise ValueError('PointPillars model needs attribute heads.')
    # Clamp heading to [-pi, pi].
if 'heading' in raw_attributes:
raw_attributes['heading'] = utils.clip_heading(raw_attributes['heading'])
outputs = {
'cls_outputs': raw_scores,
'box_outputs': raw_boxes,
'attribute_outputs': raw_attributes,
}
# Cast raw prediction to float32 for loss calculation.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
if not generate_detections:
return outputs
if image_shape is None:
raise ValueError('Image_shape should not be None for evaluation.')
if anchor_boxes is None:
# Generate anchors if needed.
anchor_boxes = utils.generate_anchors(
self._min_level,
self._max_level,
self._image_size,
self._anchor_sizes,
)
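      # Broadcast each per-level anchor tensor from [H_i, W_i, 4 * anchors]
      # to [B, H_i, W_i, 4 * anchors] to line up with batched predictions.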
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0),
[tf.shape(image_shape)[0], 1, 1, 1])
# Generate detected boxes.
if not self._detection_generator.get_config()['apply_nms']:
raise ValueError('An NMS algorithm is required for detection generator')
detections = self._detection_generator(raw_boxes, raw_scores,
anchor_boxes, image_shape,
raw_attributes)
outputs.update({
'boxes': detections['detection_boxes'],
'scores': detections['detection_scores'],
'classes': detections['detection_classes'],
'num_detections': detections['num_detections'],
'attributes': detections['detection_attributes'],
})
return outputs
def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
pillars: tf.Tensor,
indices: tf.Tensor,
image_shape: Optional[tf.Tensor] = None,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
training: bool = None) -> Mapping[str, Any]:
"""Forward pass of the model.
Notation:
B: batch size
H_i: image height at level i
W_i: image width at level i
      A: number of anchors per location
C: number of classes to predict
M: number of detected boxes
T: attribute size
P: number of pillars in an example
N: number of points in a pillar
D: number of features in a point
Args:
pillars: A tensor with shape [B, P, N, D].
indices: A tensor with shape [B, P, 2].
image_shape: A tensor with shape [B, 2] representing size of images.
      anchor_boxes: A {level: tensor} dict containing multi-level anchor boxes.
        - key: a `str` level.
        - value: a tensor with shape [B, H_i, W_i, 4 * A].
training: A `bool` indicating whether it's in training mode.
Returns:
      cls_outputs: A {level: tensor} dict, tensor shape is [B, H_i, W_i, C * A].
      box_outputs: A {level: tensor} dict, tensor shape is [B, H_i, W_i, 4 * A].
      attribute_outputs: A {name: {level: tensor}} dict, tensor shape is
        [B, H_i, W_i, T * A].
(Below are only for evaluation mode)
      num_detections: An `int` tensor representing the number of detected boxes.
boxes: A tensor with shape [B, M, 4].
scores: A tensor with shape [B, M].
classes: A tensor with shape [B, M].
attributes: A {name: tensor} dict, tensor shape is [B, M, T].
"""
images = self.featurizer(pillars, indices, training=training)
features = self.backbone(images)
features = self.decoder(features)
raw_scores, raw_boxes, raw_attributes = self.head(features)
return self.generate_outputs(raw_scores=raw_scores,
raw_boxes=raw_boxes,
raw_attributes=raw_attributes,
image_shape=image_shape,
anchor_boxes=anchor_boxes,
generate_detections=not training)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(featurizer=self.featurizer,
backbone=self.backbone,
decoder=self.decoder,
head=self.head)
return items
@property
def featurizer(self) -> tf.keras.layers.Layer:
return self._featurizer
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
@property
def decoder(self) -> tf.keras.Model:
return self._decoder
@property
def head(self) -> tf.keras.layers.Layer:
return self._head
@property
def detection_generator(self) -> tf.keras.layers.Layer:
return self._detection_generator
def get_config(self) -> Mapping[str, Any]:
config_dict = {
'featurizer': self._featurizer,
'backbone': self._backbone,
'decoder': self._decoder,
'head': self._head,
'detection_generator': self._detection_generator,
'min_level': self._min_level,
'max_level': self._max_level,
'image_size': self._image_size,
'anchor_sizes': self._anchor_sizes,
}
return config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
| 8,181 | 36.53211 | 91 | py |
models | models-master/official/projects/pointpillars/modeling/backbones.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbone models for Pointpillars."""
from typing import Any, Mapping, Optional
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class Backbone(tf.keras.Model):
"""The backbone to extract features from BEV pseudo image.
The implementation is from the network architecture of PointPillars
(https://arxiv.org/pdf/1812.05784.pdf). It downsamples the input image
through convolutions and output features with multiple levels.
"""
def __init__(
self,
input_specs: tf.TensorShape,
min_level: int = 1,
max_level: int = 3,
num_convs: int = 4,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initialize the backbone.
    The output of the backbone is multi-level features.
1 <= min_level <= max_level,
level_feature_size = input_image_size / 2 ^ level,
e.g. input size (32, 32), feature size should be:
(32, 32) at level 0, (16, 16) at level 1, (8, 8) at level 2, ...
Args:
input_specs: A `tf.TensorShape` of the input tensor.
min_level: An `int` of min level for output multiscale features.
max_level: An `int` of max level for output multiscale features.
num_convs: An `int` number of convolution layers in a downsample group.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
**kwargs: Additional keyword arguments to be passed.
Returns:
endpoints: A `dict` of {level: Tensor} pairs for the model output.
output_specs: A dict of {level: TensorShape} pairs for the model output.
"""
utils.assert_channels_last()
self._config_dict = {
'input_specs': input_specs,
'min_level': min_level,
'max_level': max_level,
'num_convs': num_convs,
'kernel_regularizer': kernel_regularizer,
}
    # Only allow output from level 1 onward.
if min_level < 1:
raise ValueError(
'The min_level must be >= 1, but {} found.'.format(min_level))
input_channels = input_specs[-1]
inputs = tf.keras.Input(shape=input_specs[1:])
# build the net
x = inputs
net = {}
scale = 1
for level in range(1, max_level + 1):
x = self._block_group(
inputs=x,
filters=input_channels * scale)
scale *= 2
net[level] = x
# build endpoints
endpoints = {}
for level in range(min_level, max_level + 1):
endpoints[str(level)] = net[level]
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(Backbone, self).__init__(inputs=inputs, outputs=endpoints)
def _block_group(self,
inputs: tf.Tensor,
filters: int) -> tf.Tensor:
"""A group of convolution layers to downsample inputs.
Args:
inputs: A tensor to be downsampled.
filters: An `int` number of filters of convolution.
Returns:
x: A tensor of downsampled feature.
"""
x = layers.ConvBlock(
filters=filters,
kernel_size=3,
strides=2,
kernel_regularizer=self._config_dict['kernel_regularizer'])(inputs)
for _ in range(1, self._config_dict['num_convs']):
x = layers.ConvBlock(
filters=filters,
kernel_size=3,
strides=1,
kernel_regularizer=self._config_dict['kernel_regularizer'])(x)
return x
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
return self._output_specs
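# A minimal shape sketch (illustrative only, toy sizes assumed): for a
# (1, 32, 32, 3) input with levels 1..2, each level halves the spatial size
# and doubles the channels, matching the class docstring above.
if __name__ == '__main__':
  example_backbone = Backbone(
      input_specs=[1, 32, 32, 3], min_level=1, max_level=2)
  feats = example_backbone(tf.ones([1, 32, 32, 3]))
  print(feats['1'].shape)  # Expected: (1, 16, 16, 3).
  print(feats['2'].shape)  # Expected: (1, 8, 8, 6).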
| 4,423 | 32.515152 | 78 | py |
models | models-master/official/projects/pointpillars/modeling/heads_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoders."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import heads
class SSDHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(2, [], 1, 1),
(3, [{'name': 'z', 'type': 'regression', 'size': 1}], 1, 3))
def test_network_creation(self, num_classes, attribute_heads, min_level,
max_level):
"""Test if network could be created and infer with expected shapes."""
# Fix the input shape, anchor size and num of conv filters.
n, h, w, c = 1, 32, 32, 4
num_anchors_per_location = 3
num_params_per_anchor = 4
inputs = {'1': tf.keras.Input(shape=[h, w, c], batch_size=n)}
head = heads.SSDHead(num_classes, num_anchors_per_location,
num_params_per_anchor, attribute_heads, min_level,
max_level)
scores, boxes, attributes = head(inputs)
for level in range(min_level, max_level+1):
self.assertIn(str(level), scores)
self.assertIn(str(level), boxes)
scale = 2**(level - min_level)
self.assertAllEqual(scores[str(level)].shape.as_list(), [
n,
int(h / scale),
int(w / scale), num_classes * num_anchors_per_location
])
self.assertAllEqual(boxes[str(level)].shape.as_list(), [
n,
int(h / scale),
int(w / scale), num_params_per_anchor * num_anchors_per_location
])
for attr_head in attribute_heads:
name = attr_head['name']
size = attr_head['size']
self.assertIn(name, attributes)
attr = attributes[name]
for level in range(min_level, max_level+1):
self.assertIn(str(level), attr)
scale = 2**(level - min_level)
self.assertAllEqual(attr[str(level)].shape.as_list(), [
n,
int(h / scale),
int(w / scale), size * num_anchors_per_location
])
def test_serialization(self):
kwargs = dict(
num_classes=2,
num_anchors_per_location=3,
num_params_per_anchor=4,
attribute_heads=[
{'name': 'z', 'type': 'regression', 'size': 1},
],
min_level=1,
max_level=3,
kernel_regularizer=None
)
net = heads.SSDHead(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = heads.SSDHead.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,185 | 34.010989 | 75 | py |
models | models-master/official/projects/pointpillars/modeling/decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoder models for Pointpillars."""
from typing import Any, Mapping, Optional
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class Decoder(tf.keras.Model):
"""The decoder to process feature maps learned by a backbone.
The implementation is from the network architecture of PointPillars
  (https://arxiv.org/pdf/1812.05784.pdf). It upsamples the feature images
  to the same size and combines them to form the output.
"""
def __init__(
self,
input_specs: Mapping[str, tf.TensorShape],
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initialize the Decoder.
Args:
input_specs: A dict of {level: tf.TensorShape} of the input tensor.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
**kwargs: Additional keyword arguments to be passed.
Returns:
endpoints: A `dict` of {level: Tensor} pairs for the model output.
output_specs: A dict of {level: TensorShape} pairs for the model output.
"""
self._config_dict = {
'input_specs': input_specs,
'kernel_regularizer': kernel_regularizer,
}
utils.assert_channels_last()
    # Only process levels learned by a backbone.
min_level = int(min(input_specs.keys()))
max_level = int(max(input_specs.keys()))
# Build inputs
inputs = {}
# Set min_level as the output level.
output_level = min_level
for level, shape in input_specs.items():
# Set num_filters as 2c if the channels of backbone output level is c.
if int(level) == output_level:
num_filters = 2 * shape[-1]
inputs[level] = tf.keras.Input(shape=shape[1:])
# Build lateral features
lateral_feats = {}
for level in range(min_level, max_level + 1):
lateral_feats[level] = inputs[str(level)]
# Build scale-up path
feats = []
for level in range(min_level, max_level + 1):
x = layers.ConvBlock(
filters=num_filters,
kernel_size=3,
strides=int(2 ** (level - output_level)),
use_transpose_conv=True,
kernel_regularizer=kernel_regularizer)(
lateral_feats[level])
feats.append(x)
# Fuse all levels feature into the output level.
endpoints = {}
endpoints[str(output_level)] = tf.keras.layers.Concatenate(axis=-1)(feats)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(Decoder, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
return self._output_specs
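# A minimal shape sketch (illustrative only, toy sizes assumed): two backbone
# levels are fused at the lowest level; with a 3-channel level-1 input,
# num_filters is 2 * 3 = 6 per level, so the fused output has 12 channels.
if __name__ == '__main__':
  example_decoder = Decoder(
      input_specs={'1': [1, 16, 16, 3], '2': [1, 8, 8, 6]})
  fused = example_decoder(
      {'1': tf.ones([1, 16, 16, 3]), '2': tf.ones([1, 8, 8, 6])})
  print(fused['1'].shape)  # Expected: (1, 16, 16, 12).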
| 3,593 | 32.90566 | 78 | py |
models | models-master/official/projects/pointpillars/modeling/featurizers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backbones."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import featurizers
class FeaturizerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([32, 32], [16, 4, 2], 4, 2, 1),
([32, 16], [1, 3, 1], 2, 2, 3),
)
def test_network_creation(self, image_size, pillars_size, train_batch_size,
eval_batch_size, num_blocks):
num_channels = 3
h, w = image_size
n, _, _ = pillars_size
featurizer = featurizers.Featurizer(image_size, pillars_size,
train_batch_size, eval_batch_size,
num_blocks, num_channels)
# Train mode.
pillars = tf.keras.Input(shape=pillars_size, batch_size=train_batch_size)
indices = tf.keras.Input(
shape=[n, 2], batch_size=train_batch_size, dtype=tf.int32)
image = featurizer(pillars, indices, training=True)
self.assertAllEqual([train_batch_size, h, w, num_channels],
image.shape.as_list())
# Evaluation mode.
pillars = tf.keras.Input(shape=pillars_size, batch_size=eval_batch_size)
indices = tf.keras.Input(
shape=[n, 2], batch_size=eval_batch_size, dtype=tf.int32)
image = featurizer(pillars, indices, training=False)
self.assertAllEqual([eval_batch_size, h, w, num_channels],
image.shape.as_list())
# Test mode, batch size must be 1.
pillars = tf.keras.Input(shape=pillars_size, batch_size=1)
indices = tf.keras.Input(
shape=[n, 2], batch_size=1, dtype=tf.int32)
image = featurizer(pillars, indices, training=None)
self.assertAllEqual([1, h, w, num_channels],
image.shape.as_list())
def test_serialization(self):
kwargs = dict(
image_size=[4, 4],
pillars_size=[4, 5, 6],
train_batch_size=4,
eval_batch_size=2,
num_blocks=3,
num_channels=4,
kernel_regularizer=None,
)
net = featurizers.Featurizer(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = featurizers.Featurizer.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,974 | 35.280488 | 77 | py |
models | models-master/official/projects/pointpillars/tasks/pointpillars.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars task definition."""
import functools
from typing import Any, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import task_factory
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.dataloaders import decoders
from official.projects.pointpillars.dataloaders import parsers
from official.projects.pointpillars.modeling import factory
from official.projects.pointpillars.utils import utils
from official.vision.dataloaders import input_reader_factory
from official.vision.losses import focal_loss
from official.vision.losses import loss_utils
def pick_dataset_fn(file_type: str) -> Any:
if file_type == 'tfrecord':
return tf.data.TFRecordDataset
if file_type == 'tfrecord_compressed':
return functools.partial(tf.data.TFRecordDataset, compression_type='GZIP')
raise ValueError('Unrecognized file_type: {}'.format(file_type))
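# Illustrative usage (hypothetical path, not part of the library):
#   dataset_fn = pick_dataset_fn('tfrecord_compressed')
#   dataset = dataset_fn('/tmp/train.tfrecord.gz')  # reads GZIP tfrecords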
def get_batch_size_per_replica(global_batch_size: int) -> int:
"""Get batch size per accelerator replica."""
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
if global_batch_size < num_replicas:
logging.warning('Global batch size is smaller than num replicas. '
'Set batch size per replica to 1.')
return 1
if global_batch_size % num_replicas != 0:
raise ValueError(
'global_batch_size {} is not a multiple of num_replicas {}'
.format(global_batch_size, num_replicas))
batch_size = int(global_batch_size / num_replicas)
return batch_size
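# Worked example (illustrative): with global_batch_size=16 and 4 replicas in
# sync, each replica processes int(16 / 4) = 4 examples per step; a global
# batch of 16 on 3 replicas raises ValueError because 16 % 3 != 0.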
@task_factory.register_task_cls(cfg.PointPillarsTask)
class PointPillarsTask(base_task.Task):
"""A single-replica view of training procedure."""
def __init__(self,
params: cfg.PointPillarsTask,
logging_dir: Optional[str] = None,
name: Optional[str] = None):
super().__init__(params, logging_dir, name)
self._model = None
self._attribute_heads = self.task_config.model.head.attribute_heads
def build_model(self) -> tf.keras.Model:
# Create only one model instance if this function is called multiple times.
if self._model is not None:
return self._model
pillars_config = self.task_config.model.pillars
input_specs = {
'pillars':
tf.keras.layers.InputSpec(
shape=(None, pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point)),
'indices':
tf.keras.layers.InputSpec(
shape=(None, pillars_config.num_pillars, 2), dtype='int32'),
}
train_batch_size = get_batch_size_per_replica(
self.task_config.train_data.global_batch_size)
eval_batch_size = get_batch_size_per_replica(
self.task_config.validation_data.global_batch_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
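    # Note: tf.keras.regularizers.l2(l) computes l * sum(w^2), while weight
    # decay is conventionally (decay / 2) * sum(w^2), hence the division by
    # 2.0 above.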
self._model = factory.build_pointpillars(
input_specs=input_specs,
model_config=self.task_config.model,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
l2_regularizer=l2_regularizer)
return self._model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
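      # expect_partial() silences warnings for checkpoint values (e.g.
      # optimizer slots) that are not restored, while the assertion still
      # verifies that every existing object in the model found a match.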
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(
self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Build input dataset."""
model_config = self.task_config.model
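    # Illustrative note: when the model targets a single class (e.g. vehicles
    # only), the two classes are background and the selected class, so
    # num_classes must be 2.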
if (model_config.classes != 'all' and
model_config.num_classes != 2):
      raise ValueError(
          'Model num_classes must be 2 when not detecting all classes.')
decoder = decoders.ExampleDecoder(model_config.image, model_config.pillars)
image_size = [model_config.image.height, model_config.image.width]
anchor_sizes = [(a.length, a.width) for a in model_config.anchors]
anchor_labeler_config = model_config.anchor_labeler
parser = parsers.Parser(
classes=model_config.classes,
min_level=model_config.min_level,
max_level=model_config.max_level,
image_size=image_size,
anchor_sizes=anchor_sizes,
match_threshold=anchor_labeler_config.match_threshold,
unmatched_threshold=anchor_labeler_config.unmatched_threshold,
max_num_detections=model_config.detection_generator
.max_num_detections,
dtype=params.dtype,
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def compute_attribute_losses(
self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
box_sample_weight: tf.Tensor) -> Mapping[str, float]:
"""Computes attribute loss."""
att_loss_fn = tf.keras.losses.Huber(
self.task_config.losses.huber_loss_delta,
reduction=tf.keras.losses.Reduction.SUM)
losses = {}
total_loss = 0.0
for head in self._attribute_heads:
if head.type != 'regression':
raise ValueError(f'Attribute type {head.type} not supported.')
y_true_att = loss_utils.multi_level_flatten(
labels['attribute_targets'][head.name], last_dim=head.size)
y_pred_att = loss_utils.multi_level_flatten(
outputs['attribute_outputs'][head.name], last_dim=head.size)
if head.name == 'heading':
        # Direction-aware loss: wrap the delta angle to [-pi, pi] so the loss
        # penalizes the shortest angular distance. (For a loss that is
        # symmetric in direction, i.e. where headings 0 and pi are
        # equivalent, a tf.sin transform would be used instead.)
delta = utils.wrap_angle_rad(y_pred_att - y_true_att)
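        # Example (illustrative): a predicted heading of pi + 0.1 against a
        # target of 0 wraps to a delta of -pi + 0.1, so the loss penalizes
        # the shortest angular distance rather than the raw difference.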
loss = att_loss_fn(
y_true=tf.zeros_like(delta),
y_pred=delta,
sample_weight=box_sample_weight)
else:
loss = att_loss_fn(
y_true=y_true_att,
y_pred=y_pred_att,
sample_weight=box_sample_weight)
total_loss += loss
losses[head.name] = loss
losses['total'] = total_loss
return losses
def compute_losses(
self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None) -> Mapping[str, float]:
"""Build losses."""
params = self.task_config
cls_loss_fn = focal_loss.FocalLoss(
alpha=params.losses.focal_loss_alpha,
gamma=params.losses.focal_loss_gamma,
reduction=tf.keras.losses.Reduction.SUM)
box_loss_fn = tf.keras.losses.Huber(
params.losses.huber_loss_delta,
reduction=tf.keras.losses.Reduction.SUM)
    # Sums all positives in a batch for normalization, adding 1.0 to avoid a
    # zero num_positives, which would lead to an inf loss during training.
cls_sample_weight = labels['cls_weights']
box_sample_weight = labels['box_weights']
num_positives = tf.reduce_sum(box_sample_weight) + 1.0
cls_sample_weight = cls_sample_weight / num_positives
box_sample_weight = box_sample_weight / num_positives
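    # Worked example (illustrative): a batch with 7 positive anchors gives
    # num_positives = 8.0, so each per-anchor weight is divided by 8 and the
    # summed losses stay roughly invariant to how many anchors matched.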
y_true_cls = loss_utils.multi_level_flatten(
labels['cls_targets'], last_dim=None)
y_true_cls = tf.one_hot(y_true_cls, params.model.num_classes)
y_pred_cls = loss_utils.multi_level_flatten(
outputs['cls_outputs'], last_dim=params.model.num_classes)
y_true_box = loss_utils.multi_level_flatten(
labels['box_targets'], last_dim=4)
y_pred_box = loss_utils.multi_level_flatten(
outputs['box_outputs'], last_dim=4)
cls_loss = cls_loss_fn(
y_true=y_true_cls, y_pred=y_pred_cls, sample_weight=cls_sample_weight)
box_loss = box_loss_fn(
y_true=y_true_box, y_pred=y_pred_box, sample_weight=box_sample_weight)
attribute_losses = self.compute_attribute_losses(outputs, labels,
box_sample_weight)
model_loss = (
cls_loss + box_loss * params.losses.box_loss_weight +
attribute_losses['total'] * params.losses.attribute_loss_weight)
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss += reg_loss
total_loss = params.losses.loss_weight * total_loss
losses = {
'class_loss': cls_loss,
'box_loss': box_loss,
'attribute_loss': attribute_losses['total'],
'model_loss': model_loss,
'total_loss': total_loss,
}
for head in self._attribute_heads:
losses[head.name + '_loss'] = attribute_losses[head.name]
return losses
def build_metrics(self, training: bool = True) -> List[tf.metrics.Metric]:
"""Define metrics and how to calculate them."""
# train/validation loss metrics
loss_names = [
'class_loss', 'box_loss', 'attribute_loss', 'model_loss', 'total_loss'
]
for head in self._attribute_heads:
loss_names.append(head.name + '_loss')
metrics = []
for name in loss_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
# Use a separate metric for WOD validation.
if not training:
if self.task_config.use_wod_metrics:
        # To use Waymo Open Dataset metrics, install one of the pip packages
        # `waymo-open-dataset-tf-*` from
        # https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux
        # Note that each package is built against a specific tensorflow
        # version and will produce errors if it does not match the tf version
        # currently in use.
try:
from official.projects.pointpillars.utils import wod_detection_evaluator # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
logging.error('waymo-open-dataset should be installed to enable Waymo'
' evaluator.')
raise
self._wod_metric = wod_detection_evaluator.create_evaluator(
self.task_config.model)
return metrics
def train_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[tf.metrics.Metric]] = None) -> Mapping[str, Any]:
"""Does forward and backward."""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(pillars=features['pillars'],
indices=features['indices'],
training=True)
losses = self.compute_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
# Computes per-replica loss.
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
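      # Example (illustrative): with a loss scale of 1024, gradients flow
      # through 1024 * loss and are divided back by 1024 after the tape, which
      # keeps small float16 gradients from underflowing to zero.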
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
# For updating trainer.train_loss
logs = {self.loss: losses['total_loss']}
# For updating trainer.train_metrics
if metrics:
for m in metrics:
m.update_state(losses[m.name])
return logs
def validation_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[tf.metrics.Metric]] = None) -> Mapping[str, Any]:
"""Validatation step."""
features, labels = inputs
outputs = model(pillars=features['pillars'],
indices=features['indices'],
image_shape=labels['image_shape'],
anchor_boxes=labels['anchor_boxes'],
training=False)
losses = self.compute_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
# For updating trainer.validation_loss
logs = {self.loss: losses['total_loss']}
# For updating trainer.validation_metrics
if metrics:
for m in metrics:
m.update_state(losses[m.name])
if self.task_config.use_wod_metrics:
logs.update(
{self._wod_metric.name: (labels['groundtruths'], outputs)})
return logs
def aggregate_logs(self,
state: Any = None,
step_outputs: Any = None) -> Any:
"""Called after each validation_step to update metrics."""
logging.log_every_n(logging.INFO,
'Aggregating metrics after one evaluation step.', 1000)
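    # 'state' acts only as a sentinel: it is None on the first validation step
    # of an evaluation cycle, which triggers a metric reset below; it is then
    # set to True so subsequent steps simply accumulate.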
if self.task_config.use_wod_metrics:
if state is None:
self._wod_metric.reset_states()
self._wod_metric.update_state(step_outputs[self._wod_metric.name][0],
step_outputs[self._wod_metric.name][1])
if state is None:
state = True
return state
def reduce_aggregated_logs(self,
aggregated_logs: Any,
global_step: Optional[tf.Tensor] = None) -> Any:
"""Called after eval_end to calculate metrics."""
logging.info('Reducing aggregated metrics after one evaluation cycle.')
logs = {}
if self.task_config.use_wod_metrics:
logs.update(self._wod_metric.result())
return logs
| 15,302 | 38.440722 | 138 | py |