# --- tests/utils/__init__.py (d2go-main): empty file ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from detectron2.layers import cat
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.structures import Boxes
from mobile_cv.common.misc.oss_utils import is_oss
class TestBoxWithNMSLimit(unittest.TestCase):
@unittest.skipIf(is_oss(), "Caffe2 is not available for OSS")
def test_caffe2_pytorch_eq(self):
ims_per_batch = 8
post_nms_topk = 100
detections_per_im = 10
num_class = 80
score_thresh = 0.05
nms_thresh = 0.5
image_shapes = [torch.Size([800, 800])] * ims_per_batch
batch_splits = [post_nms_topk] * ims_per_batch
# NOTE: there are still some minor implementation differences (e.g.
# ordering when scores are equal across classes) that cause some seeds
# to fail the test, so use a fixed seed to make it pass consistently.
rng = torch.Generator()
rng.manual_seed(42)
boxes = []
for n in batch_splits:
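# generate random xyxy boxes: offset the bottom-right corner by the top-left
# so that x2 >= x1 and y2 >= y1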
box = 1000.0 * 0.5 * torch.rand(n, num_class, 4, generator=rng) + 0.001
box[:, :, -2:] += box[:, :, :2]
box = box.view(n, num_class * 4)
boxes.append(box)
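# scores have num_class + 1 columns; the extra column is the background
# class, which fast_rcnn_inference drops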
scores = [torch.rand(n, num_class + 1, generator=rng) for n in batch_splits]
ref_results, ref_kept_indices = fast_rcnn_inference(
boxes,
scores,
image_shapes,
score_thresh=score_thresh,
nms_thresh=nms_thresh,
topk_per_image=detections_per_im,
)
for result, kept_index, score in zip(ref_results, ref_kept_indices, scores):
torch.testing.assert_close(
score[kept_index, result.pred_classes],
result.scores,
)
# clip is done in BBoxTransformOp
c2_boxes = []
for box, image_shape in zip(boxes, image_shapes):
num_bbox_reg_classes = box.shape[1] // 4
clipped_box = Boxes(box.reshape(-1, 4))
clipped_box.clip(image_shape)
clipped_box = clipped_box.tensor.view(-1, num_bbox_reg_classes * 4)
c2_boxes.append(clipped_box)
c2_boxes = cat(c2_boxes)
c2_scores = cat(scores)
c2_batch_splits = torch.Tensor(batch_splits)
nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(
c2_scores,
c2_boxes,
c2_batch_splits,
score_thresh=float(score_thresh),
nms=float(nms_thresh),
detections_per_im=int(detections_per_im),
soft_nms_enabled=False,
soft_nms_method="linear",
soft_nms_sigma=0.5,
soft_nms_min_score_thres=0.001,
rotated=False,
cls_agnostic_bbox_reg=False,
input_boxes_include_bg_cls=False,
output_classes_include_bg_cls=False,
legacy_plus_one=False,
)
(
roi_score_nms,
roi_bbox_nms,
roi_class_nms,
roi_batch_splits_nms,
roi_keeps_nms,
roi_keeps_size_nms,
) = nms_outputs # noqa
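# the op returns flat tensors over the whole batch; split them back
# per image using the returned batch splits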
roi_score_nms = roi_score_nms.split(roi_batch_splits_nms.int().tolist())
roi_bbox_nms = roi_bbox_nms.split(roi_batch_splits_nms.int().tolist())
roi_class_nms = roi_class_nms.split(roi_batch_splits_nms.int().tolist())
roi_keeps_nms = roi_keeps_nms.split(roi_batch_splits_nms.int().tolist())
for _score_nms, _class_nms, _keeps_nms, _score in zip(
roi_score_nms, roi_class_nms, roi_keeps_nms, scores
):
torch.testing.assert_close(
_score[_keeps_nms.to(torch.int64), _class_nms.to(torch.int64)],
_score_nms,
)
for ref, s, b, c in zip(
ref_results, roi_score_nms, roi_bbox_nms, roi_class_nms
):
s1, i1 = s.sort()
s2, i2 = ref.scores.sort()
torch.testing.assert_close(s1, s2)
torch.testing.assert_close(b[i1], ref.pred_boxes.tensor[i2])
torch.testing.assert_close(c.to(torch.int64)[i1], ref.pred_classes[i2])
for ref, k in zip(ref_kept_indices, roi_keeps_nms):
# NOTE: order might be different due to implementation
ref_set = set(ref.tolist())
k_set = set(k.tolist())
self.assertEqual(ref_set, k_set)
# --- end of tests/modeling/test_box_with_nms_limit.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
import unittest
import d2go.runner.default_runner as default_runner
import torch
from d2go.optimizer.build import build_optimizer_mapper
from d2go.utils.testing import helper
class TestArch(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=5, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
self.linear = torch.nn.Linear(4, 1)
def forward(self, x):
ret = self.conv(x)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
ret = torch.transpose(ret, 1, 3)
ret = self.linear(ret)
return ret
def _test_each_optimizer(cfg, cuda: bool = False):
print("Solver: " + str(cfg.SOLVER.OPTIMIZER))
device = "cuda:0" if cuda else "cpu"
model = TestArch().to(device)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = build_optimizer_mapper(cfg, model)
optimizer.zero_grad()
random.seed(20210912)
num_iter = 500
for _ in range(num_iter):
target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1)).to(device)
noise = torch.rand(1, 3, 16, 16).to(device)
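# inputs are separable by construction: class-1 samples are class-0 noise shifted by +2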
x = torch.add(noise, 2 * target)
y_pred = model(x)
loss = criterion(y_pred, target)
loss.backward()
optimizer.step()
n_correct = 0
n_eval = 100
for _ in range(n_eval):
target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1)).to(device)
x = torch.add(torch.rand(1, 3, 16, 16).to(device), 2 * target)
y_pred = torch.round(torch.sigmoid(model(x)))
if y_pred == target:
n_correct += 1
print("Correct prediction rate {0}.".format(n_correct / n_eval))
def _check_param_group(self, group, num_params=None, **kwargs):
if num_params is not None:
self.assertEqual(len(group["params"]), num_params)
for key, val in kwargs.items():
self.assertEqual(group[key], val)
def get_optimizer_cfg(
lr,
weight_decay=None,
weight_decay_norm=None,
weight_decay_bias=None,
lr_mult=None,
):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
if lr is not None:
cfg.SOLVER.BASE_LR = lr
if weight_decay is not None:
cfg.SOLVER.WEIGHT_DECAY = weight_decay
if weight_decay_norm is not None:
cfg.SOLVER.WEIGHT_DECAY_NORM = weight_decay_norm
if weight_decay_bias is not None:
cfg.SOLVER.WEIGHT_DECAY_BIAS = weight_decay_bias
if lr_mult is not None:
cfg.SOLVER.LR_MULTIPLIER_OVERWRITE = [lr_mult]
return cfg
class TestOptimizer(unittest.TestCase):
def test_create_optimizer_default(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv(x))
model = Model()
cfg = get_optimizer_cfg(
lr=1.0, weight_decay=1.0, weight_decay_norm=1.0, weight_decay_bias=1.0
)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 1)
_check_param_group(
self, optimizer.param_groups[0], num_params=4, weight_decay=1.0, lr=1.0
)
def test_create_optimizer_lr(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 1)
self.conv2 = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv2(self.conv1(x)))
model = Model()
cfg = get_optimizer_cfg(
lr=1.0,
lr_mult={"conv1": 3.0, "conv2": 3.0},
weight_decay=2.0,
weight_decay_norm=2.0,
)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 2)
_check_param_group(self, optimizer.param_groups[0], num_params=4, lr=3.0)
_check_param_group(self, optimizer.param_groups[1], num_params=2, lr=1.0)
def test_create_optimizer_weight_decay_norm(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv(x))
model = Model()
cfg = get_optimizer_cfg(
lr=1.0, weight_decay=1.0, weight_decay_norm=2.0, weight_decay_bias=1.0
)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 2)
_check_param_group(
self, optimizer.param_groups[0], num_params=2, lr=1.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[1], num_params=2, lr=1.0, weight_decay=2.0
)
OPTIMIZER_NAMES_PART1 = ["SGD", "AdamW", "SGD_MT"]
OPTIMIZER_NAMES_PART2 = ["AdamW_MT", "Adam"]
def _test_optimizers_list(self, optimizers_list, fused: bool = False):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
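# exercise both the default parameter grouping (None) and a per-module lr multiplier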
multipliers = [None, [{"conv": 0.1}]]
for optimizer_name in optimizers_list:
for mult in multipliers:
cfg.SOLVER.BASE_LR = 0.01
cfg.SOLVER.FUSED = fused
cfg.SOLVER.OPTIMIZER = optimizer_name
cfg.SOLVER.MULTIPLIERS = mult
_test_each_optimizer(cfg, cuda=fused)
def test_all_optimizers_part_1(self):
self._test_optimizers_list(self.OPTIMIZER_NAMES_PART1)
def test_all_optimizers_part_2(self):
self._test_optimizers_list(self.OPTIMIZER_NAMES_PART2)
def _test_full_model_grad_clipping(self, optimizers_list):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
for optimizer_name in optimizers_list:
cfg.SOLVER.BASE_LR = 0.02
cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 0.2
cfg.SOLVER.CLIP_GRADIENTS.ENABLED = True
cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "full_model"
cfg.SOLVER.OPTIMIZER = optimizer_name
_test_each_optimizer(cfg)
def test_full_model_grad_clipping_part1(self):
self._test_full_model_grad_clipping(self.OPTIMIZER_NAMES_PART1)
def test_full_model_grad_clipping_part2(self):
self._test_full_model_grad_clipping(self.OPTIMIZER_NAMES_PART2)
def test_create_optimizer_custom(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv(x))
def get_optimizer_param_groups(self, _opts):
ret = [
{
"params": [self.conv.weight],
"lr": 10.0,
}
]
return ret
model = Model()
cfg = get_optimizer_cfg(lr=1.0, weight_decay=1.0, weight_decay_norm=0.0)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 3)
_check_param_group(
self, optimizer.param_groups[0], num_params=1, lr=10.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[1], num_params=1, lr=1.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[2], num_params=2, lr=1.0, weight_decay=0.0
)
@helper.enable_ddp_env()
def test_create_optimizer_custom_ddp(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 1)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, x):
return self.bn(self.conv(x))
def get_optimizer_param_groups(self, _opts):
ret = [
{
"params": [self.conv.weight],
"lr": 10.0,
}
]
return ret
model = Model()
model = torch.nn.parallel.DistributedDataParallel(model)
cfg = get_optimizer_cfg(lr=1.0, weight_decay=1.0, weight_decay_norm=0.0)
optimizer = build_optimizer_mapper(cfg, model)
self.assertEqual(len(optimizer.param_groups), 3)
_check_param_group(
self, optimizer.param_groups[0], num_params=1, lr=10.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[1], num_params=1, lr=1.0, weight_decay=1.0
)
_check_param_group(
self, optimizer.param_groups[2], num_params=2, lr=1.0, weight_decay=0.0
)
# --- end of tests/modeling/test_optimizer.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import os
import unittest
import torch
from d2go.export.exporter import convert_and_export_predictor
from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.testing.data_loader_helper import (
create_detection_data_loader_on_toy_dataset,
)
from d2go.utils.testing.rcnn_helper import get_quick_test_config_opts, RCNNBaseTestCases
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.oss_utils import is_oss
def _maybe_skip_test(self, predictor_type):
if is_oss() and "@c2_ops" in predictor_type:
self.skipTest("Caffe2 is not available for OSS")
if not torch.cuda.is_available() and "_gpu" in predictor_type:
self.skipTest("GPU is not available for exporting GPU model")
class TestFBNetV3MaskRCNNFP32(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
def test_inference(self):
self._test_inference()
@RCNNBaseTestCases.expand_parameterized_test_export(
[
["torchscript@c2_ops", True],
["torchscript", True],
["torchscript_int8@c2_ops", False],
["torchscript_int8", False],
]
)
def test_export(self, predictor_type, compare_match):
_maybe_skip_test(self, predictor_type)
self._test_export(predictor_type, compare_match=compare_match)
class TestFBNetV3MaskRCNNFPNFP32(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3g_fpn.yaml")
def test_inference(self):
self._test_inference()
@RCNNBaseTestCases.expand_parameterized_test_export(
[
# FIXME: exporting c2_ops for the FPN model might not pass this test for
# certain combinations of image sizes and resizing targets. Data points:
# - passes before D35238890: image_size and resizing target are both 32x64 (the backbone's divisibility).
# - fails after D35238890: image_size is 32x64, resizing to 5x10.
["torchscript@c2_ops", False],
["torchscript", True],
["torchscript_int8@c2_ops", False],
["torchscript_int8", False],
]
)
def test_export(self, predictor_type, compare_match):
_maybe_skip_test(self, predictor_type)
self._test_export(predictor_type, compare_match=compare_match)
class TestFBNetV3MaskRCNNQATEager(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
# enable QAT
self.cfg.merge_from_list(
[
"QUANTIZATION.BACKEND",
"qnnpack",
"QUANTIZATION.QAT.ENABLED",
"True",
]
)
# FIXME: NaiveSyncBN is not supported
self.cfg.merge_from_list(["MODEL.FBNET_V2.NORM", "bn"])
def test_inference(self):
self._test_inference()
@RCNNBaseTestCases.expand_parameterized_test_export(
[
["torchscript_int8@c2_ops", False], # TODO: fix mismatch
["torchscript_int8", False], # TODO: fix mismatch
]
)
def test_export(self, predictor_type, compare_match):
_maybe_skip_test(self, predictor_type)
self._test_export(predictor_type, compare_match=compare_match)
class TestFBNetV3KeypointRCNNFP32(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://keypoint_rcnn_fbnetv3a_dsmask_C4.yaml")
# FIXME: have to use qnnpack due to the following error:
# Per Channel Quantization is currently disabled for transposed conv
self.cfg.merge_from_list(
[
"QUANTIZATION.BACKEND",
"qnnpack",
]
)
def test_inference(self):
self._test_inference()
@RCNNBaseTestCases.expand_parameterized_test_export(
[
["torchscript_int8@c2_ops", False], # TODO: fix mismatch
["torchscript_int8", False], # TODO: fix mismatch
]
)
def test_export(self, predictor_type, compare_match):
if is_oss() and "@c2_ops" in predictor_type:
self.skipTest("Caffe2 is not available for OSS")
self._test_export(predictor_type, compare_match=compare_match)
class TestTorchVisionExport(unittest.TestCase):
def test_export_torchvision_format(self):
runner = GeneralizedRCNNRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
cfg.merge_from_list(get_quick_test_config_opts())
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
pytorch_model = runner.build_model(cfg, eval_only=True)
from typing import Dict, List
class Wrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, inputs: List[torch.Tensor]):
x = inputs[0].unsqueeze(0) * 255
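# rescale so the shorter image side becomes 320 before running the detector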
scale = 320.0 / min(x.shape[-2], x.shape[-1])
x = torch.nn.functional.interpolate(
x,
scale_factor=scale,
mode="bilinear",
align_corners=True,
recompute_scale_factor=True,
)
out = self.model(x[0])
res: Dict[str, torch.Tensor] = {}
res["boxes"] = out[0] / scale
res["labels"] = out[2]
res["scores"] = out[1]
return inputs, [res]
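# pick a small test image whose sides satisfy the backbone's size divisibility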
size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
h, w = size_divisibility, size_divisibility * 2
with create_detection_data_loader_on_toy_dataset(
cfg, h, w, is_train=False
) as data_loader:
with make_temp_directory("test_export_torchvision_format") as tmp_dir:
predictor_path = convert_and_export_predictor(
cfg,
copy.deepcopy(pytorch_model),
"torchscript",
tmp_dir,
data_loader,
)
orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
wrapped_model = Wrapper(orig_model)
# smoke-test a forward pass before scripting
wrapped_model([torch.rand(3, 600, 600)])
scripted_model = torch.jit.script(wrapped_model)
scripted_model.save(os.path.join(tmp_dir, "new_file.pt"))
class TestMaskRCNNExportOptions(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
def _get_test_image_sizes(self, is_train):
# postprocessing requires no resize from "data loader"
return self._get_test_image_size_no_resize(is_train)
def test_tracing_with_postprocess(self):
self.cfg.merge_from_list(["RCNN_EXPORT.INCLUDE_POSTPROCESS", True])
self._test_export("torchscript@tracing", compare_match=True)
if __name__ == "__main__":
unittest.main()
# --- end of tests/modeling/test_meta_arch_rcnn.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from detectron2.layers import nms as box_nms
class TestNMS(unittest.TestCase):
def test_nms_cpu(self):
"""Match unit test UtilsNMSTest.TestNMS in
caffe2/operators/generate_proposals_op_util_nms_test.cc
"""
inputs = (
np.array(
[
10,
10,
50,
60,
0.5,
11,
12,
48,
60,
0.7,
8,
9,
40,
50,
0.6,
100,
100,
150,
140,
0.9,
99,
110,
155,
139,
0.8,
]
)
.astype(np.float32)
.reshape(-1, 5)
)
boxes = torch.from_numpy(inputs[:, :4])
scores = torch.from_numpy(inputs[:, 4])
test_thresh = [0.1, 0.3, 0.5, 0.8, 0.9]
gt_indices = [[1, 3], [1, 3], [1, 3], [1, 2, 3, 4], [0, 1, 2, 3, 4]]
for thresh, gt_index in zip(test_thresh, gt_indices):
keep_indices = box_nms(boxes, scores, thresh)
keep_indices = np.sort(keep_indices)
np.testing.assert_array_equal(keep_indices, np.array(gt_index))
def test_nms1_cpu(self):
"""Match unit test UtilsNMSTest.TestNMS1 in
caffe2/operators/generate_proposals_op_util_nms_test.cc
"""
boxes = torch.from_numpy(
np.array(
[
[350.9821, 161.8200, 369.9685, 205.2372],
[250.5236, 154.2844, 274.1773, 204.9810],
[471.4920, 160.4118, 496.0094, 213.4244],
[352.0421, 164.5933, 366.4458, 205.9624],
[166.0765, 169.7707, 183.0102, 232.6606],
[252.3000, 183.1449, 269.6541, 210.6747],
[469.7862, 162.0192, 482.1673, 187.0053],
[168.4862, 174.2567, 181.7437, 232.9379],
[470.3290, 162.3442, 496.4272, 214.6296],
[251.0450, 155.5911, 272.2693, 203.3675],
[252.0326, 154.7950, 273.7404, 195.3671],
[351.7479, 161.9567, 370.6432, 204.3047],
[496.3306, 161.7157, 515.0573, 210.7200],
[471.0749, 162.6143, 485.3374, 207.3448],
[250.9745, 160.7633, 264.1924, 206.8350],
[470.4792, 169.0351, 487.1934, 220.2984],
[474.4227, 161.9546, 513.1018, 215.5193],
[251.9428, 184.1950, 262.6937, 207.6416],
[252.6623, 175.0252, 269.8806, 213.7584],
[260.9884, 157.0351, 288.3554, 206.6027],
[251.3629, 164.5101, 263.2179, 202.4203],
[471.8361, 190.8142, 485.6812, 220.8586],
[248.6243, 156.9628, 264.3355, 199.2767],
[495.1643, 158.0483, 512.6261, 184.4192],
[376.8718, 168.0144, 387.3584, 201.3210],
[122.9191, 160.7433, 172.5612, 231.3837],
[350.3857, 175.8806, 366.2500, 205.4329],
[115.2958, 162.7822, 161.9776, 229.6147],
[168.4375, 177.4041, 180.8028, 232.4551],
[169.7939, 184.4330, 181.4767, 232.1220],
[347.7536, 175.9356, 355.8637, 197.5586],
[495.5434, 164.6059, 516.4031, 207.7053],
[172.1216, 194.6033, 183.1217, 235.2653],
[264.2654, 181.5540, 288.4626, 214.0170],
[111.7971, 183.7748, 137.3745, 225.9724],
[253.4919, 186.3945, 280.8694, 210.0731],
[165.5334, 169.7344, 185.9159, 232.8514],
[348.3662, 184.5187, 354.9081, 201.4038],
[164.6562, 162.5724, 186.3108, 233.5010],
[113.2999, 186.8410, 135.8841, 219.7642],
[117.0282, 179.8009, 142.5375, 221.0736],
[462.1312, 161.1004, 495.3576, 217.2208],
[462.5800, 159.9310, 501.2937, 224.1655],
[503.5242, 170.0733, 518.3792, 209.0113],
[250.3658, 195.5925, 260.6523, 212.4679],
[108.8287, 163.6994, 146.3642, 229.7261],
[256.7617, 187.3123, 288.8407, 211.2013],
[161.2781, 167.4801, 186.3751, 232.7133],
[115.3760, 177.5859, 163.3512, 236.9660],
[248.9077, 188.0919, 264.8579, 207.9718],
[108.1349, 160.7851, 143.6370, 229.6243],
[465.0900, 156.7555, 490.3561, 213.5704],
[107.5338, 173.4323, 141.0704, 235.2910],
]
).astype(np.float32)
)
scores = torch.from_numpy(
np.array(
[
0.1919,
0.3293,
0.0860,
0.1600,
0.1885,
0.4297,
0.0974,
0.2711,
0.1483,
0.1173,
0.1034,
0.2915,
0.1993,
0.0677,
0.3217,
0.0966,
0.0526,
0.5675,
0.3130,
0.1592,
0.1353,
0.0634,
0.1557,
0.1512,
0.0699,
0.0545,
0.2692,
0.1143,
0.0572,
0.1990,
0.0558,
0.1500,
0.2214,
0.1878,
0.2501,
0.1343,
0.0809,
0.1266,
0.0743,
0.0896,
0.0781,
0.0983,
0.0557,
0.0623,
0.5808,
0.3090,
0.1050,
0.0524,
0.0513,
0.4501,
0.4167,
0.0623,
0.1749,
]
).astype(np.float32)
)
gt_indices = np.array(
[
1,
6,
7,
8,
11,
12,
13,
14,
17,
18,
19,
21,
23,
24,
25,
26,
30,
32,
33,
34,
35,
37,
43,
44,
47,
50,
]
)
keep_indices = box_nms(boxes, scores, 0.5)
keep_indices = np.sort(keep_indices)
np.testing.assert_array_equal(keep_indices, gt_indices)
if __name__ == "__main__":
unittest.main()
# --- end of tests/modeling/test_nms.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import itertools
import unittest
import d2go.runner.default_runner as default_runner
import torch
from d2go.modeling import ema
from d2go.utils.testing import helper
class TestArch(torch.nn.Module):
def __init__(self, value=None, int_value=None):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
if value is not None:
self.set_const_weights(value, int_value)
def forward(self, x):
ret = self.conv(x)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
return ret
def set_const_weights(self, value, int_value=None):
if int_value is None:
int_value = int(value)
for x in itertools.chain(self.parameters(), self.buffers()):
if x.dtype == torch.float32:
x.data.fill_(value)
else:
x.data.fill_(int_value)
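# Returns True iff both state dicts have identical keys, float32 tensors
# agree within abs_error, and all other entries match exactly.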
def _compare_state_dict(model1, model2, abs_error=1e-3):
sd1 = model1.state_dict()
sd2 = model2.state_dict()
if len(sd1) != len(sd2):
return False
if set(sd1.keys()) != set(sd2.keys()):
return False
for name in sd1:
if sd1[name].dtype == torch.float32:
if torch.abs((sd1[name] - sd2[name])).max() > abs_error:
return False
elif (sd1[name] != sd2[name]).any():
return False
return True
class TestModelingModelEMA(unittest.TestCase):
def test_emastate(self):
model = TestArch()
state = ema.EMAState.FromModel(model)
# two for conv (conv.weight, conv.bias),
# five for bn (bn.weight, bn.bias, bn.running_mean, bn.running_var, bn.num_batches_tracked)
full_state = {
"conv.weight",
"conv.bias",
"bn.weight",
"bn.bias",
"bn.running_mean",
"bn.running_var",
"bn.num_batches_tracked",
}
self.assertEqual(len(state.state), 7)
self.assertTrue(set(state.state) == full_state)
for _, val in state.state.items():
self.assertFalse(val.requires_grad)
model1 = TestArch()
self.assertFalse(_compare_state_dict(model, model1))
state.apply_to(model1)
self.assertTrue(_compare_state_dict(model, model1))
# test ema state that excludes buffers and frozen parameters
model.conv.weight.requires_grad = False
state1 = ema.EMAState.FromModel(model, include_frozen=False)
# should exclude frozen parameter: conv.weight
self.assertTrue(full_state - set(state1.state) == {"conv.weight"})
state2 = ema.EMAState.FromModel(model, include_buffer=False)
# should exclude buffers: bn.running_mean, bn.running_var, bn.num_batches_tracked
self.assertTrue(
full_state - set(state2.state)
== {"bn.running_mean", "bn.running_var", "bn.num_batches_tracked"}
)
state3 = ema.EMAState.FromModel(
model, include_frozen=False, include_buffer=False
)
# should exclude frozen param + buffers: conv.weight, bn.running_mean, bn.running_var, bn.num_batches_tracked
self.assertTrue(set(state3.state) == {"conv.bias", "bn.weight", "bn.bias"})
def test_emastate_saveload(self):
model = TestArch()
state = ema.EMAState.FromModel(model)
model1 = TestArch()
self.assertFalse(_compare_state_dict(model, model1))
state1 = ema.EMAState()
state1.load_state_dict(state.state_dict())
state1.apply_to(model1)
self.assertTrue(_compare_state_dict(model, model1))
@helper.skip_if_no_gpu
def test_emastate_crossdevice(self):
model = TestArch()
model.cuda()
# state on gpu
state = ema.EMAState.FromModel(model)
self.assertEqual(state.device, torch.device("cuda:0"))
# target model on cpu
model1 = TestArch()
state.apply_to(model1)
self.assertEqual(next(model1.parameters()).device, torch.device("cpu"))
self.assertTrue(_compare_state_dict(copy.deepcopy(model).cpu(), model1))
# state on cpu
state1 = ema.EMAState.FromModel(model, device="cpu")
self.assertEqual(state1.device, torch.device("cpu"))
# target model on gpu
model2 = TestArch()
model2.cuda()
state1.apply_to(model2)
self.assertEqual(next(model2.parameters()).device, torch.device("cuda:0"))
self.assertTrue(_compare_state_dict(model, model2))
def test_ema_updater(self):
model = TestArch()
state = ema.EMAState()
updated_model = TestArch()
updater = ema.EMAUpdater(state, decay=0.0)
updater.init_state(model)
for _ in range(3):
cur = TestArch()
updater.update(cur)
state.apply_to(updated_model)
# decay == 0.0: the EMA state always tracks the newest model
self.assertTrue(_compare_state_dict(updated_model, cur))
updater = ema.EMAUpdater(state, decay=1.0)
updater.init_state(model)
for _ in range(3):
cur = TestArch()
updater.update(cur)
state.apply_to(updated_model)
# decay == 1.0: the EMA state always keeps the initial model
self.assertTrue(_compare_state_dict(updated_model, model))
def test_ema_updater_decay(self):
state = ema.EMAState()
updater = ema.EMAUpdater(state, decay=0.7)
updater.init_state(TestArch(1.0))
gt_val = 1.0
gt_val_int = 1
for idx in range(3):
updater.update(TestArch(float(idx)))
updated_model = state.get_ema_model(TestArch())
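# expected value follows the EMA update v <- decay * v + (1 - decay) * v_new
# with decay=0.7; the int buffer is truncated after each update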
gt_val = gt_val * 0.7 + float(idx) * 0.3
gt_val_int = int(gt_val_int * 0.7 + float(idx) * 0.3)
self.assertTrue(
_compare_state_dict(updated_model, TestArch(gt_val, gt_val_int))
)
class TestModelingModelEMAHook(unittest.TestCase):
def test_ema_hook(self):
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL_EMA.ENABLED = True
# use new model weights
cfg.MODEL_EMA.DECAY = 0.0
cfg.MODEL_EMA.DECAY_WARM_UP_FACTOR = -1
model = TestArch()
ema.may_build_model_ema(cfg, model)
self.assertTrue(hasattr(model, "ema_state"))
ema_hook = ema.EMAHook(cfg, model)
ema_hook.before_train()
ema_hook.before_step()
model.set_const_weights(2.0)
ema_hook.after_step()
ema_hook.after_train()
ema_checkpointers = ema.may_get_ema_checkpointer(cfg, model)
self.assertEqual(len(ema_checkpointers), 1)
out_model = TestArch()
ema_checkpointers["ema_state"].apply_to(out_model)
self.assertTrue(_compare_state_dict(out_model, model))
# --- end of tests/modeling/test_modeling_ema.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import unittest
import torch
from d2go.runner import GeneralizedRCNNRunner
from detectron2.modeling import build_anchor_generator, build_backbone
from detectron2.modeling.proposal_generator import rpn
logger = logging.getLogger(__name__)
# overwrite configs if specified, otherwise default config is used
RPN_CFGS = {}
class TestRPNHeads(unittest.TestCase):
def test_build_rpn_heads(self):
"""Make sure rpn heads run"""
self.assertGreater(len(rpn.RPN_HEAD_REGISTRY._obj_map), 0)
for name, builder in rpn.RPN_HEAD_REGISTRY._obj_map.items():
logger.info("Testing {}...".format(name))
cfg = GeneralizedRCNNRunner.get_default_cfg()
if name in RPN_CFGS:
cfg.merge_from_file(RPN_CFGS[name])
backbone = build_backbone(cfg)
backbone_shape = backbone.output_shape()
rpn_input_shape = [backbone_shape[x] for x in cfg.MODEL.RPN.IN_FEATURES]
rpn_head = builder(cfg, rpn_input_shape)
in_channels = list(backbone_shape.values())[0].channels
num_anchors = build_anchor_generator(cfg, rpn_input_shape).num_cell_anchors[
0
]
N, C_in, H, W = 2, in_channels, 24, 32
input = torch.rand([N, C_in, H, W], dtype=torch.float32)
LAYERS = len(cfg.MODEL.RPN.IN_FEATURES)
out = rpn_head([input] * LAYERS)
self.assertEqual(len(out), 2)
logits, bbox_reg = out
for idx in range(LAYERS):
self.assertEqual(
logits[idx].shape,
torch.Size(
[input.shape[0], num_anchors, input.shape[2], input.shape[3]]
),
)
self.assertEqual(
bbox_reg[idx].shape,
torch.Size(
[
logits[idx].shape[0],
num_anchors * 4,
logits[idx].shape[2],
logits[idx].shape[3],
]
),
)
def test_build_rpn_heads_with_rotated_anchor_generator(self):
"""Make sure rpn heads work with rotated anchor generator"""
self.assertGreater(len(rpn.RPN_HEAD_REGISTRY._obj_map), 0)
for name, builder in rpn.RPN_HEAD_REGISTRY._obj_map.items():
logger.info("Testing {}...".format(name))
cfg = GeneralizedRCNNRunner.get_default_cfg()
if name in RPN_CFGS:
cfg.merge_from_file(RPN_CFGS[name])
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
backbone = build_backbone(cfg)
backbone_shape = backbone.output_shape()
rpn_input_shape = [backbone_shape[x] for x in cfg.MODEL.RPN.IN_FEATURES]
rpn_head = builder(cfg, rpn_input_shape)
in_channels = list(backbone_shape.values())[0].channels
anchor_generator = build_anchor_generator(cfg, rpn_input_shape)
num_anchors = anchor_generator.num_cell_anchors[0]
box_dim = anchor_generator.box_dim
N, C_in, H, W = 2, in_channels, 24, 32
input = torch.rand([N, C_in, H, W], dtype=torch.float32)
LAYERS = len(cfg.MODEL.RPN.IN_FEATURES)
out = rpn_head([input] * LAYERS)
self.assertEqual(len(out), 2)
logits, bbox_reg = out
for idx in range(LAYERS):
self.assertEqual(
logits[idx].shape,
torch.Size(
[input.shape[0], num_anchors, input.shape[2], input.shape[3]]
),
)
self.assertEqual(
bbox_reg[idx].shape,
torch.Size(
[
logits[idx].shape[0],
num_anchors * box_dim,
logits[idx].shape[2],
logits[idx].shape[3],
]
),
)
if __name__ == "__main__":
unittest.main()
# --- end of tests/modeling/test_rpn_heads.py (d2go-main) ---
# --- tests/modeling/__init__.py (d2go-main): empty file ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from d2go.utils.testing import rcnn_helper as rh
from detectron2.structures import Boxes
class TestRCNNHelper(unittest.TestCase):
def test_get_instances_from_image(self):
boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
gt_kpts = torch.Tensor([75, 60, 1.0] * 21 + [175, 90, 1.0] * 21).reshape(
2, 21, 3
)
batched_inputs = rh.get_batched_inputs(2, boxes=boxes)
instances = rh.get_detected_instances_from_image(batched_inputs)
self.assertEqual(len(instances), 2)
self.assertArrayEqual(instances[0].pred_boxes.tensor, boxes.tensor)
self.assertArrayEqual(instances[0].pred_keypoints, gt_kpts)
def test_get_instances_from_image_scale_image(self):
H, W = 398, 224
all_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
image = rh.get_batched_inputs(1, (H, W), (H, W), all_boxes)[0]["image"]
boxes = rh.get_detected_instances_from_image([{"image": image}])[0].pred_boxes
self.assertArrayEqual(boxes.tensor, all_boxes.tensor)
# scale image by 0.5
scale_image = torch.nn.functional.interpolate(
torch.unsqueeze(image, 0),
scale_factor=(0.5, 0.5),
mode="bilinear",
align_corners=False,
recompute_scale_factor=False,
)[0]
sub_boxes = rh.get_detected_instances_from_image([{"image": scale_image}])[
0
].pred_boxes
self.assertArrayEqual(sub_boxes.tensor, [[25, 20, 50, 40], [75, 30, 100, 60]])
# scale image by 0.75
scale_image = torch.nn.functional.interpolate(
torch.unsqueeze(image, 0),
scale_factor=(0.75, 0.75),
mode="bilinear",
align_corners=False,
recompute_scale_factor=False,
)[0]
sub_boxes = rh.get_detected_instances_from_image([{"image": scale_image}])[
0
].pred_boxes
# exact values: [[37.5, 30, 75, 60], [112.5, 45, 150, 90]] (fractional coords rounded in the assertion below)
self.assertArrayEqual(sub_boxes.tensor, [[37, 30, 75, 60], [112, 45, 150, 90]])
def test_mock_rcnn_inference(self):
image_size = (1920, 1080)
resize_size = (398, 224)
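# (x, y) factors that map boxes from the resized frame back to the original image size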
scale_xy = (1080.0 / 224, 1920.0 / 398)
gt_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
gt_kpts = torch.Tensor([75, 60, 1.0] * 21 + [175, 90, 1.0] * 21).reshape(
2, 21, 3
)
# create inputs
batched_inputs = rh.get_batched_inputs(2, image_size, resize_size, gt_boxes)
# create model
model = rh.MockRCNNInference(image_size, resize_size)
# run without post processing
det_instances = model(batched_inputs, None, do_postprocess=False)
self.assertArrayAllClose(
det_instances[0].pred_boxes.tensor,
gt_boxes.tensor,
atol=1e-4,
)
self.assertArrayAllClose(
det_instances[0].pred_keypoints,
gt_kpts,
atol=1e-4,
)
# run with post processing
det_instances = model(batched_inputs, None, do_postprocess=True)
gt_boxes_scaled = gt_boxes.clone()
gt_boxes_scaled.scale(*scale_xy)
gt_kpts_scaled = torch.Tensor(
[75 * scale_xy[0], 60 * scale_xy[1], 1.0] * 21
+ [175 * scale_xy[0], 90 * scale_xy[1], 1.0] * 21
).reshape(2, 21, 3)
self.assertArrayAllClose(
det_instances[0]["instances"].pred_boxes.tensor,
gt_boxes_scaled.tensor,
atol=1e-4,
)
self.assertArrayAllClose(
det_instances[0]["instances"].pred_keypoints,
gt_kpts_scaled,
atol=1e-4,
)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
def assertArrayAllClose(self, a1, a2, rtol=1.0e-5, atol=1.0e-8):
self.assertTrue(np.allclose(a1, a2, rtol=rtol, atol=atol))
# --- end of tests/modeling/test_rcnn_helper.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import shutil
import tempfile
import unittest
import torch
from d2go.export.exporter import convert_and_export_predictor
from d2go.runner import Detectron2GoRunner
from mobile_cv.predictor.api import create_predictor
def _get_batch(height, width, is_train):
def _get_frame():
random_image = torch.rand(3, height, width).to(torch.float32)
ret = {"image": random_image}
if is_train:
mask_size = (height, width)
random_mask = torch.randint(low=0, high=2, size=mask_size).to(torch.int64)
ret["sem_seg"] = random_mask
return ret
batch_size = 2 if is_train else 1
return [
{"filename": "some_file", "width": 100, "height": 100, **_get_frame()}
for _ in range(batch_size)
]
def _get_data_loader(height, width, is_train):
inputs = _get_batch(height, width, is_train)
def get_data_loader():
while True:
yield inputs
return get_data_loader()
def _get_input_dim(model):
h = w = max(model.backbone.size_divisibility, 1)
return h, w
class BaseSemanticSegTestCase:
class TemplateTestCase(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix="test_meta_arch_semantic_seg_")
self.addCleanup(shutil.rmtree, self.test_dir)
runner = Detectron2GoRunner()
self.cfg = runner.get_default_cfg()
self.setup_custom_test()
self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
self.test_model = runner.build_model(self.cfg, eval_only=True)
def setup_custom_test(self):
raise NotImplementedError()
def test_inference(self):
h, w = _get_input_dim(self.test_model)
inputs = _get_batch(h, w, False)
with torch.no_grad():
self.test_model(inputs)
def test_train(self):
h, w = _get_input_dim(self.test_model)
inputs = _get_batch(h, w, True)
self.test_model.train()
loss_dict = self.test_model(inputs)
losses = sum(loss_dict.values())
losses.backward()
def _test_export(self, predictor_type, compare_match=True):
h, w = _get_input_dim(self.test_model)
dl = _get_data_loader(h, w, False)
inputs = next(iter(dl))
output_dir = os.path.join(self.test_dir, "test_export")
predictor_path = convert_and_export_predictor(
self.cfg, self.test_model, predictor_type, output_dir, dl
)
predictor = create_predictor(predictor_path)
predictor_outputs = predictor(inputs)
self.assertEqual(len(predictor_outputs), len(inputs))
with torch.no_grad():
pytorch_outputs = self.test_model(inputs)
self.assertEqual(len(pytorch_outputs), len(inputs))
if compare_match:
for predictor_output, pytorch_output in zip(
predictor_outputs, pytorch_outputs
):
torch.testing.assert_close(
predictor_output["sem_seg"], pytorch_output["sem_seg"]
)
class TestR50FPN(BaseSemanticSegTestCase.TemplateTestCase):
def setup_custom_test(self):
self.cfg.merge_from_file("detectron2://Misc/semantic_R_50_FPN_1x.yaml")
# discard pretrained backbone weights
self.cfg.merge_from_list(["MODEL.WEIGHTS", ""])
def test_export_torchscript(self):
self._test_export("torchscript", compare_match=True)
# --- end of tests/modeling/test_meta_arch_semantic_seg.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import unittest
from typing import List
import d2go.runner.default_runner as default_runner
import torch
from d2go.config import CfgNode
from d2go.modeling import modeling_hook as mh
from d2go.modeling.api import build_d2go_model, D2GoModelBuildResult
from d2go.registry.builtin import META_ARCH_REGISTRY, MODELING_HOOK_REGISTRY
@META_ARCH_REGISTRY.register()
class TestArch(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
def forward(self, x):
return x * 2
# a wrapper that adds 1 to the wrapped model's output
class PlusOneWrapper(torch.nn.Module):
def __init__(self, model: torch.nn.Module):
super().__init__()
self.model = model
def forward(self, x):
return self.model(x) + 1
@MODELING_HOOK_REGISTRY.register()
class PlusOneHook(mh.ModelingHook):
def __init__(self, cfg):
super().__init__(cfg)
def apply(self, model: torch.nn.Module) -> torch.nn.Module:
return PlusOneWrapper(model)
def unapply(self, model: torch.nn.Module) -> torch.nn.Module:
assert isinstance(model, PlusOneWrapper)
return model.model
# a wrapper that multiplies the wrapped model's output by 2
class TimesTwoWrapper(torch.nn.Module):
def __init__(self, model: torch.nn.Module):
super().__init__()
self.model = model
def forward(self, x):
return self.model(x) * 2
@MODELING_HOOK_REGISTRY.register()
class TimesTwoHook(mh.ModelingHook):
def __init__(self, cfg):
super().__init__(cfg)
def apply(self, model: torch.nn.Module) -> torch.nn.Module:
return TimesTwoWrapper(model)
def unapply(self, model: torch.nn.Module) -> torch.nn.Module:
assert isinstance(model, TimesTwoWrapper)
return model.model
class TestModelingHook(unittest.TestCase):
def test_modeling_hook_simple(self):
model = TestArch(None)
hook = PlusOneHook(None)
model_with_hook = hook.apply(model)
self.assertEqual(model_with_hook(2), 5)
original_model = hook.unapply(model_with_hook)
self.assertEqual(model, original_model)
def test_modeling_hook_cfg(self):
"""Create model with modeling hook using build_model"""
cfg = CfgNode()
cfg.MODEL = CfgNode()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "TestArch"
cfg.MODEL.MODELING_HOOKS = ["PlusOneHook", "TimesTwoHook"]
model_info: D2GoModelBuildResult = build_d2go_model(cfg)
model: torch.nn.Module = model_info.model
modeling_hooks: List[mh.ModelingHook] = model_info.modeling_hooks
self.assertEqual(model(2), 10)
self.assertEqual(len(modeling_hooks), 2)
self.assertTrue(hasattr(model, "_modeling_hooks"))
self.assertTrue(hasattr(model, "unapply_modeling_hooks"))
orig_model = model.unapply_modeling_hooks()
self.assertIsInstance(orig_model, TestArch)
self.assertEqual(orig_model(2), 4)
def test_modeling_hook_runner(self):
"""Create model with modeling hook from runner"""
runner = default_runner.Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "TestArch"
cfg.MODEL.MODELING_HOOKS = ["PlusOneHook", "TimesTwoHook"]
model = runner.build_model(cfg)
self.assertEqual(model(2), 10)
self.assertTrue(hasattr(model, "_modeling_hooks"))
self.assertTrue(hasattr(model, "unapply_modeling_hooks"))
orig_model = model.unapply_modeling_hooks()
self.assertIsInstance(orig_model, TestArch)
self.assertEqual(orig_model(2), 4)
default_runner._close_all_tbx_writers()
def test_modeling_hook_copy(self):
"""Create model with modeling hook, the model could be copied"""
cfg = CfgNode()
cfg.MODEL = CfgNode()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "TestArch"
cfg.MODEL.MODELING_HOOKS = ["PlusOneHook", "TimesTwoHook"]
model_info: D2GoModelBuildResult = build_d2go_model(cfg)
model: torch.nn.Module = model_info.model
modeling_hooks: List[mh.ModelingHook] = model_info.modeling_hooks
self.assertEqual(model(2), 10)
self.assertEqual(len(modeling_hooks), 2)
model_copy = copy.deepcopy(model)
orig_model = model.unapply_modeling_hooks()
self.assertIsInstance(orig_model, TestArch)
self.assertEqual(orig_model(2), 4)
orig_model_copy = model_copy.unapply_modeling_hooks()
self.assertEqual(orig_model_copy(2), 4)
# --- end of tests/modeling/test_modeling_meta_arch_modeling_hook.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import d2go.data.transforms.box_utils as bu
import d2go.modeling.image_pooler as image_pooler
import numpy as np
import torch
from d2go.utils.testing import rcnn_helper as rh
from detectron2.structures import Boxes
class TestModelingImagePooler(unittest.TestCase):
def test_image_pooler(self):
H, W = 8, 6
image = torch.zeros(3, H, W)
# xyxy
boxes = torch.Tensor([[2, 3, 5, 7]])
image[0, 3:7, 2:5] = 1
image[1, 3:7, 2:5] = 2
image[2, 3:7, 2:5] = 4
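# the pooler crops the box from the image and resizes the crop to a short
# side of 6 (long side capped at 12, resize-shortest-edge style)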
img_pooler = image_pooler.ImagePooler(resize_short=6, resize_max=12).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes)
# check pooled images
self.assertEqual(pooled_img.shape, torch.Size([3, 8, 6]))
self.assertArrayEqual(torch.unique(pooled_img[0, :, :]), [1])
self.assertArrayEqual(torch.unique(pooled_img[1, :, :]), [2])
self.assertArrayEqual(torch.unique(pooled_img[2, :, :]), [4])
# check pooled boxes, in xyxy format
self.assertArrayEqual(pooled_box, [[0, 0, 6, 8]])
# inverse of transforms
trans_inv = transforms.inverse()
# inverse of boxes, xyxy
inversed_box = trans_inv.apply_box(pooled_box)
self.assertArrayEqual(inversed_box, boxes)
pooled_sub_box = np.array([[2, 2, 4, 6]])
inversed_sub_box = trans_inv.apply_box(pooled_sub_box)
self.assertArrayEqual(inversed_sub_box, [[3, 4, 4, 6]])
def test_image_pooler_scale_box(self):
H, W = 8, 6
image = torch.zeros(3, H, W)
# xyxy
boxes = torch.Tensor([[2, 3, 5, 7]])
image[0, 3:7, 2:5] = 1
image[1, 3:7, 2:5] = 2
image[2, 3:7, 2:5] = 4
img_pooler = image_pooler.ImagePooler(
resize_type=None, box_scale_factor=4.0
).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes)
# check pooled images
self.assertEqual(pooled_img.shape, torch.Size([3, 8, 6]))
self.assertArrayEqual(pooled_img, image)
# check pooled boxes, in xyxy format, the box before scaling
self.assertArrayEqual(pooled_box, [[2, 3, 5, 7]])
def test_image_pooler_scale_box_large_crop_only(self):
"""Crop bbox"""
H, W = 398, 224
all_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
image = rh.get_batched_inputs(1, (H, W), (H, W), all_boxes)[0]["image"]
boxes = bu.get_box_union(all_boxes)
self.assertArrayEqual(boxes.tensor, [[50, 40, 200, 120]])
img_pooler = image_pooler.ImagePooler(
resize_type=None, box_scale_factor=1.0
).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes.tensor)
self.assertEqual(pooled_img.shape, torch.Size([3, 80, 150]))
sub_boxes = rh.get_detected_instances_from_image([{"image": pooled_img}])[
0
].pred_boxes
self.assertArrayEqual(sub_boxes.tensor, [[0, 0, 50, 40], [100, 20, 150, 80]])
def test_image_pooler_scale_box_large_crop_and_scale(self):
"""Crop bbox that is scaled"""
H, W = 398, 224
all_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
image = rh.get_batched_inputs(1, (H, W), (H, W), all_boxes)[0]["image"]
boxes = bu.get_box_union(all_boxes)
img_pooler = image_pooler.ImagePooler(
resize_type=None, box_scale_factor=1.2
).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes.tensor)
self.assertEqual(pooled_img.shape, torch.Size([3, 96, 180]))
# bbox with scaling in the original space
orig_crop_box = transforms.inverse().apply_box(
[0, 0, pooled_img.shape[2], pooled_img.shape[1]]
)
self.assertArrayEqual(orig_crop_box, [[35, 32, 215, 128]])
sub_boxes = rh.get_detected_instances_from_image([{"image": pooled_img}])[
0
].pred_boxes
# gt_offset_xy = (50 - 35 = 15, 40 - 32 = 8)
self.assertArrayEqual(sub_boxes.tensor, [[15, 8, 65, 48], [115, 28, 165, 88]])
def test_image_pooler_scale_box_large_crop_scale_and_resize(self):
"""Crop bbox that is scaled, resize the cropped box"""
H, W = 398, 224
all_boxes = Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]]))
image = rh.get_batched_inputs(1, (H, W), (H, W), all_boxes)[0]["image"]
boxes = bu.get_box_union(all_boxes)
img_pooler = image_pooler.ImagePooler(
resize_type="resize_shortest",
resize_short=48,
resize_max=180,
box_scale_factor=1.2,
).eval()
pooled_img, pooled_box, transforms = img_pooler(image, boxes.tensor)
self.assertEqual(pooled_img.shape, torch.Size([3, 48, 90]))
# bbox with scaling in the original space
orig_crop_box = transforms.inverse().apply_box(
[0, 0, pooled_img.shape[2], pooled_img.shape[1]]
)
self.assertArrayEqual(orig_crop_box, [[35, 32, 215, 128]])
# bbox without scaling in the original space
orig_boxes = transforms.inverse().apply_box(pooled_box)
self.assertArrayEqual(orig_boxes, boxes.tensor)
sub_boxes = rh.get_detected_instances_from_image([{"image": pooled_img}])[
0
].pred_boxes
# exact values: [[7.5, 4, 32.5, 24], [57.5, 14, 82.5, 44]] (mins floored, maxes ceiled in the assertion below)
self.assertArrayEqual(sub_boxes.tensor, [[7, 4, 33, 24], [57, 14, 83, 44]])
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
# --- end of tests/modeling/test_modeling_image_pooler.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from d2go.modeling.kmeans_anchors import (
add_kmeans_anchors_cfg,
compute_kmeans_anchors,
compute_kmeans_anchors_hook,
)
from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.testing.data_loader_helper import register_toy_coco_dataset
from detectron2.data import DatasetCatalog, DatasetFromList, MapDataset
from detectron2.engine.train_loop import SimpleTrainer
from torch.utils.data.sampler import BatchSampler, Sampler
class IntervalSampler(Sampler):
def __init__(self, size: int, interval: int):
self._local_indices = range(0, size, interval)
def __iter__(self):
yield from self._local_indices
def __len__(self):
return len(self._local_indices)
def build_sequence_loader(cfg, dataset_name, mapper, total_samples, batch_size=1):
"""
Similar to `build_detection_test_loader` in that its sampler visits
dataset_dicts in order and only loops once.
"""
dataset_dicts = DatasetCatalog.get(dataset_name)
dataset = DatasetFromList(dataset_dicts)
dataset = MapDataset(dataset, mapper)
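# subsample at a fixed stride so one in-order pass yields roughly total_samples items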
interval = max(1, int(len(dataset) / total_samples))
sampler = IntervalSampler(len(dataset), interval)
batch_sampler = BatchSampler(sampler, batch_size, drop_last=False)
def _trivial_batch_collator(batch):
return batch
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=_trivial_batch_collator,
)
return data_loader
class TestKmeansAnchors(unittest.TestCase):
def setUp(self):
self.runner = GeneralizedRCNNRunner()
def _get_default_cfg(self):
cfg = self.runner.get_default_cfg()
add_kmeans_anchors_cfg(cfg)
return cfg
@unittest.skip("This can only run locally and takes significant of time")
def test_matching_previous_results(self):
cfg = self._get_default_cfg()
cfg.INPUT.MIN_SIZE_TRAIN = (144,)
cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 10
cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 512
cfg.MODEL.KMEANS_ANCHORS.DATASETS = ()
# NOTE: create a data loader that samples exact the same as previous
# implementation. In D2Go, we will rely on the train loader instead.
# NOTE: in order to load OV580_XRM dataset, change the IM_DIR to:
# "/mnt/vol/gfsai-east/aml/mobile-vision//dataset/oculus/hand_tracking//torch/Segmentation/OV580_XRM_640x480_V3_new_rerun/images" # noqa
data_loader = build_sequence_loader(
cfg,
# dataset_name="coco_2014_valminusminival",
# dataset_name="OV580_XRM_640x480_V3_train",
dataset_name="OV580_XRM_640x480_V3_heldOut_small_512",
mapper=self.runner.get_mapper(cfg, is_train=True),
total_samples=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
batch_size=3,
)
kmeans_anchors = compute_kmeans_anchors(
cfg, data_loader, sort_by_area=False, _stride=16, _legacy_plus_one=True
)
# Taken from D9849940
reference_anchors = np.array(
[
[-15.33554182, -15.29361029, 31.33554182, 31.29361029], # noqa
[-9.34156693, -9.32553548, 25.34156693, 25.32553548], # noqa
[-6.03052776, -6.02034167, 22.03052776, 22.02034167], # noqa
[-2.25951741, -2.182888, 18.25951741, 18.182888], # noqa
[-18.93553378, -18.93553403, 34.93553378, 34.93553403], # noqa
[-12.69068356, -12.73989029, 28.69068356, 28.73989029], # noqa
[-24.73489189, -24.73489246, 40.73489189, 40.73489246], # noqa
[-4.06014466, -4.06014469, 20.06014466, 20.06014469], # noqa
[-7.61036119, -7.60467538, 23.61036119, 23.60467538], # noqa
[-10.88200579, -10.87634414, 26.88200579, 26.87634414], # noqa
]
)
np.testing.assert_allclose(kmeans_anchors, reference_anchors, atol=1e-6)
def test_build_model(self):
cfg = self._get_default_cfg()
cfg.INPUT.MIN_SIZE_TRAIN = (60,)
cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 3
cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 5
cfg.MODEL.KMEANS_ANCHORS.DATASETS = ("toy_dataset",)
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"
with register_toy_coco_dataset(
"toy_dataset",
image_size=(80, 60), # w, h
num_images=cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG,
):
model = self.runner.build_model(cfg)
trainer = SimpleTrainer(model, data_loader=[], optimizer=None)
trainer_hooks = [compute_kmeans_anchors_hook(self.runner, cfg)]
trainer.register_hooks(trainer_hooks)
trainer.before_train()
anchor_generator = model.proposal_generator.anchor_generator
cell_anchors = list(anchor_generator.cell_anchors)
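# expected anchors are origin-centered xyxy boxes of size 40x30, i.e. half
# of the 80x60 toy images, matching the synthetic gt boxes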
gt_anchors = np.array(
[
[-20, -15, 20, 15] # toy_dataset's bbox is half size of image
for _ in range(cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS)
]
)
np.testing.assert_allclose(cell_anchors[0], gt_anchors)
if __name__ == "__main__":
unittest.main()
# --- end of tests/modeling/test_kmeans_anchors.py (d2go-main) ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from typing import List
from unittest import mock
import numpy as np
import torch
import torch.nn as nn
from d2go.config import CfgNode
from d2go.modeling import modeling_hook as mh
from d2go.modeling.distillation import (
_build_teacher,
_set_device,
BaseDistillationHelper,
CachedLayer,
compute_layer_losses,
DefaultLossCombiner,
DistillationModelingHook,
DomainAdaptation,
ExampleDistillationHelper,
get_default_kd_image_classification_layer_losses,
KnowledgeDistillation,
LabelDistillation,
LayerLossMetadata,
NoopPseudoLabeler,
PseudoLabeler,
record_layers,
register_layer_losses_and_to_device,
RelabelTargetInBatch,
set_cache_dict,
unrecord_layers,
)
from d2go.registry.builtin import (
DISTILLATION_ALGORITHM_REGISTRY,
DISTILLATION_HELPER_REGISTRY,
META_ARCH_REGISTRY,
)
from d2go.runner.config_defaults import add_distillation_configs
from d2go.runner.default_runner import BaseRunner
from d2go.utils.testing import helper
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.mixin import dynamic_mixin, remove_dynamic_mixin
class DivideInputBy2(nn.Module):
def forward(self, batched_inputs: List):
"""Divide all targets by 2 and batch output"""
return [x / 2.0 for x in batched_inputs]
class DivideInputDictBy2(nn.Module):
def forward(self, batched_inputs: List):
"""Divide all inputs by 2 and batch output
Should be used with a pseudo labeler that will unpack the
resulting tensor
"""
output = []
for d in batched_inputs:
output.append(d["input"] / 2.0)
return torch.stack(output)
class DivideInputBy2OutputDict(nn.Module):
def forward(self, batched_inputs: List):
"""Divide all targets by 2 and return dict output"""
return {i: x / 2.0 for i, x in enumerate(batched_inputs)}
class TimesTable5OutputDict(nn.Module):
def forward(self, batched_inputs: List):
"""Return first five entries of times table for each input with a dict output"""
return {i: [x * j for j in range(1, 6)] for i, x in enumerate(batched_inputs)}
class ConstantStrOutput(nn.Module):
def forward(self, batched_inputs: List):
"""Return some string"""
return "Testing!"
class AddOne(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.Tensor([1]))
def forward(self, x):
return x + self.weight
@property
def device(self):
return self.weight.device
class AddLayers(nn.Module):
def __init__(self):
super().__init__()
self.layer0 = AddOne()
self.layer1 = AddOne()
self.layer2 = AddOne()
def forward(self, x):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
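# training mode wraps the output in a loss-style dict; eval mode returns the raw tensor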
if not self.training:
return x
return {"output": x}
@property
def device(self):
return self.layer0.weight.device
class SimpleAdd(nn.Module):
def forward(self, x, y):
return x + y
class SimpleMul(nn.Module):
def forward(self, x, y):
return x * y
class TestLabeler(PseudoLabeler):
def __init__(self, teacher):
self.teacher = teacher
def label(self, x):
return self.teacher(x)
@META_ARCH_REGISTRY.register()
class TestMetaArchAddRand(nn.Module):
def __init__(self, cfg):
super().__init__()
self.weight = nn.Parameter(torch.rand(1))
def forward(self, x):
return x + self.weight
@DISTILLATION_HELPER_REGISTRY.register()
class TestHelper(BaseDistillationHelper):
def get_pseudo_labeler(self):
"""Run teacher model on inputs"""
return TestLabeler(self.teacher)
def get_preprocess_student_input(self):
return lambda x: x + 1
def get_preprocess_teacher_input(self):
return lambda x: x + 2
def get_layer_losses(self, model=None):
return [
LayerLossMetadata(
loss=SimpleAdd(),
name="add",
layer0="layer0",
layer1="layer0",
),
LayerLossMetadata(
loss=SimpleMul(),
name="mul",
layer0="layer1",
layer1="layer1",
),
]
def get_combine_losses(self):
return lambda d: {
"output": d["output"] * 0.1,
"add": d["add"] * 0.5,
"mul": d["mul"] * 10.0,
}
class TestDAHelper(BaseDistillationHelper):
def get_preprocess_domain0_input(self):
return lambda x: x["real"]
def get_preprocess_domain1_input(self):
return lambda x: x["synthetic"]
def get_layer_losses(self, model=None):
return [
LayerLossMetadata(
loss=SimpleAdd(),
name="add",
layer0="layer0",
layer1="layer0",
)
]
def get_combine_losses(self):
return lambda d0, d1, da, ta: {
"real": d0["output"] * 0.1,
"synthetic": d1["output"] * 0.5,
"add": da["add"] * 10.0,
}
class Noop(nn.Module):
def forward(self, x):
return x
def _get_input_data(n: int = 2, use_input_target: bool = False, requires_grad=False):
"""Return input data, dict if use_input_target is specified"""
if not use_input_target:
return torch.randn(n, requires_grad=requires_grad)
return [
{
"input": torch.randn(1, requires_grad=requires_grad),
"target": torch.randn(1),
}
for _ in range(n)
]
def _get_default_cfg():
cfg = CfgNode()
cfg.MODEL = CfgNode()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "TestArch"
add_distillation_configs(cfg)
# model_ema.add_model_ema_configs(cfg)
cfg.DISTILLATION.ALGORITHM = "LabelDistillation"
cfg.DISTILLATION.HELPER = "BaseDistillationHelper"
cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = ""
cfg.DISTILLATION.TEACHER.DEVICE = ""
return cfg
class TestDistillation(unittest.TestCase):
def test_add_distillation_configs(self):
"""Check default config"""
cfg = CfgNode()
add_distillation_configs(cfg)
self.assertTrue(isinstance(cfg.DISTILLATION.TEACHER, CfgNode))
# check teacher model config is clone of student model
self.assertEqual(cfg.DISTILLATION.TEACHER.CONFIG_FNAME, "")
def test_build_teacher_torchscript(self):
"""Check can build teacher using torchscript fname in config"""
# create torchscript
model = DivideInputBy2()
traced_model = torch.jit.trace(model, torch.randn(5))
with make_temp_directory("tmp") as output_dir:
fname = f"{output_dir}/tmp.pt"
torch.jit.save(traced_model, fname)
# create teacher
cfg = _get_default_cfg()
cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = fname
teacher = _build_teacher(cfg)
batched_inputs = torch.randn(5)
gt = batched_inputs / 2.0
output = teacher(batched_inputs)
torch.testing.assert_close(torch.Tensor(output), gt)
@helper.skip_if_no_gpu
def test_build_teacher_torchscript_gpu(self):
"""Check teacher moved to cuda"""
model = AddOne()
traced_model = torch.jit.trace(model, torch.randn(5))
with make_temp_directory("tmp") as output_dir:
fname = f"{output_dir}/tmp.pt"
torch.jit.save(traced_model, fname)
# create teacher
cfg = _get_default_cfg()
cfg.MODEL.DEVICE = "cuda"
cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = fname
teacher = _build_teacher(cfg)
batched_inputs = torch.randn(5).to("cuda")
gt = batched_inputs + torch.Tensor([1]).to("cuda")
output = teacher(batched_inputs)
torch.testing.assert_close(torch.Tensor(output), gt)
def test_build_teacher_config(self):
"""Check build pytorch model using config"""
# build model
cfg = _get_default_cfg()
cfg.MODEL.META_ARCHITECTURE = "TestMetaArchAddRand"
gt_model = BaseRunner().build_model(cfg)
with make_temp_directory("tmp") as output_dir:
# save model
checkpointer = DetectionCheckpointer(gt_model, save_dir=output_dir)
checkpointer.save("checkpoint")
cfg.MODEL.WEIGHTS = f"{output_dir}/checkpoint.pth"
config_fname = f"{output_dir}/config.yaml"
with PathManager.open(config_fname, "w") as f:
f.write(cfg.dump())
# load model and compare to gt
cfg.DISTILLATION.TEACHER.TYPE = "config"
cfg.DISTILLATION.TEACHER.CONFIG_FNAME = config_fname
model = _build_teacher(cfg)
self.assertEqual(gt_model.weight, model.weight)
def test_build_teacher_none(self):
"""Check that we can ignore building the teacher"""
# build model
cfg = _get_default_cfg()
cfg.MODEL.META_ARCHITECTURE = "TestMetaArchAddRand"
cfg.DISTILLATION.TEACHER.TYPE = "no_teacher"
model = _build_teacher(cfg)
self.assertTrue(isinstance(model, nn.Module))
def test_override_teacher_config_gpu_on_cpu(self):
"""Teacher cuda model can be run on cpu if specified in config"""
# build model where teacher is specified on gpu but user overrides cpu
cfg = _get_default_cfg()
cfg.MODEL.META_ARCHITECTURE = "TestMetaArchAddRand"
gt_model = BaseRunner().build_model(cfg)
with make_temp_directory("tmp") as output_dir:
# save model
checkpointer = DetectionCheckpointer(gt_model, save_dir=output_dir)
checkpointer.save("checkpoint")
cfg.MODEL.WEIGHTS = f"{output_dir}/checkpoint.pth"
cfg.MODEL.DEVICE = "cuda"
config_fname = f"{output_dir}/config.yaml"
with PathManager.open(config_fname, "w") as f:
f.write(cfg.dump())
# load model and compare to gt
cfg.DISTILLATION.TEACHER.TYPE = "config"
cfg.DISTILLATION.TEACHER.CONFIG_FNAME = config_fname
cfg.DISTILLATION.TEACHER.DEVICE = "cpu"
model = _build_teacher(cfg)
self.assertEqual(gt_model.weight, model.weight)
def test_set_device(self):
"""Check teacher device is set"""
# without attr
model = Noop()
self.assertFalse(hasattr(model, "device"))
device = torch.device("cpu")
# without property
model = _set_device(model, device)
self.assertEqual(model.device, device)
# with property
model = AddOne()
model = _set_device(model, device)
self.assertEqual(model.device, device)
def test_cached_layer_tensor(self):
"""Check cached layer saves layer output"""
model = AddOne()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = torch.randn(1)
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_list(self):
"""Check cached layer saves list"""
model = DivideInputBy2()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = [torch.randn(1) for _ in range(2)]
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_tuple(self):
"""Check cached layer saves list"""
model = DivideInputBy2()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
        input = tuple(torch.randn(1) for _ in range(2))
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_dict(self):
"""Check cached layer saves dict"""
model = DivideInputBy2OutputDict()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = [torch.randn(1) for _ in range(2)]
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_arbitrary(self):
"""Check cached layer saves arbitrary nested data structure"""
model = TimesTable5OutputDict()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = [torch.randn(1) for _ in range(2)]
output = model(input)
torch.testing.assert_close(output, cache["test_layer"])
def test_cached_layer_unsupported(self):
"""Check cached layer doesn't save unsupported data type like strings"""
model = ConstantStrOutput()
cache = {}
dynamic_mixin(
model,
CachedLayer,
init_dict={"label": "test_layer", "cache": cache},
)
input = [torch.randn(1) for _ in range(2)]
self.assertRaises(ValueError, model, input)
def test_record_layers(self):
"""Check we can record specified layer"""
model = AddLayers()
cache = record_layers(model, ["", "layer0", "layer1", "layer2"])
input = torch.Tensor([0])
output = model(input)
torch.testing.assert_close(cache["layer0"], torch.Tensor([1]))
torch.testing.assert_close(cache["layer1"], torch.Tensor([2]))
torch.testing.assert_close(cache["layer2"], torch.Tensor([3]))
torch.testing.assert_close(cache[""], output)
def test_unrecord_layers(self):
"""Check we can remove a recorded layer"""
model = AddLayers()
_ = record_layers(model, ["", "layer0", "layer1", "layer2"])
unrecord_layers(model, ["", "layer0"])
self.assertFalse(hasattr(model.layer0, "cache"))
def test_compute_layer_losses(self):
"""Check iterating over loss dicts"""
layer_losses = [
LayerLossMetadata(
loss=lambda x, y: x + y, name="add", layer0="l00", layer1="l10"
),
LayerLossMetadata(
loss=lambda x, y: x / y, name="div", layer0="l01", layer1="l11"
),
]
layer0_cache = {"l00": torch.randn(1), "l01": torch.randn(1)}
layer1_cache = {"l10": torch.randn(1), "l11": torch.randn(1)}
output = compute_layer_losses(layer_losses, layer0_cache, layer1_cache)
torch.testing.assert_close(
output["add"], layer0_cache["l00"] + layer1_cache["l10"]
)
torch.testing.assert_close(
output["div"], layer0_cache["l01"] / layer1_cache["l11"]
)
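    @staticmethod
    def _compute_layer_losses_sketch(layer_losses, layer0_cache, layer1_cache):
        """Hypothetical re-implementation for illustration only (not the d2go
        API): each LayerLossMetadata pairs one cached activation from each
        model and produces a named loss entry, matching the behavior pinned
        down by test_compute_layer_losses above."""
        return {
            ll.name: ll.loss(layer0_cache[ll.layer0], layer1_cache[ll.layer1])
            for ll in layer_losses
        }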
def test_set_cache_dict(self):
"""Check we can swap the cache dict used when recording layers"""
model = AddLayers()
cache = record_layers(model, ["", "layer0", "layer1", "layer2"])
new_cache = {}
set_cache_dict(model, new_cache)
input = torch.Tensor([0])
output = model(input)
self.assertEqual(cache, {})
torch.testing.assert_close(new_cache["layer0"], torch.Tensor([1]))
torch.testing.assert_close(new_cache["layer1"], torch.Tensor([2]))
torch.testing.assert_close(new_cache["layer2"], torch.Tensor([3]))
torch.testing.assert_close(new_cache[""], output)
def test_register_layer_losses(self):
"""Check losses can be registered to model"""
model = AddOne()
ll = [
LayerLossMetadata(
loss=SimpleAdd(),
name="mul",
layer0="layer1",
layer1="layer1",
),
]
registered_losses = register_layer_losses_and_to_device(ll, model)
self.assertTrue(hasattr(model, "mul"))
self.assertEqual(model.mul, registered_losses[0].loss)
@helper.skip_if_no_gpu
def test_register_layer_losses_and_to_device(self):
"""Check losses can be registered to model"""
model = AddOne()
model = model.to("cuda")
ll = [
LayerLossMetadata(
loss=AddOne(),
name="mul",
layer0="layer1",
layer1="layer1",
),
]
register_layer_losses_and_to_device(ll, model)
self.assertEqual(model.mul.device, model.device)
class TestPseudoLabeler(unittest.TestCase):
def test_noop(self):
"""Check noop"""
pseudo_labeler = NoopPseudoLabeler()
x = np.random.randn(1)
output = pseudo_labeler.label(x)
torch.testing.assert_close(x, output)
def test_relabeltargetinbatch(self):
"""Check target is relabed using teacher"""
teacher = DivideInputDictBy2()
teacher.eval()
teacher.device = torch.device("cpu")
relabeler = RelabelTargetInBatch(teacher=teacher)
batched_inputs = _get_input_data(n=2, use_input_target=True)
gt = [{"input": d["input"], "target": d["input"] / 2.0} for d in batched_inputs]
outputs = relabeler.label(batched_inputs)
torch.testing.assert_close(outputs, gt)
class TestDistillationHelper(unittest.TestCase):
def test_registry(self):
"""Check base class in registry"""
self.assertTrue("BaseDistillationHelper" in DISTILLATION_HELPER_REGISTRY)
def test_base_distillation_helper(self):
"""Check base distillation helper returns input as output"""
dh = BaseDistillationHelper(cfg=None, teacher=None)
pseudo_labeler = dh.get_pseudo_labeler()
self.assertTrue(isinstance(pseudo_labeler, NoopPseudoLabeler))
def test_example_distillation_helper(self):
"""Example distillation uses teacher to relabel targets"""
teacher = Noop()
dh = ExampleDistillationHelper(cfg=None, teacher=teacher)
pseudo_labeler = dh.get_pseudo_labeler()
self.assertTrue(isinstance(pseudo_labeler, RelabelTargetInBatch))
self.assertTrue(isinstance(pseudo_labeler.teacher, Noop))
class TestDistillationAlgorithm(unittest.TestCase):
class LabelDistillationNoop(LabelDistillation, Noop):
"""Distillation should be used with dynamic mixin so we create
a new class with mixin of a noop to test"""
pass
def test_registry(self):
"""Check distillation teacher in registry"""
for algorithm in [
"LabelDistillation",
"KnowledgeDistillation",
"DomainAdaptation",
]:
self.assertTrue(algorithm in DISTILLATION_ALGORITHM_REGISTRY)
def test_label_distillation_inference(self):
"""Check inference defaults to student
Use LabelDistillationNoop to set student model to noop
"""
batched_inputs = _get_input_data(n=2)
gt = batched_inputs.detach().clone()
model = self.LabelDistillationNoop()
model.dynamic_mixin_init(
distillation_helper=TestHelper(cfg=None, teacher=DivideInputBy2()),
)
model.eval()
output = model(batched_inputs)
np.testing.assert_array_equal(output, gt)
def test_label_distillation_training(self):
"""Check training uses pseudo labeler
Distillation teacher should run the teacher model on the inputs and
then pass to the noop
"""
batched_inputs = _get_input_data(n=2, requires_grad=True)
gt = [x / 2.0 for x in batched_inputs]
model = self.LabelDistillationNoop()
model.dynamic_mixin_init(
distillation_helper=TestHelper(cfg=None, teacher=DivideInputBy2()),
)
model.train()
output = model(batched_inputs)
torch.testing.assert_close(output, gt)
sum(output).backward()
torch.testing.assert_close(batched_inputs.grad, torch.Tensor([0.5, 0.5]))
def test_kd_inference(self):
"""Check inference defaults to student (and preprocessing)"""
distillation_helper = TestHelper(cfg=CfgNode(), teacher=AddLayers())
model = AddLayers()
dynamic_mixin(
model,
KnowledgeDistillation,
init_dict={"distillation_helper": distillation_helper},
)
model.eval()
input = torch.randn(1)
output = model(input)
torch.testing.assert_close(output, input + 4.0)
def test_kd_train(self):
"""Check train pass results in updated loss output"""
distillation_helper = TestHelper(cfg=CfgNode(), teacher=AddLayers())
model = AddLayers()
dynamic_mixin(
model,
KnowledgeDistillation,
init_dict={"distillation_helper": distillation_helper},
)
model.train()
input = torch.randn(1)
output = model(input)
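        # TestHelper preprocesses the student input to input+1 and the teacher
        # input to input+2; AddLayers adds 1 per layer, so student
        # layer0/layer1/output are input+2/+3/+4 and the teacher's are
        # input+3/+4/+5. The combiner then weights output/add/mul by
        # 0.1/0.5/10.0.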
torch.testing.assert_close(output["output"], (input + 4.0) * 0.1)
torch.testing.assert_close(output["add"], ((input + 2.0) + (input + 3.0)) * 0.5)
torch.testing.assert_close(output["mul"], (input + 3.0) * (input + 4.0) * 10.0)
def test_kd_remove_dynamic_mixin(self):
"""Check removing dynamic mixin removes cached layers"""
distillation_helper = TestHelper(cfg=CfgNode(), teacher=AddLayers())
model = AddLayers()
dynamic_mixin(
model,
KnowledgeDistillation,
init_dict={"distillation_helper": distillation_helper},
)
remove_dynamic_mixin(model)
for module in model.modules():
self.assertFalse(hasattr(module, "cache"))
def test_da_inference(self):
"""Check inference defaults to student (and preprocessing)"""
distillation_helper = TestDAHelper(cfg=CfgNode(), teacher=nn.Identity())
model = AddLayers()
dynamic_mixin(
model,
DomainAdaptation,
init_dict={"distillation_helper": distillation_helper},
)
model.eval()
input = {"real": torch.randn(1), "synthetic": torch.randn(1)}
output = model(input)
torch.testing.assert_close(output, input["real"] + 3.0)
def test_da_train(self):
"""Check train pass results in updated loss output"""
distillation_helper = TestDAHelper(cfg=CfgNode(), teacher=nn.Identity())
model = AddLayers()
dynamic_mixin(
model,
DomainAdaptation,
init_dict={"distillation_helper": distillation_helper},
)
model.train()
input = {"real": torch.randn(1), "synthetic": torch.randn(1)}
output = model(input)
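        # TestDAHelper routes input["real"] through domain0 and
        # input["synthetic"] through domain1 of the same AddLayers model
        # (output = x + 3, layer0 = x + 1), then weights real/synthetic/add
        # by 0.1/0.5/10.0.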
self.assertEqual(set(output.keys()), {"real", "synthetic", "add"})
torch.testing.assert_close(output["real"], (input["real"] + 3.0) * 0.1)
torch.testing.assert_close(
output["synthetic"], (input["synthetic"] + 3.0) * 0.5
)
torch.testing.assert_close(
output["add"], ((input["real"] + 1.0) + (input["synthetic"] + 1.0)) * 10.0
)
def test_da_remove_dynamic_mixin(self):
"""Check removing dynamic mixin removes cached layers"""
distillation_helper = TestHelper(cfg=CfgNode(), teacher=nn.Identity())
model = AddLayers()
dynamic_mixin(
model,
DomainAdaptation,
init_dict={"distillation_helper": distillation_helper},
)
remove_dynamic_mixin(model)
for module in model.modules():
self.assertFalse(hasattr(module, "cache"))
class TestDistillationModelingHook(unittest.TestCase):
_build_teacher_ref = "d2go.modeling.distillation._build_teacher"
def test_exists(self):
"""Check that the hook is registered"""
self.assertTrue("DistillationModelingHook" in mh.MODELING_HOOK_REGISTRY)
def test_init(self):
"""Check that we can build hook"""
cfg = _get_default_cfg()
with mock.patch(self._build_teacher_ref):
DistillationModelingHook(cfg)
def test_apply(self):
"""Check new model has distillation methods"""
model = Noop()
model.test_attr = "12345"
cfg = _get_default_cfg()
cfg.DISTILLATION.HELPER = "TestHelper"
with mock.patch(self._build_teacher_ref):
hook = DistillationModelingHook(cfg)
hook.apply(model)
# set teacher manually to override _build_teacher
model.pseudo_labeler.teacher = DivideInputBy2()
# check distillation attrs
self.assertTrue(isinstance(model.distillation_helper, TestHelper))
self.assertEqual(model._original_model_class, Noop)
# check retains attrs
self.assertTrue(hasattr(model, "test_attr"))
self.assertEqual(model.test_attr, "12345")
# check inference uses the baseline model which is a noop
batched_inputs = _get_input_data(n=2)
model.eval()
gt = batched_inputs.detach().clone()
output = model(batched_inputs)
torch.testing.assert_close(output, gt)
# check training uses the pseudo labeler
model.train()
gt = [x / 2.0 for x in batched_inputs]
output = model(batched_inputs)
torch.testing.assert_close(output, gt)
def test_unapply(self):
"""Check removing distillation"""
model = Noop()
cfg = _get_default_cfg()
with mock.patch(self._build_teacher_ref):
hook = DistillationModelingHook(cfg)
hook.apply(model)
hook.unapply(model)
for distillation_attr in [
"distillation_helper",
"_original_model_class",
]:
self.assertFalse(hasattr(model, distillation_attr))
# check forward is the original noop
batched_inputs = _get_input_data(n=2)
gt = batched_inputs.detach().clone()
model.train()
output = model(batched_inputs)
torch.testing.assert_close(output, gt)
class TestDistillationMiscTests(unittest.TestCase):
def test_teacher_outside_updated_parameters(self):
"""
Check that teacher values are ignored when updating student
The teacher can often be referenced in the mixed in model. A common
example is when the teacher is an attributed of the distillation
helper.
=> DistillationModel.distillation_helper.teacher
This raises the question of whether the teacher model will be affected
by calls to the mixed in model:
DisillationModel.train() => does teacher switch to training?
setup_qat(DistillationModel) => will fuse occur on the teacher modules?
The answer to these questions should be no as we want the teacher to remain static
during training (unless specified). This is the case as long as teacher is an
attribute of a non-module class (e.g., distillation_helper). This is because
modules are registered in PyTorch as part of __setattr__. __setattr__ only checks
if the value is a module or parameter. If the value is an object
(e.g., distillation_helper) which contains modules, these modules are ignored.
https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module.register_parameter
This unittest builds the teacher model and checks that only the student
parameter is registered.
"""
cfg = _get_default_cfg()
cfg.MODEL.META_ARCHITECTURE = "TestMetaArchAddRand"
prebuilt_teacher = BaseRunner().build_model(cfg)
with make_temp_directory("tmp") as output_dir:
checkpointer = DetectionCheckpointer(prebuilt_teacher, save_dir=output_dir)
checkpointer.save("checkpoint")
cfg.MODEL.WEIGHTS = f"{output_dir}/checkpoint.pth"
config_fname = f"{output_dir}/config.yaml"
with PathManager.open(config_fname, "w") as f:
f.write(cfg.dump())
cfg.DISTILLATION.TEACHER.TYPE = "config"
cfg.DISTILLATION.TEACHER.CONFIG_FNAME = config_fname
cfg.DISTILLATION.HELPER = "TestHelper"
cfg.MODEL.MODELING_HOOKS = ["DistillationModelingHook"]
distilled_model = BaseRunner().build_model(cfg)
self.assertEqual(len(list(distilled_model.parameters())), 1)
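def _demo_module_hidden_in_plain_object():
    """Illustrative sketch (not part of the original test suite): a module
    reachable only through a plain-object attribute is ignored by
    nn.Module.__setattr__, so it never appears in parameters(). This is the
    mechanism the docstring above relies on to keep the teacher unregistered."""
    class Holder:  # plain object, not an nn.Module
        def __init__(self, module):
            self.module = module
    class Student(nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = nn.Parameter(torch.rand(1))
            # hiding the Linear behind a plain object keeps it out of
            # parameters(); assigning it directly would register it
            self.hidden = Holder(nn.Linear(2, 2))
    assert len(list(Student().parameters())) == 1  # only self.weight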
class TestDistillationDefaults(unittest.TestCase):
def test_kd_image_classification_layer_losses(self):
"""Check the default returns a list of layerlossmetadata"""
layer_losses = get_default_kd_image_classification_layer_losses()
self.assertTrue(isinstance(layer_losses, List))
self.assertTrue(isinstance(layer_losses[0], LayerLossMetadata))
def test_default_loss_combiner(self):
"""Check combiner multiplies loss by weights"""
weights = {"a": torch.randn(1), "b": torch.randn(1)}
combiner = DefaultLossCombiner(weights)
input = {"a": 1.0, "b": 10.0}
output = combiner(input)
torch.testing.assert_close(output["a"], input["a"] * weights["a"])
torch.testing.assert_close(output["b"], input["b"] * weights["b"])
|
d2go-main
|
tests/modeling/test_modeling_distillation.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from detectron2.modeling.box_regression import Box2BoxTransform
class TestBox2BoxTransform(unittest.TestCase):
def test_box2box_transform(self):
"""Match unit test UtilsBoxesTest.TestBboxTransformRandom in
caffe2/operators/generate_proposals_op_util_boxes_test.cc
"""
box2box_transform = Box2BoxTransform(weights=(1.0, 1.0, 1.0, 1.0))
bbox = torch.from_numpy(
np.array(
[
175.62031555,
20.91103172,
253.352005,
155.0145874,
169.24636841,
4.85241556,
228.8605957,
105.02092743,
181.77426147,
199.82876587,
192.88427734,
214.0255127,
174.36262512,
186.75761414,
296.19091797,
231.27906799,
22.73153877,
92.02596283,
135.5695343,
208.80291748,
]
)
.astype(np.float32)
.reshape(-1, 4)
)
deltas = torch.from_numpy(
np.array(
[
0.47861834,
0.13992102,
0.14961673,
0.71495209,
0.29915856,
-0.35664671,
0.89018666,
0.70815367,
-0.03852064,
0.44466892,
0.49492538,
0.71409376,
0.28052918,
0.02184832,
0.65289006,
1.05060139,
-0.38172557,
-0.08533806,
-0.60335309,
0.79052375,
]
)
.astype(np.float32)
.reshape(-1, 4)
)
gt_bbox = (
np.array(
[
206.949539,
-30.715202,
297.387665,
244.448486,
143.871216,
-83.342888,
290.502289,
121.053398,
177.430283,
198.666245,
196.295273,
228.703079,
152.251892,
145.431564,
387.215454,
274.594238,
5.062420,
11.040955,
66.328903,
269.686218,
]
)
.astype(np.float32)
.reshape(-1, 4)
)
# Detectron2 removed box plus one
bbox[:, 2] += 1
bbox[:, 3] += 1
gt_bbox[:, 2] += 1
gt_bbox[:, 3] += 1
results = box2box_transform.apply_deltas(deltas, bbox)
np.testing.assert_allclose(results.detach().numpy(), gt_bbox, atol=1e-4)
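        # Hedged summary of the delta parameterization exercised above
        # (standard R-CNN box regression; Box2BoxTransform is authoritative):
        #   ctr_x' = dx * w + ctr_x    w' = exp(dw) * w
        #   ctr_y' = dy * h + ctr_y    h' = exp(dh) * h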
if __name__ == "__main__":
unittest.main()
|
d2go-main
|
tests/modeling/test_box2box_transform.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch.nn as nn
from d2go.model_zoo import model_zoo
class TestD2GoModelZoo(unittest.TestCase):
def test_model_zoo_pretrained(self):
configs = list(model_zoo._ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX.keys())
for cfgfile in configs:
model = model_zoo.get(cfgfile, trained=True)
self.assertTrue(isinstance(model, nn.Module))
if __name__ == "__main__":
unittest.main()
|
d2go-main
|
tests/modeling/test_model_zoo.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
import torch
from d2go.runner.default_runner import GeneralizedRCNNRunner
from d2go.tools.exporter import main
from d2go.utils.testing.data_loader_helper import create_local_dataset
from d2go.utils.testing.rcnn_helper import get_quick_test_config_opts
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.oss_utils import is_oss
def maskrcnn_export_caffe2_vs_torchvision_opset_format_example(self):
with make_temp_directory("export_demo") as tmp_dir:
# use a fake dataset for ci
dataset_name = create_local_dataset(tmp_dir, 5, 224, 224)
config_list = [
"DATASETS.TRAIN",
(dataset_name,),
"DATASETS.TEST",
(dataset_name,),
]
# START_WIKI_EXAMPLE_TAG
cfg = GeneralizedRCNNRunner.get_default_cfg()
cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
cfg.merge_from_list(get_quick_test_config_opts())
cfg.merge_from_list(config_list)
# equivalent to running:
        # exporter.par --runner GeneralizedRCNNRunner --config-file config.yaml --predictor-types torchscript torchscript@c2_ops --output-dir tmp_dir
_ = main(
cfg,
tmp_dir,
GeneralizedRCNNRunner,
predictor_types=["torchscript@c2_ops", "torchscript"],
)
        # the paths can be fetched from the return value of main; here we just use hard-coded values
torchvision_ops_model = torch.jit.load(
os.path.join(tmp_dir, "torchscript", "model.jit")
)
caffe2_ops_model = torch.jit.load(
os.path.join(tmp_dir, "torchscript@c2_ops", "model.jit")
)
# Running inference using torchvision-style format
image = torch.zeros(1, 64, 96) # chw 3D tensor
# The exported model can run on both cpu/gpu
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torchvision_ops_model = torchvision_ops_model.to(device)
torchvision_style_outputs = torchvision_ops_model(
image
) # suppose N instances are detected
        # NOTE: the outputs are flattened tensors of the real output (which is
        # a dict); they're ordered by dict key, which is deterministic for a
        # given model but can be hard to figure out from the model.jit file
        # alone. The predictor_info.json in the same directory contains the
        # `outputs_schema`, which indicates how the final output is constructed
        # from the flattened tensors.
(
pred_boxes, # torch.Size([N, 4])
pred_classes, # torch.Size([N])
pred_masks, # torch.Size([N, 1, Hmask, Wmask])
scores, # torch.Size([N])
image_sizes, # torch.Size([2])
) = torchvision_style_outputs
self.assertTrue(
all(
x.device == torch.device(device) for x in torchvision_style_outputs[:4]
),
torchvision_style_outputs,
)
torch.testing.assert_close(image_sizes, torch.tensor([64, 96]))
# Running inference using caffe2-style format
data = torch.zeros(1, 1, 64, 96)
im_info = torch.tensor([[64, 96, 1.0]])
caffe2_style_outputs = caffe2_ops_model([data, im_info])
        # NOTE: the output order is determined by the order in which tensors
        # are created in the forward function; it also follows the order of
        # the original Caffe2 model.
roi_bbox_nms = caffe2_style_outputs[0] # torch.Size([N, 4])
roi_score_nms = caffe2_style_outputs[1] # torch.Size([N])
roi_class_nms = caffe2_style_outputs[2] # torch.Size([N])
mask_fcn_probs = caffe2_style_outputs[3] # torch.Size([N, Cmask, Hmask, Wmask])
# relations between torchvision-style outputs and caffe2-style outputs
torch.testing.assert_close(pred_boxes, roi_bbox_nms, check_device=False)
torch.testing.assert_close(
pred_classes, roi_class_nms.to(torch.int64), check_device=False
)
torch.testing.assert_close(
pred_masks,
mask_fcn_probs[:, roi_class_nms.to(torch.int64), :, :],
check_device=False,
)
torch.testing.assert_close(scores, roi_score_nms, check_device=False)
# END_WIKI_EXAMPLE_TAG
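def _peek_outputs_schema(predictor_dir):
    """Hypothetical helper (illustration only): load the `outputs_schema`
    mentioned in the NOTE above from predictor_info.json. The only assumption
    is that the file is valid JSON sitting next to model.jit; its exact layout
    is not pinned down here."""
    import json
    with open(os.path.join(predictor_dir, "predictor_info.json")) as f:
        info = json.load(f)
    return info.get("outputs_schema")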
class TestOptimizer(unittest.TestCase):
@unittest.skipIf(is_oss(), "Caffe2 is not available for OSS")
def test_maskrcnn_export_caffe2_vs_torchvision_opset_format_example(self):
maskrcnn_export_caffe2_vs_torchvision_opset_format_example(self)
|
d2go-main
|
tests/modeling/test_rcnn_export_example.py
|
#!/usr/bin/env python3
import os
import tempfile
import unittest
from collections import defaultdict
import torch
from d2go.evaluation.evaluator import inference_on_dataset, ResultCache
from detectron2.evaluation import DatasetEvaluator, DatasetEvaluators
class EvaluatorForTest(DatasetEvaluator):
def __init__(self):
self.results = []
def reset(self):
self.results.clear()
def process(self, inputs, outputs):
self.results.append(outputs)
def evaluate(self):
return sum(self.results)
class EvaluatorWithCheckpointForTest(DatasetEvaluator):
def __init__(self, save_dir):
self.results = []
self.result_cache = ResultCache(save_dir)
self._call_count = defaultdict(int)
def reset(self):
self.results.clear()
self._call_count["reset"] += 1
def has_finished_process(self):
return self.result_cache.has_cache()
def process(self, inputs, outputs):
assert not self.result_cache.has_cache()
self.results.append(outputs)
self._call_count["process"] += 1
def evaluate(self):
if not self.result_cache.has_cache():
self.result_cache.save(self.results)
else:
self.results = self.result_cache.load()
self._call_count["evaluate"] += 1
return sum(self.results)
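# The checkpoint pattern above: process() runs only when no cache exists;
# evaluate() either saves fresh results or reloads cached ones, so a second
# inference_on_dataset call can skip per-batch work entirely (exercised in
# test_inference_with_checkpoint below).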
class Model(torch.nn.Module):
def forward(self, x):
return x
class TestEvaluator(unittest.TestCase):
def test_inference(self):
model = Model()
evaluator = EvaluatorForTest()
data_loader = [1, 2, 3, 4, 5]
results = inference_on_dataset(model, data_loader, evaluator)
self.assertEqual(results, 15)
def test_inference_with_checkpoint(self):
with tempfile.TemporaryDirectory() as save_dir:
model = Model()
evaluator = EvaluatorWithCheckpointForTest(save_dir)
self.assertFalse(evaluator.has_finished_process())
data_loader = [1, 2, 3, 4, 5]
results = inference_on_dataset(model, data_loader, evaluator)
self.assertEqual(results, 15)
self.assertEqual(evaluator._call_count["reset"], 1)
self.assertEqual(evaluator._call_count["process"], 5)
self.assertEqual(evaluator._call_count["evaluate"], 1)
# run again with cache
self.assertTrue(evaluator.has_finished_process())
results = inference_on_dataset(model, data_loader, evaluator)
self.assertEqual(results, 15)
self.assertEqual(evaluator._call_count["reset"], 2)
self.assertEqual(evaluator._call_count["process"], 5)
self.assertEqual(evaluator._call_count["evaluate"], 2)
self.assertTrue(os.path.isfile(evaluator.result_cache.cache_file))
def test_evaluators_patch(self):
with tempfile.TemporaryDirectory() as save_dir:
cp_evaluator = EvaluatorWithCheckpointForTest(save_dir)
evaluator = DatasetEvaluators([cp_evaluator])
self.assertFalse(evaluator.has_finished_process())
cp_evaluator.reset()
cp_evaluator.process(1, 1)
cp_evaluator.evaluate()
self.assertTrue(evaluator.has_finished_process())
|
d2go-main
|
tests/evaluation/test_evaluator.py
|
d2go-main
|
tests/evaluation/__init__.py
|
|
#!/usr/bin/env python3
import unittest
import torch
from d2go.evaluation.prediction_count_evaluation import PredictionCountEvaluator
from detectron2.structures.instances import Instances
class TestPredictionCountEvaluation(unittest.TestCase):
def setUp(self):
self.evaluator = PredictionCountEvaluator()
image_size = (224, 224)
self.mock_outputs = [
{"instances": Instances(image_size, scores=torch.Tensor([0.9, 0.8, 0.7]))},
{"instances": Instances(image_size, scores=torch.Tensor([0.9, 0.8, 0.7]))},
{"instances": Instances(image_size, scores=torch.Tensor([0.9, 0.8]))},
{"instances": Instances(image_size, scores=torch.Tensor([0.9, 0.8]))},
{"instances": Instances(image_size, scores=torch.Tensor([0.9]))},
]
# PredictionCountEvaluator does not depend on inputs
self.mock_inputs = [None] * len(self.mock_outputs)
def test_process_evaluate_reset(self):
self.assertEqual(len(self.evaluator.prediction_counts), 0)
self.assertEqual(len(self.evaluator.confidence_scores), 0)
# Test that `process` registers the outputs.
self.evaluator.process(self.mock_inputs, self.mock_outputs)
self.assertListEqual(self.evaluator.prediction_counts, [3, 3, 2, 2, 1])
self.assertEqual(len(self.evaluator.confidence_scores), 11)
# Test that `evaluate` returns the correct metrics.
output_metrics = self.evaluator.evaluate()
self.assertDictAlmostEqual(
output_metrics,
{
"false_positives": {
"predictions_per_image": 11 / 5,
"confidence_per_prediction": (0.9 * 5 + 0.8 * 4 + 0.7 * 2) / 11,
}
},
)
# Test that `reset` clears the evaluator state.
self.evaluator.reset()
self.assertEqual(len(self.evaluator.prediction_counts), 0)
self.assertEqual(len(self.evaluator.confidence_scores), 0)
def assertDictAlmostEqual(self, dict1, dict2):
keys1 = list(dict1.keys())
keys2 = list(dict2.keys())
# Assert lists are equal, irrespective of ordering
self.assertCountEqual(keys1, keys2)
for k, v1 in dict1.items():
v2 = dict2[k]
if isinstance(v2, list):
self.assertListEqual(v1, v2)
elif isinstance(v2, dict):
self.assertDictAlmostEqual(v1, v2)
else:
self.assertAlmostEqual(v1, v2)
|
d2go-main
|
tests/evaluation/test_prediction_count_evaluation.py
|
d2go-main
|
tests/export/__init__.py
|
|
import unittest
from d2go.export.torchscript import (
MobileOptimizationConfig,
update_export_kwargs_from_export_method,
)
@update_export_kwargs_from_export_method
def mock_export(cls, model, input_args, save_path, export_method, **export_kwargs):
# Return the export kwargs, so that we can check to make sure it's set as expected
return export_kwargs
class TestTorchscriptExportMethods(unittest.TestCase):
def test_update_export_kwargs_from_export_method(self):
_empty_export_kwargs = {}
def try_mock_export(export_method: str, export_kwargs=_empty_export_kwargs):
return mock_export(
cls=None,
model=None,
input_args=None,
save_path=None,
export_method=export_method,
**export_kwargs,
)
export_method_string = "torchscript"
new_export_kwargs = try_mock_export(export_method_string)
self.assertNotIn("mobile_optimization", new_export_kwargs)
export_method_string = "torchscript_mobile"
new_export_kwargs = try_mock_export(export_method_string)
self.assertIn("mobile_optimization", new_export_kwargs)
        self.assertEqual(
            type(new_export_kwargs["mobile_optimization"]),
            MobileOptimizationConfig,
        )
        self.assertEqual(new_export_kwargs["mobile_optimization"].backend, "CPU")
        export_method_string = "torchscript_mobile-metal"
        new_export_kwargs = try_mock_export(export_method_string)
        self.assertEqual(new_export_kwargs["mobile_optimization"].backend, "metal")
        export_method_string = "torchscript_mobile-vulkan"
        new_export_kwargs = try_mock_export(export_method_string)
        self.assertEqual(new_export_kwargs["mobile_optimization"].backend, "vulkan")
        export_method_string = "torchscript_mobile@tracing"
        new_export_kwargs = try_mock_export(export_method_string)
        self.assertEqual(new_export_kwargs["jit_mode"], "trace")
        export_method_string = "torchscript_mobile@scripting"
        new_export_kwargs = try_mock_export(export_method_string)
        self.assertEqual(new_export_kwargs["jit_mode"], "script")
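# Hedged summary of the export-method string grammar exercised above (inferred
# from the assertions; update_export_kwargs_from_export_method is
# authoritative): "torchscript_mobile" turns on mobile optimization with a CPU
# backend, "-metal"/"-vulkan" suffixes pick the GPU backend, and
# "@tracing"/"@scripting" suffixes select jit_mode "trace"/"script".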
|
d2go-main
|
tests/export/test_torchscript.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import unittest
from typing import List
import torch
import torch.nn as nn
from d2go.export.api import FuncInfo, PredictorExportConfig
from d2go.export.exporter import convert_and_export_predictor
from d2go.export.torchscript import (
DefaultTorchscriptExport,
TracingAdaptedTorchscriptExport,
)
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.predictor.api import create_predictor
from parameterized import parameterized
class SimpleModel(nn.Module):
def forward(self, x):
return 2 * x
def prepare_for_export(self, cfg, inputs, predictor_type):
# pre/post processing and run_func are default values
return PredictorExportConfig(
model=self,
# model(x) -> model(*(x,))
data_generator=lambda x: (x,),
)
class TwoPartSimpleModel(nn.Module):
"""
    Suppose there is some function in the middle that can't be traced; therefore
    we need to export the model as two parts.
"""
def __init__(self):
super().__init__()
self.part1 = SimpleModel()
self.part2 = SimpleModel()
def forward(self, x):
x = self.part1(x)
x = TwoPartSimpleModel.non_traceable_func(x)
x = self.part2(x)
return x
def prepare_for_export(self, cfg, inputs, predictor_type):
def data_generator(x):
part1_args = (x,)
x = self.part1(x)
x = TwoPartSimpleModel.non_traceable_func(x)
part2_args = (x,)
return {"part1": part1_args, "part2": part2_args}
return PredictorExportConfig(
model={"part1": self.part1, "part2": self.part2},
data_generator=data_generator,
run_func_info=FuncInfo.gen_func_info(TwoPartSimpleModel.RunFunc, params={}),
)
@staticmethod
def non_traceable_func(x):
return x + 1 if len(x.shape) > 3 else x - 1
class RunFunc(object):
def __call__(self, model, x):
assert isinstance(model, dict)
x = model["part1"](x)
x = TwoPartSimpleModel.non_traceable_func(x)
x = model["part2"](x)
return x
class ScriptingOnlyModel(nn.Module):
"""
    Example of a model that requires scripting (e.g., it has a data-dependent control loop).
"""
def forward(self, inputs: List[torch.Tensor]) -> List[torch.Tensor]:
outputs = []
for i, t in enumerate(inputs):
outputs.append(t * i)
return outputs
def prepare_for_export(self, cfg, inputs, predictor_type):
if cfg == "explicit":
return PredictorExportConfig(
model=self,
data_generator=None, # data is not needed for scripting
model_export_kwargs={
"jit_mode": "script"
}, # explicitly using script mode
)
elif cfg == "implicit":
# Sometime user wants to switch between scripting and tracing without
# touching the PredictorExportConfig
return PredictorExportConfig(
model=self,
data_generator=None, # data is not needed for scripting
)
raise NotImplementedError()
class TestExportAPI(unittest.TestCase):
def _export_simple_model(self, cfg, model, data, output_dir, predictor_type):
predictor_path = convert_and_export_predictor(
cfg,
model,
predictor_type=predictor_type,
output_dir=output_dir,
data_loader=iter([data] * 3),
)
self.assertTrue(os.path.isdir(predictor_path))
# also test loading predictor
predictor = create_predictor(predictor_path)
return predictor
def test_simple_model(self):
with make_temp_directory("test_simple_model") as tmp_dir:
model = SimpleModel()
predictor = self._export_simple_model(
None, model, torch.tensor(1), tmp_dir, predictor_type="torchscript"
)
x = torch.tensor(42)
self.assertEqual(predictor(x), model(x))
def test_simple_two_part_model(self):
with make_temp_directory("test_simple_two_part_model") as tmp_dir:
model = TwoPartSimpleModel()
predictor = self._export_simple_model(
None, model, torch.tensor(1), tmp_dir, predictor_type="torchscript"
)
x = torch.tensor(42)
self.assertEqual(predictor(x), model(x))
def test_script_only_model(self):
def _validate(predictor):
outputs = predictor([torch.tensor(1), torch.tensor(2), torch.tensor(3)])
self.assertEqual(len(outputs), 3)
self.assertEqual(
outputs, [torch.tensor(0), torch.tensor(2), torch.tensor(6)]
)
# Method 1: explicitly set jit_mode to "trace"
with make_temp_directory("test_test_script_only_model") as tmp_dir:
model = ScriptingOnlyModel()
predictor = self._export_simple_model(
"explicit", model, None, tmp_dir, predictor_type="torchscript"
)
_validate(predictor)
# Method 2: using torchscript@scripting as predictor type
with make_temp_directory("test_test_script_only_model") as tmp_dir:
model = ScriptingOnlyModel()
predictor = self._export_simple_model(
"implicit", model, None, tmp_dir, predictor_type="torchscript@scripting"
)
_validate(predictor)
class MultiTensorInSingleTensorOut(nn.Module):
def forward(self, x, y):
return x + y
@staticmethod
def get_input_args():
return (torch.tensor([2]), torch.tensor([3]))
@staticmethod
def check_outputs(new_output, original_output):
torch.testing.assert_close(new_output, torch.tensor([5]))
# NOTE: caffe2 wrapper assumes tensors are fp32
class SingleListInSingleListOut(nn.Module):
def forward(self, inputs):
x, y = inputs
return [x + y]
@staticmethod
def get_input_args():
inputs = [torch.tensor([2.0]), torch.tensor([3.0])]
return (inputs,)
@staticmethod
def check_outputs(new_output, original_output):
assert len(new_output) == 1
torch.testing.assert_close(new_output[0], torch.tensor([5.0]))
class MultiDictInMultiDictOut(nn.Module):
def forward(self, x, y):
first = {"add": x["first"] + y["first"], "sub": x["first"] - y["first"]}
second = {"add": x["second"] + y["second"], "sub": x["second"] - y["second"]}
return [first, second]
@staticmethod
def get_input_args():
return (
{"first": torch.tensor([1]), "second": torch.tensor([2])}, # x
{"first": torch.tensor([3]), "second": torch.tensor([4])}, # y
)
@staticmethod
def check_outputs(new_output, original_output):
first, second = original_output
torch.testing.assert_close(first["add"], torch.tensor([4]))
torch.testing.assert_close(first["sub"], torch.tensor([-2]))
torch.testing.assert_close(second["add"], torch.tensor([6]))
torch.testing.assert_close(second["sub"], torch.tensor([-2]))
MODEL_EXPORT_METHOD_TEST_CASES = [
[DefaultTorchscriptExport, MultiTensorInSingleTensorOut],
[DefaultTorchscriptExport, SingleListInSingleListOut],
[TracingAdaptedTorchscriptExport, MultiTensorInSingleTensorOut],
[TracingAdaptedTorchscriptExport, SingleListInSingleListOut],
[TracingAdaptedTorchscriptExport, MultiDictInMultiDictOut],
]
try:
from d2go.export.fb.caffe2 import DefaultCaffe2Export
MODEL_EXPORT_METHOD_TEST_CASES.extend(
[
# [DefaultCaffe2Export, MultiTensorInSingleTensorOut], # TODO: make caffe2 support this
[DefaultCaffe2Export, SingleListInSingleListOut],
]
)
except ImportError:
pass
class TestModelExportMethods(unittest.TestCase):
@parameterized.expand(
MODEL_EXPORT_METHOD_TEST_CASES,
name_func=lambda testcase_func, param_num, param: (
"{}_{}_{}".format(
testcase_func.__name__, param.args[0].__name__, param.args[1].__name__
)
),
)
def test_interface(self, model_export_method, test_model_class):
model = test_model_class()
input_args = test_model_class.get_input_args()
output_checker = test_model_class.check_outputs
model_export_method.test_export_and_load(
model, input_args, None, {}, output_checker
)
|
d2go-main
|
tests/export/test_api.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import shutil
import tempfile
import unittest
import torch
from d2go.data.disk_cache import DiskCachedList, ROOT_CACHE_DIR
from d2go.data.utils import configure_dataset_creation
from d2go.runner import create_runner
from d2go.utils.testing.data_loader_helper import (
create_detection_data_loader_on_toy_dataset,
register_toy_coco_dataset,
)
class TestD2GoDatasetMapper(unittest.TestCase):
"""
This class test D2GoDatasetMapper which is used to build
data loader in GeneralizedRCNNRunner (the default runner) in Detectron2Go.
"""
def setUp(self):
self.output_dir = tempfile.mkdtemp(prefix="TestD2GoDatasetMapper_")
self.addCleanup(shutil.rmtree, self.output_dir)
def test_default_dataset(self):
runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
cfg = runner.get_default_cfg()
cfg.DATASETS.TRAIN = ["default_dataset_train"]
cfg.DATASETS.TEST = ["default_dataset_test"]
cfg.OUTPUT_DIR = self.output_dir
with register_toy_coco_dataset("default_dataset_train", num_images=3):
train_loader = runner.build_detection_train_loader(cfg)
for i, data in enumerate(train_loader):
self.assertIsNotNone(data)
# for training loader, it has infinite length
if i == 6:
break
with register_toy_coco_dataset("default_dataset_test", num_images=3):
test_loader = runner.build_detection_test_loader(
cfg, dataset_name="default_dataset_test"
)
all_data = []
for data in test_loader:
all_data.append(data)
self.assertEqual(len(all_data), 3)
class _MyClass(object):
def __init__(self, x):
self.x = x
def do_something(self):
return
class TestDiskCachedDataLoader(unittest.TestCase):
def setUp(self):
# make sure the ROOT_CACHE_DIR is empty when entering the test
if os.path.exists(ROOT_CACHE_DIR):
shutil.rmtree(ROOT_CACHE_DIR)
self.output_dir = tempfile.mkdtemp(prefix="TestDiskCachedDataLoader_")
self.addCleanup(shutil.rmtree, self.output_dir)
def _count_cache_dirs(self):
if not os.path.exists(ROOT_CACHE_DIR):
return 0
return len(os.listdir(ROOT_CACHE_DIR))
def test_disk_cached_dataset_from_list(self):
"""Test the class of DiskCachedList"""
        # check the disk cache can handle different data types
lst = [1, torch.tensor(2), _MyClass(3)]
disk_cached_lst = DiskCachedList(lst)
self.assertEqual(len(disk_cached_lst), 3)
self.assertEqual(disk_cached_lst[0], 1)
self.assertEqual(disk_cached_lst[1].item(), 2)
self.assertEqual(disk_cached_lst[2].x, 3)
# check the cache is created
cache_dir = disk_cached_lst.cache_dir
self.assertTrue(os.path.isdir(cache_dir))
# check the cache is properly released
del disk_cached_lst
self.assertFalse(os.path.isdir(cache_dir))
def test_disk_cached_dataloader(self):
"""Test the data loader backed by disk cache"""
height = 6
width = 8
runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
cfg = runner.get_default_cfg()
cfg.OUTPUT_DIR = self.output_dir
cfg.DATALOADER.NUM_WORKERS = 2
def _test_data_loader(data_loader):
first_batch = next(iter(data_loader))
self.assertTrue(first_batch[0]["height"], height)
self.assertTrue(first_batch[0]["width"], width)
# enable the disk cache
cfg.merge_from_list(["D2GO_DATA.DATASETS.DISK_CACHE.ENABLED", "True"])
with configure_dataset_creation(cfg):
# no cache dir in the beginning
self.assertEqual(self._count_cache_dirs(), 0)
with create_detection_data_loader_on_toy_dataset(
cfg, height, width, is_train=True
) as train_loader:
# train loader should create one cache dir
self.assertEqual(self._count_cache_dirs(), 1)
_test_data_loader(train_loader)
with create_detection_data_loader_on_toy_dataset(
cfg, height, width, is_train=False
) as test_loader:
# test loader should create another cache dir
self.assertEqual(self._count_cache_dirs(), 2)
_test_data_loader(test_loader)
# test loader should release its cache
del test_loader
self.assertEqual(self._count_cache_dirs(), 1)
# no cache dir in the end
del train_loader
self.assertEqual(self._count_cache_dirs(), 0)
|
d2go-main
|
tests/data/test_data_loader.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import unittest
from typing import Tuple
import cv2
import numpy as np
import torchvision.transforms as T
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_augmentations, AugInput
def generate_test_data(
source_img: np.ndarray,
angle: float = 0,
translation: float = 0,
scale: float = 1,
shear: float = 0,
fit_in_frame: bool = True,
keep_aspect_ratio: bool = False,
) -> Tuple[str, np.ndarray]:
# Augmentation dictionary
aug_dict = {
"prob": 1.0,
"angle_range": [angle, angle],
"translation_range": [translation, translation],
"scale_range": [scale, scale],
"shear_range": [shear, shear],
"keep_aspect_ratio": keep_aspect_ratio,
"fit_in_frame": fit_in_frame,
}
aug_str = "RandomAffineOp::" + json.dumps(aug_dict)
# Get image info
img_h, img_w = source_img.shape[0:2]
center = [img_w / 2, img_h / 2]
# Compute output_size
max_size = max(img_w, img_h)
out_w, out_h = (img_w, img_h) if keep_aspect_ratio else (max_size, max_size)
if fit_in_frame:
# Warp once to figure scale adjustment
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, [0, 0], 1, [shear, shear]
)
M_inv.extend([0.0, 0.0, 1.0])
M_inv = np.array(M_inv).reshape((3, 3))
M = np.linalg.inv(M_inv)
# Center in output patch
img_corners = np.array(
[
[0, 0, img_w - 1, img_w - 1],
[0, img_h - 1, 0, img_h - 1],
[1, 1, 1, 1],
]
)
new_corners = M @ img_corners
x_range = np.ceil(np.amax(new_corners[0]) - np.amin(new_corners[0]))
y_range = np.ceil(np.amax(new_corners[1]) - np.amin(new_corners[1]))
# Apply translation and scale after centering in output patch
scale_adjustment = min(out_w / x_range, out_h / y_range)
scale *= scale_adjustment
# Adjust output center location
translation_t = [translation, translation]
translation_adjustment = [(out_w - img_w) / 2, (out_h - img_h) / 2]
translation_t[0] += translation_adjustment[0]
translation_t[1] += translation_adjustment[1]
# Test data output generation
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, translation_t, scale, [shear, shear]
)
M_inv = np.array(M_inv).reshape((2, 3))
exp_out_img = cv2.warpAffine(
source_img,
M_inv,
(out_w, out_h),
flags=cv2.WARP_INVERSE_MAP + cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REPLICATE,
)
# Create annotations
test_bbox = [0.25 * img_w, 0.25 * img_h, 0.75 * img_h, 0.75 * img_h]
# Generate segmentation test data
segm_mask = np.zeros_like(source_img)
segm_mask[
int(test_bbox[0]) : int(test_bbox[2]), int(test_bbox[1]) : int(test_bbox[3])
] = 255
exp_out_segm = cv2.warpAffine(
segm_mask,
M_inv,
(out_w, out_h),
flags=cv2.WARP_INVERSE_MAP + cv2.INTER_NEAREST,
borderMode=cv2.BORDER_REPLICATE,
)
# Generate bounding box test data
M_inv = np.vstack([M_inv, [0.0, 0.0, 1.0]])
points = np.array(
[
[test_bbox[0], test_bbox[0], test_bbox[2], test_bbox[2]],
[test_bbox[1], test_bbox[3], test_bbox[1], test_bbox[3]],
]
).T
_xp = warp_points(points, M_inv)
out_bbox = [min(_xp[:, 0]), min(_xp[:, 1]), max(_xp[:, 0]), max(_xp[:, 1])]
return (
aug_str,
AugInput(source_img, boxes=[test_bbox], sem_seg=segm_mask),
(exp_out_img, [out_bbox], exp_out_segm),
)
def warp_points(coords: np.array, xfm_M: np.array):
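    """Map 2D points through the inverse of the given 3x3 affine matrix.

    coords is an (N, 2) array; xfm_M is a 3x3 matrix. Points are lifted to
    homogeneous coordinates, multiplied by inv(xfm_M), and returned as (N, 2).
    """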
coords = coords.T
ones = np.ones((1, coords.shape[1]))
coords = np.vstack((coords, ones))
M = np.linalg.inv(xfm_M)
coords = (M @ coords)[:2, :].T
return coords
class TestDataTransformsAffine(unittest.TestCase):
def _validate_results(self, aug_output, exp_outputs):
exp_img = exp_outputs[0]
self.assertTrue(
np.allclose(exp_img, aug_output.image),
f"Augmented image not the same, expecting\n{exp_img[:,:,0]} \n got\n{aug_output.image[:,:,0]} ",
)
        exp_bboxes = exp_outputs[1]
        self.assertTrue(
            np.allclose(exp_bboxes, aug_output.boxes, atol=0.000001),
            f"Augmented bbox not the same, expecting\n{exp_bboxes} \n got\n{aug_output.boxes} ",
        )
exp_segm = exp_outputs[2]
self.assertTrue(
np.allclose(exp_segm, aug_output.sem_seg),
f"Augmented segm not the same, expecting\n{exp_segm} \n got\n{aug_output.sem_seg[:,:]} ",
)
def test_affine_transforms_angle(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for angle in [45, 90]:
aug_str, aug_input, exp_outputs = generate_test_data(img, angle=angle)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_translation(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for translation in [0, 1, 2]:
# Test image
aug_str, aug_input, exp_outputs = generate_test_data(
img, translation=translation
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_shear(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for shear in [0, 1, 2]:
aug_str, aug_input, exp_outputs = generate_test_data(img, shear=shear)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_scale(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for scale in [0.9, 1, 1.1]:
aug_str, aug_input, exp_outputs = generate_test_data(img, scale=scale)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_angle_non_square(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz - 2, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
for keep_aspect_ratio in [False, True]:
aug_str, aug_input, exp_outputs = generate_test_data(
img, angle=45, keep_aspect_ratio=keep_aspect_ratio
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
def test_affine_transforms_angle_no_fit_to_frame(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img_sz = 11
img = np.zeros((img_sz, img_sz, 3)).astype(np.uint8)
img[((img_sz + 1) // 2) - 1, :, :] = 255
aug_str, aug_input, exp_outputs = generate_test_data(
img, angle=45, fit_in_frame=False
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [aug_str]
tfm = build_transform_gen(default_cfg, is_train=True)
# Test augmentation
aug_output, _ = apply_augmentations(tfm, aug_input)
self._validate_results(aug_output, exp_outputs)
|
d2go-main
|
tests/data/test_data_transforms_affine.py
|
d2go-main
|
tests/data/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import d2go.data.transforms.box_utils as bu
import numpy as np
import torch
from d2go.config import CfgNode
from d2go.data.transforms.build import build_transform_gen
def get_default_config():
cfg = CfgNode()
cfg.D2GO_DATA = CfgNode()
cfg.D2GO_DATA.AUG_OPS = CfgNode()
return cfg
class TestDataTransformsBoxUtils(unittest.TestCase):
def test_min_box_ar(self):
box_xywh = [4, 5, 10, 6]
target_aspect_ratio = 1.0 / 2
new_box = bu.get_min_box_aspect_ratio(box_xywh, target_aspect_ratio)
self.assertArrayEqual(torch.Tensor([4, -2, 10, 20]), new_box)
def test_get_box_from_mask(self):
img_w, img_h = 8, 6
mask = np.zeros([img_h, img_w])
self.assertEqual(mask.shape, (img_h, img_w))
mask[2:4, 3:6] = 1
box = bu.get_box_from_mask(mask)
self.assertEqual(box, (3, 2, 3, 2))
def test_get_box_from_mask_union(self):
img_w, img_h = 8, 6
mask = np.zeros([img_h, img_w])
self.assertEqual(mask.shape, (img_h, img_w))
mask[2:4, 1:4] = 1
mask[5:6, 4:8] = 1
box = bu.get_box_from_mask(mask)
self.assertEqual(box, (1, 2, 7, 4))
def test_get_box_from_mask_empty(self):
img_w, img_h = 8, 6
mask = np.zeros([img_h, img_w])
box = bu.get_box_from_mask(mask)
self.assertIsNone(box)
def test_scale_bbox_center(self):
bbox = torch.Tensor([1, 2, 4, 5])
out_bbox = bu.scale_bbox_center(bu.scale_bbox_center(bbox, 2.0), 0.5)
self.assertArrayEqual(bbox, out_bbox)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
def test_enlarge_bounding_box(self):
default_cfg = get_default_config()
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'EnlargeBoundingBoxOp::{"fixed_pad": 20}',
'EnlargeBoundingBoxOp::{"percentage": 0.2}',
]
enlarge_box_tfm = build_transform_gen(default_cfg, is_train=True)
boxes = np.array(
[[91, 46, 144, 111]],
dtype=np.float64,
)
transformed_bboxs = enlarge_box_tfm[0].apply_box(boxes)
expected_bboxs = np.array(
[[71, 26, 164, 131]],
dtype=np.float64,
)
err_msg = "transformed_bbox = {}, expected {}".format(
transformed_bboxs, expected_bboxs
)
self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
boxes = np.array(
[[91, 46, 144, 111]],
dtype=np.float64,
)
transformed_bboxs = enlarge_box_tfm[1].apply_box(boxes)
expected_bboxs = np.array(
[[85.7, 39.5, 149.3, 117.5]],
dtype=np.float64,
)
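        # percentage=0.2 enlarges each side by half of 20% of the box extent:
        # width 53 -> ±5.3 gives x in [85.7, 149.3]; height 65 -> ±6.5 gives
        # y in [39.5, 117.5]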
err_msg = "transformed_bbox = {}, expected {}".format(
transformed_bboxs, expected_bboxs
)
self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
boxes = np.array(
[[[91, 46], [144, 111]]],
dtype=np.float64,
)
transformed_bboxs = enlarge_box_tfm[1].apply_polygons(boxes)
expected_bboxs = np.array(
[[[85.7, 39.5], [149.3, 117.5]]],
dtype=np.float64,
)
err_msg = "transformed_bbox = {}, expected {}".format(
transformed_bboxs, expected_bboxs
)
self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
dummy_data = np.array(
[[91, 46, 144, 111]],
dtype=np.float64,
)
dummy_data_out = enlarge_box_tfm[1].apply_image(dummy_data)
expected_out = np.array(
[[91, 46, 144, 111]],
dtype=np.float64,
)
err_msg = "Apply image failed"
self.assertTrue(np.allclose(dummy_data_out, expected_out), err_msg)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'EnlargeBoundingBoxOp::{"fixed_pad": 20, "box_only": true}',
]
enlarge_box_tfm = build_transform_gen(default_cfg, is_train=True)
boxes = np.array([[91, 46, 144, 111]])
transformed_bboxs = enlarge_box_tfm[0].apply_coords(boxes)
err_msg = "transformed_bbox = {}, expected {}".format(transformed_bboxs, boxes)
self.assertTrue(np.allclose(transformed_bboxs, boxes), err_msg)
|
d2go-main
|
tests/data/test_data_transforms_box_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_augmentations
class TestDataTransformsBlur(unittest.TestCase):
def test_gaussian_blur_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.zeros((80, 60, 3)).astype(np.uint8)
img[40, 30, :] = 255
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'RandomGaussianBlurOp::{"prob": 1.0, "k": 3, "sigma_range": [0.5, 0.5]}'
]
tfm = build_transform_gen(default_cfg, is_train=True)
trans_img, _ = apply_augmentations(tfm, img)
self.assertEqual(img.shape, trans_img.shape)
self.assertEqual(img.dtype, trans_img.dtype)
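        # expected values: the single 255 pixel spread by a separable 3x3
        # Gaussian with sigma=0.5; diagonal neighbor ~255 * 0.0113 ≈ 3,
        # axial neighbor ~255 * 0.0838 ≈ 21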
self.assertEqual(trans_img[39, 29, 0], 3)
self.assertEqual(trans_img[40, 29, 0], 21)
|
d2go-main
|
tests/data/test_data_transforms_blur.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import d2go.data.transforms.box_utils as bu
import numpy as np
import torch
from d2go.data.transforms import crop as tf_crop
class TestDataTransformsCrop(unittest.TestCase):
def test_transform_crop_extent_transform(self):
img_wh = (16, 11)
sem_seg = np.zeros([img_wh[1], img_wh[0]], dtype=np.uint8)
# h, w
sem_seg[5, 4] = 1
sem_seg[10, 13] = 1
sem_seg[5:11, 4:14] = 1
# src_rect: [x0, y0, x1, y1] in pixel coordinate, output_size: [h, w]
trans = tf_crop.ExtentTransform(src_rect=[4, 5, 14, 11], output_size=[6, 10])
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, torch.Tensor([6, 10]))
self.assertArrayEqual(np.unique(out_mask), torch.Tensor([1]))
trans = tf_crop.ExtentTransform(src_rect=[3, 4, 15, 11], output_size=[7, 12])
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, torch.Tensor([7, 12]))
self.assertArrayEqual(np.unique(out_mask), torch.Tensor([0, 1]))
self.assertArrayEqual(np.unique(out_mask[1:, 1:-1]), torch.Tensor([1]))
self.assertEqual(out_mask[:, 0].sum(), 0)
self.assertArrayEqual(out_mask[0, :].sum(), 0)
self.assertArrayEqual(out_mask[:, -1].sum(), 0)
def test_transform_crop_random_crop_fixed_aspect_ratio(self):
aug = tf_crop.RandomCropFixedAspectRatio([1.0 / 2])
img_wh = (16, 11)
img = np.ones([img_wh[1], img_wh[0], 3], dtype=np.uint8)
sem_seg = np.zeros([img_wh[1], img_wh[0]], dtype=np.uint8)
sem_seg[5, 4] = 1
sem_seg[10, 13] = 1
mask_xywh = bu.get_box_from_mask(sem_seg)
self.assertArrayEqual(mask_xywh, torch.Tensor([4, 5, 10, 6]))
trans = aug.get_transform(img, sem_seg)
self.assertArrayEqual(trans.src_rect, torch.Tensor([4, -2, 14, 18]))
self.assertArrayEqual(trans.output_size, torch.Tensor([20, 10]))
out_img = trans.apply_image(img)
self.assertArrayEqual(out_img.shape, torch.Tensor([20, 10, 3]))
self.assertArrayEqual(np.unique(out_img[2:13, :, :]), torch.Tensor([1]))
self.assertArrayEqual(np.unique(out_img[0:2, :, :]), torch.Tensor([0]))
self.assertArrayEqual(np.unique(out_img[13:, :, :]), torch.Tensor([0]))
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, torch.Tensor([20, 10]))
self.assertEqual(out_mask[7, 0], 1)
self.assertEqual(out_mask[12, -1], 1)
def test_transform_crop_random_crop_fixed_aspect_ratio_scale_offset(self):
aug = tf_crop.RandomCropFixedAspectRatio(
[1.0 / 2], scale_range=[0.5, 0.5], offset_scale_range=[-0.5, -0.5]
)
img_wh = (16, 11)
img = np.ones([img_wh[1], img_wh[0], 3], dtype=np.uint8)
sem_seg = np.zeros([img_wh[1], img_wh[0]], dtype=np.uint8)
sem_seg[5, 4] = 1
sem_seg[10, 13] = 1
sem_seg[5:11, 4:14] = 1
mask_xywh = bu.get_box_from_mask(sem_seg)
self.assertArrayEqual(mask_xywh, torch.Tensor([4, 5, 10, 6]))
trans = aug.get_transform(img, sem_seg)
self.assertArrayEqual(trans.src_rect, torch.Tensor([1.5, 0.0, 6.5, 10.0]))
self.assertArrayEqual(trans.output_size, torch.Tensor([10, 5]))
out_img = trans.apply_image(img)
self.assertArrayEqual(out_img.shape, torch.Tensor([10, 5, 3]))
self.assertEqual(np.unique(out_img), 1)
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, torch.Tensor([10, 5]))
self.assertEqual(np.unique(out_mask[6:, 3:]), 1)
def test_transform_crop_random_crop_fixed_aspect_ratio_empty_mask(self):
"""The sem_mask is empty (the whole image is background)"""
aug = tf_crop.RandomCropFixedAspectRatio([1.0 / 2])
img_wh = (16, 11)
img = np.ones([img_wh[1], img_wh[0], 3], dtype=np.uint8)
sem_seg = np.zeros([img_wh[1], img_wh[0]], dtype=np.uint8)
mask_xywh = bu.get_box_from_mask(sem_seg)
self.assertEqual(mask_xywh, None)
trans = aug.get_transform(img, sem_seg)
self.assertIsInstance(trans, tf_crop.NoOpTransform)
out_img = trans.apply_image(img)
self.assertArrayEqual(out_img.shape, img.shape)
out_mask = trans.apply_segmentation(sem_seg)
self.assertArrayEqual(out_mask.shape, sem_seg.shape)
def test_pad_transform(self):
crop_w, crop_h = 4, 3
full_w, full_h = 11, 9
crop_x, crop_y = 5, 6
trans = tf_crop.PadTransform(crop_x, crop_y, crop_w, crop_h, full_w, full_h)
img = np.ones([crop_h, crop_w])
trans_img = trans.apply_image(img)
self.assertArrayEqual(trans_img.shape, [full_h, full_w])
self.assertArrayEqual(np.unique(trans_img), [0, 1])
full_img_gt = np.zeros([full_h, full_w])
full_img_gt[crop_y : (crop_y + crop_h), crop_x : (crop_x + crop_w)] = 1
self.assertArrayEqual(full_img_gt, trans_img)
def test_crop_transform_inverse(self):
crop_w, crop_h = 4, 3
full_w, full_h = 11, 9
crop_x, crop_y = 5, 6
trans = tf_crop.InvertibleCropTransform(
crop_x, crop_y, crop_w, crop_h, full_w, full_h
)
full_img_gt = np.zeros([full_h, full_w])
full_img_gt[crop_y : (crop_y + crop_h), crop_x : (crop_x + crop_w)] = 1
crop_img_gt = np.ones([crop_h, crop_w])
self.assertArrayEqual(trans.apply_image(full_img_gt), crop_img_gt)
self.assertArrayEqual(trans.inverse().apply_image(crop_img_gt), full_img_gt)
self.assertArrayEqual(
trans.inverse().inverse().apply_image(full_img_gt), crop_img_gt
)
def test_pad_border_divisible_transform(self):
img_h, img_w = 10, 7
divisibility = 8
aug = tf_crop.PadBorderDivisible(divisibility)
img = np.ones([img_h, img_w, 3]) * 3
trans = aug.get_transform(img)
pad_img = trans.apply_image(img)
self.assertEqual(pad_img.shape, (16, 8, 3))
inverse_img = trans.inverse().apply_image(pad_img)
self.assertEqual(inverse_img.shape, (10, 7, 3))
self.assertArrayEqual(img, inverse_img)
mask = np.ones([img_h, img_w]) * 2
pad_mask = trans.apply_segmentation(mask)
self.assertEqual(pad_mask.shape, (16, 8))
inverse_mask = trans.inverse().apply_segmentation(pad_mask)
self.assertEqual(inverse_mask.shape, (10, 7))
self.assertArrayEqual(mask, inverse_mask)
def test_pad_to_square_augmentation(self):
img_h, img_w = 5, 3
aug = tf_crop.PadToSquare(pad_value=255)
img = np.ones([img_h, img_w, 3])
trans = aug.get_transform(img)
pad_img = trans.apply_image(img)
self.assertEqual(pad_img.shape, (5, 5, 3))
def test_random_instance_crop(self):
from detectron2.data import detection_utils as du
from detectron2.data.transforms.augmentation import AugInput, AugmentationList
from detectron2.structures import BoxMode
aug = tf_crop.RandomInstanceCrop([1.0, 1.0])
img_w, img_h = 10, 7
annotations = [
{
"category_id": 0,
"bbox": [1, 1, 4, 3],
"bbox_mode": BoxMode.XYWH_ABS,
},
{
"category_id": 0,
"bbox": [2, 2, 4, 3],
"bbox_mode": BoxMode.XYWH_ABS,
},
{
"category_id": 0,
"bbox": [6, 5, 3, 2],
"bbox_mode": BoxMode.XYWH_ABS,
},
]
img = np.ones([img_h, img_w, 3]) * 3
inputs = AugInput(image=img)
# pass additional arguments
inputs.annotations = annotations
transforms = AugmentationList([aug])(inputs)
self.assertIn(
inputs.image.shape, [torch.Size([3, 4, 3]), torch.Size([2, 3, 3])]
)
        # In the dataset mapper, unused annotations will be filtered out due to
        # the iscrowd flag
image_shape = inputs.image.shape[:2]
annos = [
du.transform_instance_annotations(
obj,
transforms,
image_shape,
)
for obj in annotations
if obj.get("iscrowd", 0) == 0
]
instances = du.annotations_to_instances(annos, image_shape)
filtered_instances = du.filter_empty_instances(instances)
self.assertEqual(len(filtered_instances), 1)
self.assertArrayEqual(
filtered_instances.gt_boxes.tensor.tolist(),
[[0, 0, image_shape[1], image_shape[0]]],
)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
|
d2go-main
|
tests/data/test_data_transforms_crop.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_transform_gens
class TestDataTransforms(unittest.TestCase):
def test_build_transform_gen(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
default_cfg.INPUT.MIN_SIZE_TRAIN = (30,)
default_cfg.INPUT.MIN_SIZE_TEST = 30
trans_train = build_transform_gen(default_cfg, is_train=True)
trans_test = build_transform_gen(default_cfg, is_train=False)
img = np.zeros((80, 60, 3))
trans_img_train, tl_train = apply_transform_gens(trans_train, img)
trans_img_test, tl_test = apply_transform_gens(trans_test, img)
self.assertEqual(trans_img_train.shape, (40, 30, 3))
self.assertEqual(trans_img_test.shape, (40, 30, 3))
def test_build_transform_gen_resize_square(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
default_cfg.INPUT.MIN_SIZE_TRAIN = (30,)
default_cfg.INPUT.MIN_SIZE_TEST = 40
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = ["ResizeShortestEdgeSquareOp"]
default_cfg.D2GO_DATA.AUG_OPS.TEST = ["ResizeShortestEdgeSquareOp"]
trans_train = build_transform_gen(default_cfg, is_train=True)
trans_test = build_transform_gen(default_cfg, is_train=False)
img = np.zeros((80, 60, 3))
trans_img_train, tl_train = apply_transform_gens(trans_train, img)
trans_img_test, tl_test = apply_transform_gens(trans_test, img)
self.assertEqual(trans_img_train.shape, (30, 30, 3))
self.assertEqual(trans_img_test.shape, (40, 40, 3))
|
d2go-main
|
tests/data/test_data_transforms.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_augmentations
class TestDataTransformsAutoAug(unittest.TestCase):
def test_rand_aug_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.concatenate(
[
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
],
axis=2,
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = ['RandAugmentImageOp::{"num_ops": 20}']
tfm = build_transform_gen(default_cfg, is_train=True)
trans_img, _ = apply_augmentations(tfm, img)
self.assertEqual(img.shape, trans_img.shape)
self.assertEqual(img.dtype, trans_img.dtype)
def test_trivial_aug_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.concatenate(
[
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
],
axis=2,
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = ["TrivialAugmentWideImageOp"]
tfm = build_transform_gen(default_cfg, is_train=True)
trans_img, _ = apply_augmentations(tfm, img)
self.assertEqual(img.shape, trans_img.shape)
self.assertEqual(img.dtype, trans_img.dtype)
def test_aug_mix_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.concatenate(
[
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
(np.random.uniform(0, 1, size=(80, 60, 1)) * 255).astype(np.uint8),
],
axis=2,
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = ['AugMixImageOp::{"severity": 3}']
tfm = build_transform_gen(default_cfg, is_train=True)
trans_img, _ = apply_augmentations(tfm, img)
self.assertEqual(img.shape, trans_img.shape)
self.assertEqual(img.dtype, trans_img.dtype)
|
d2go-main
|
tests/data/test_data_transforms_auto_aug.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from d2go.data.transforms import color_yuv as cy
from d2go.data.transforms.build import build_transform_gen
from d2go.runner import Detectron2GoRunner
from detectron2.data.transforms.augmentation import apply_augmentations
class TestDataTransformsColorYUV(unittest.TestCase):
def test_yuv_color_transforms(self):
default_cfg = Detectron2GoRunner.get_default_cfg()
img = np.concatenate(
[
np.random.uniform(0, 1, size=(80, 60, 1)),
np.random.uniform(-0.5, 0.5, size=(80, 60, 1)),
np.random.uniform(-0.5, 0.5, size=(80, 60, 1)),
],
axis=2,
)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'RandomContrastYUVOp::{"intensity_min": 0.3, "intensity_max": 0.5}',
]
low_contrast_tfm = build_transform_gen(default_cfg, is_train=True)
low_contrast, _ = apply_augmentations(low_contrast_tfm, img)
default_cfg.D2GO_DATA.AUG_OPS.TRAIN = [
'RandomSaturationYUVOp::{"intensity_min": 1.5, "intensity_max": 1.7}',
]
high_saturation_tfm = build_transform_gen(default_cfg, is_train=True)
high_saturation, _ = apply_augmentations(high_saturation_tfm, img)
        # Use pixel statistics to roughly check that the transformed images behave as expected
# All channels have less variance
self.assertLess(np.var(low_contrast[:, :, 0]), np.var(img[:, :, 0]))
self.assertLess(np.var(low_contrast[:, :, 1]), np.var(img[:, :, 1]))
self.assertLess(np.var(low_contrast[:, :, 2]), np.var(img[:, :, 2]))
        # 1st channel is unchanged (check mean and variance); 2nd and 3rd channels gain variance
self.assertAlmostEqual(np.mean(high_saturation[:, :, 0]), np.mean(img[:, :, 0]))
self.assertAlmostEqual(np.var(high_saturation[:, :, 0]), np.var(img[:, :, 0]))
self.assertGreater(np.var(high_saturation[:, :, 1]), np.var(img[:, :, 1]))
self.assertGreater(np.var(high_saturation[:, :, 2]), np.var(img[:, :, 2]))
def test_transform_color_yuv_rgbyuv_convert(self):
image = np.arange(256).reshape(16, 16, 1).repeat(3, axis=2).astype(np.uint8)
tf1 = cy.RGB2YUVBT601().get_transform(image)
tf2 = cy.YUVBT6012RGB().get_transform(image)
image_yuv = tf1.apply_image(image)
image_rgb = tf2.apply_image(image_yuv)
self.assertArrayEqual((image_rgb + 0.5).astype(np.uint8), image)
    def test_transform_color_yuv_rgbyuv_convert_inverse(self):
image = np.arange(256).reshape(16, 16, 1).repeat(3, axis=2).astype(np.uint8)
tf = cy.RGB2YUVBT601().get_transform(image)
image_yuv = tf.apply_image(image)
image_rgb = tf.inverse().apply_image(image_yuv)
self.assertArrayEqual((image_rgb + 0.5).astype(np.uint8), image)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
|
d2go-main
|
tests/data/test_data_transforms_color_yuv.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import os
import tempfile
import unittest
import d2go.data.extended_coco as extended_coco
from d2go.data.datasets import ANN_FN, COCO_REGISTER_FUNCTION_REGISTRY, IM_DIR
from d2go.data.keypoint_metadata_registry import (
get_keypoint_metadata,
KEYPOINT_METADATA_REGISTRY,
KeypointMetadata,
)
from d2go.data.utils import (
AdhocDatasetManager,
COCOWithClassesToUse,
maybe_subsample_n_images,
)
from d2go.runner import Detectron2GoRunner
from d2go.utils.testing.data_loader_helper import (
create_toy_dataset,
LocalImageGenerator,
)
from d2go.utils.testing.helper import tempdir
from detectron2.data import DatasetCatalog, MetadataCatalog
from mobile_cv.common.misc.file_utils import make_temp_directory
def create_test_images_and_dataset_json(data_dir, num_images=10, num_classes=-1):
    # create images and the annotation json
image_dir = os.path.join(data_dir, "images")
os.makedirs(image_dir)
json_dataset, meta_data = create_toy_dataset(
LocalImageGenerator(image_dir, width=80, height=60),
num_images=num_images,
num_classes=num_classes,
)
json_file = os.path.join(data_dir, "annotation.json")
with open(json_file, "w") as f:
json.dump(json_dataset, f)
return image_dir, json_file
class TestD2GoDatasets(unittest.TestCase):
def setUp(self):
self._builtin_datasets = set(DatasetCatalog)
def tearDown(self):
# Need to remove injected dataset
injected_dataset = set(DatasetCatalog) - self._builtin_datasets
for ds in injected_dataset:
DatasetCatalog.remove(ds)
MetadataCatalog.remove(ds)
def test_coco_conversions(self):
test_data_0 = {
"info": {},
"imgs": {
"img_1": {
"file_name": "0.jpg",
"width": 600,
"height": 600,
"id": "img_1",
}
},
"anns": {0: {"id": 0, "image_id": "img_1", "bbox": [30, 30, 60, 20]}},
"imgToAnns": {"img_1": [0]},
"cats": {},
}
test_data_1 = copy.deepcopy(test_data_0)
test_data_1["imgs"][123] = test_data_1["imgs"].pop("img_1")
test_data_1["imgs"][123]["id"] = 123
test_data_1["anns"][0]["image_id"] = 123
test_data_1["imgToAnns"][123] = test_data_1["imgToAnns"].pop("img_1")
for test_data, exp_output in [(test_data_0, [0, 0]), (test_data_1, [123, 123])]:
with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
src_json = os.path.join(tmp_dir, "source.json")
out_json = os.path.join(tmp_dir, "output.json")
with open(src_json, "w") as h_in:
json.dump(test_data, h_in)
out_json = extended_coco.convert_coco_text_to_coco_detection_json(
src_json, out_json
)
self.assertEqual(out_json["images"][0]["id"], exp_output[0])
self.assertEqual(out_json["annotations"][0]["image_id"], exp_output[1])
def test_annotation_rejection(self):
img_list = [
{"id": 0, "width": 50, "height": 50, "file_name": "a.png"},
{"id": 1, "width": 50, "height": 50, "file_name": "b.png"},
{"id": 2, "width": 50, "height": 50, "file_name": "b.png"},
]
ann_list = [
[
{
"id": 0,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [0, 0, 10, 10],
},
{
"id": 1,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [45, 45, 10, 10],
},
{
"id": 2,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [-5, -5, 10, 10],
},
{
"id": 3,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 0,
"bbox": [5, 5, 0, 0],
},
{
"id": 4,
"image_id": 0,
"category_id": 0,
"segmentation": [[]],
"area": 25,
"bbox": [5, 5, 5, 5],
},
],
[
{
"id": 5,
"image_id": 1,
"category_id": 0,
"segmentation": [[]],
"area": 100,
"bbox": [0, 0, 0, 0],
},
],
[],
]
out_dict_list = extended_coco.convert_to_dict_list("", [0], img_list, ann_list)
self.assertEqual(len(out_dict_list), 1)
self.assertEqual(len(out_dict_list[0]["annotations"]), 1)
out_dict_list = extended_coco.convert_to_dict_list(
"", [0], img_list, ann_list, filter_empty_annotations=False
)
self.assertEqual(len(out_dict_list), 3)
@tempdir
def test_coco_injection(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds1", "inj_ds2"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir, "/mnt/fair"],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file, "inj_ds2"],
]
]
)
runner.register(cfg)
inj_ds1 = DatasetCatalog.get("inj_ds1")
self.assertEqual(len(inj_ds1), 10)
for dic in inj_ds1:
self.assertEqual(dic["width"], 80)
self.assertEqual(dic["height"], 60)
@tempdir
def test_direct_copy_keys(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
with tempfile.NamedTemporaryFile(prefix=tmp_dir, suffix=".json") as h_temp:
new_json_file = h_temp.name
with open(json_file, "r") as h_in:
ds = json.load(h_in)
for idx, x in enumerate(ds["images"]):
x["key1"] = idx
x["key2"] = idx
with open(new_json_file, "w") as h_out:
json.dump(ds, h_out)
loaded_ds = extended_coco.extended_coco_load(new_json_file, image_dir)
self.assertTrue("key1" not in loaded_ds[0])
self.assertTrue("key2" not in loaded_ds[0])
loaded_ds = extended_coco.extended_coco_load(
new_json_file, image_dir, image_direct_copy_keys=["key1"]
)
self.assertTrue("key1" in loaded_ds[0])
self.assertTrue("key2" not in loaded_ds[0])
@tempdir
def test_sub_dataset(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds3"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"DATASETS.TEST",
("inj_ds3",),
"D2GO_DATA.TEST.MAX_IMAGES",
1,
]
]
)
runner.register(cfg)
with maybe_subsample_n_images(cfg) as new_cfg:
test_loader = runner.build_detection_test_loader(
new_cfg, new_cfg.DATASETS.TEST[0]
)
self.assertEqual(len(test_loader), 1)
def test_coco_metadata_registry(self):
@KEYPOINT_METADATA_REGISTRY.register()
def TriangleMetadata():
return KeypointMetadata(
names=("A", "B", "C"),
flip_map=(
("A", "B"),
("B", "C"),
),
connection_rules=[
("A", "B", (102, 204, 255)),
("B", "C", (51, 153, 255)),
],
)
tri_md = get_keypoint_metadata("TriangleMetadata")
self.assertEqual(tri_md["keypoint_names"][0], "A")
self.assertEqual(tri_md["keypoint_flip_map"][0][0], "A")
self.assertEqual(tri_md["keypoint_connection_rules"][0][0], "A")
@tempdir
def test_coco_metadata_register(self, tmp_dir):
@KEYPOINT_METADATA_REGISTRY.register()
def LineMetadata():
return KeypointMetadata(
names=("A", "B"),
flip_map=(("A", "B"),),
connection_rules=[
("A", "B", (102, 204, 255)),
],
)
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"D2GO_DATA.DATASETS.COCO_INJECTION.KEYPOINT_METADATA",
["LineMetadata"],
]
]
)
runner.register(cfg)
inj_md = MetadataCatalog.get("inj_ds")
self.assertEqual(inj_md.keypoint_names[0], "A")
self.assertEqual(inj_md.keypoint_flip_map[0][0], "A")
self.assertEqual(inj_md.keypoint_connection_rules[0][0], "A")
@tempdir
def test_coco_create_adhoc_class_to_use_dataset(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(
tmp_dir, num_classes=2
)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["test_adhoc_ds", "test_adhoc_ds2"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir, image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file, json_file],
]
]
)
runner.register(cfg)
# Test adhoc classes to use
AdhocDatasetManager.add(COCOWithClassesToUse("test_adhoc_ds", ["class_0"]))
ds_list = DatasetCatalog.get("test_adhoc_ds@1classes")
self.assertEqual(len(ds_list), 5)
# Test adhoc classes to use with suffix removal
AdhocDatasetManager.add(
COCOWithClassesToUse("test_adhoc_ds2@1classes", ["class_0"])
)
ds_list = DatasetCatalog.get("test_adhoc_ds2@1classes")
self.assertEqual(len(ds_list), 5)
@tempdir
def test_register_coco_dataset_registry(self, tmp_dir):
dummy_buffer = []
@COCO_REGISTER_FUNCTION_REGISTRY.register()
def _register_dummy_function_coco(dataset_name, split_dict):
dummy_buffer.append((dataset_name, split_dict))
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_test_registry"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION",
"_register_dummy_function_coco",
]
]
)
runner.register(cfg)
self.assertTrue(len(dummy_buffer) == 1)
@tempdir
def test_adhoc_register_coco_dataset_registry(self, tmp_dir):
dummy_buffer = []
def _dummy_load_func():
return []
@COCO_REGISTER_FUNCTION_REGISTRY.register()
def _register_dummy_function_coco_adhoc(dataset_name, split_dict):
json_file = split_dict[ANN_FN]
image_root = split_dict[IM_DIR]
DatasetCatalog.register(dataset_name, _dummy_load_func)
MetadataCatalog.get(dataset_name).set(
evaluator_type="coco",
json_file=json_file,
image_root=image_root,
)
dummy_buffer.append((dataset_name, split_dict))
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_test_registry_adhoc"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION",
"_register_dummy_function_coco_adhoc",
]
]
)
runner.register(cfg)
self.assertTrue(len(dummy_buffer) == 1)
# Add adhoc class that uses only the first class
AdhocDatasetManager.add(
COCOWithClassesToUse("inj_test_registry_adhoc", ["class_0"])
)
# Check that the correct register function is used
self.assertTrue(len(dummy_buffer) == 2)
|
d2go-main
|
tests/data/test_d2go_datasets.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from d2go.data.transforms import tensor as tensor_aug
from detectron2.data.transforms.augmentation import AugmentationList
class TestDataTransformsTensor(unittest.TestCase):
def test_tensor_aug(self):
"""Data augmentation that that allows torch.Tensor as input"""
img = torch.ones(3, 8, 6)
augs = [tensor_aug.Tensor2Array(), tensor_aug.Array2Tensor()]
inputs = tensor_aug.AugInput(image=img)
transforms = AugmentationList(augs)(inputs)
self.assertArrayEqual(img, inputs.image)
# inverse is the same as itself
out_img = transforms.inverse().apply_image(img)
self.assertArrayEqual(img, out_img)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
|
d2go-main
|
tests/data/test_data_transforms_tensor.py
|
d2go-main
|
tests/skip_init/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
import sys
import unittest
from d2go.initializer import initialize_all
from d2go.registry import bootstrap
# manually initialize without bootstrapping the registries
initialize_all(bootstrap_registries=False)
def _unimport(package_name):
# remove sub modules from sys
modules = [
key
for key in sys.modules
if (
(key == package_name or key.startswith(package_name + "."))
            # prevent the parent package of this file from being removed
and not __name__.startswith(key)
)
]
for key in sorted(modules, reverse=True):
sys.modules.pop(key)
# invalidate the cache of removed sub modules
importlib.invalidate_caches()
class TestRegistryBootstrap(unittest.TestCase):
def setUp(self):
# NOTE: reload this file since the imported modules (eg. `d2go.registry.bootstrap`)
# might be "unimported" during `tearDown`.
importlib.reload(sys.modules[__name__])
def tearDown(self):
# NOTE: "unimport" bootstrapped libraries, so that each test runs like starting
# a new python program.
# TODO: match list with the bootstrapped packages
_unimport("d2go.registry")
_unimport("mobile_cv")
_unimport("detectron2")
def test_bootstrap_core_lib(self):
self.assertFalse(bootstrap._IS_BOOTSTRAPPED)
bootstrap.bootstrap_registries(enable_cache=False, catch_exception=False)
self.assertTrue(bootstrap._IS_BOOTSTRAPPED)
def test_bootstrap_with_cache(self):
self.assertFalse(bootstrap._IS_BOOTSTRAPPED)
bootstrap.bootstrap_registries(enable_cache=True, catch_exception=False)
self.assertTrue(bootstrap._IS_BOOTSTRAPPED)
|
d2go-main
|
tests/skip_init/test_registries_bootstrap.py
|
d2go-main
|
tests/trainer/__init__.py
|
|
#!/usr/bin/env fbpython
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import os
import unittest
from typing import Dict, List
import torch
from d2go.config import CfgNode
from d2go.modeling import modeling_hook as mh
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.runner.default_runner import Detectron2GoRunner
from d2go.trainer.activation_checkpointing import (
ActivationCheckpointModelingHook,
add_activation_checkpoint_configs,
)
from d2go.utils.testing.data_loader_helper import create_local_dataset
from d2go.utils.testing.helper import tempdir
from detectron2.structures import ImageList
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointWrapper,
)
@META_ARCH_REGISTRY.register()
class MetaArchForTestAC(torch.nn.Module):
def __init__(self, cfg: CfgNode) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.linear = torch.nn.Linear(4, 4)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
@property
def device(self) -> torch._C.device:
        return self.conv.weight.device
def forward(self, inputs: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
images = [x["image"] for x in inputs]
images = ImageList.from_tensors(images, 1)
ret = self.conv(images.tensor)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
return {"loss": ret.norm()}
def _get_cfg(runner, output_dir, dataset_name):
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "MetaArchForTestAC"
cfg.DATASETS.TRAIN = (dataset_name,)
cfg.DATASETS.TEST = (dataset_name,)
cfg.INPUT.MIN_SIZE_TRAIN = (10,)
cfg.INPUT.MIN_SIZE_TEST = (10,)
cfg.SOLVER.MAX_ITER = 3
cfg.SOLVER.STEPS = []
cfg.SOLVER.WARMUP_ITERS = 1
cfg.SOLVER.CHECKPOINT_PERIOD = 3
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.MODEL_EMA.ENABLED = True
cfg.OUTPUT_DIR = output_dir
return cfg
class TestActivationCheckpointing(unittest.TestCase):
def test_ac_config(self) -> None:
cfg = CfgNode()
add_activation_checkpoint_configs(cfg)
self.assertTrue(isinstance(cfg.ACTIVATION_CHECKPOINT, CfgNode))
self.assertEqual(cfg.ACTIVATION_CHECKPOINT.REENTRANT, False)
self.assertEqual(
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY, "always_wrap_policy"
)
self.assertEqual(cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS, [])
def test_ac_modeling_hook_apply(self) -> None:
"""Check that the hook is registered"""
self.assertTrue("ActivationCheckpointModelingHook" in mh.MODELING_HOOK_REGISTRY)
cfg = CfgNode()
add_activation_checkpoint_configs(cfg)
ac_hook = ActivationCheckpointModelingHook(cfg)
model = MetaArchForTestAC(cfg)
ac_hook.apply(model)
children = list(model.children())
self.assertTrue(len(children) == 5)
for child in children:
self.assertTrue(isinstance(child, CheckpointWrapper))
def test_ac_modeling_hook_autowrap(self) -> None:
cfg = CfgNode()
add_activation_checkpoint_configs(cfg)
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY = "layer_based_auto_wrap_policy"
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS = ["Conv2d", "BatchNorm2d"]
ac_hook = ActivationCheckpointModelingHook(cfg)
model = MetaArchForTestAC(cfg)
ac_hook.apply(model)
self.assertTrue(isinstance(model.conv, CheckpointWrapper))
self.assertTrue(isinstance(model.bn, CheckpointWrapper))
self.assertFalse(isinstance(model.linear, CheckpointWrapper))
@tempdir
def test_ac_runner(self, tmp_dir) -> None:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
cfg.MODEL.MODELING_HOOKS = ["ActivationCheckpointModelingHook"]
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY = "layer_based_auto_wrap_policy"
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS = ["Conv2d", "BatchNorm2d"]
cfg.MODEL_EMA.DECAY_WARM_UP_FACTOR = -1
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=False)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, "model_0000002.pth")))
# resume training onto a non-AC-wrapped model
cfg.MODEL.MODELING_HOOKS = []
cfg.SOLVER.MAX_ITER = 6
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=True)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, "model_0000005.pth")))
|
d2go-main
|
tests/trainer/test_activation_checkpointing.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from d2go.initializer import initialize_all
# NOTE: by default a list of initializations will run whenever D2Go is first imported,
# so that users don't need to do any manual initialization other than importing `d2go`.
# An environment variable can be used to skip initialization for special cases like unit tests.
skip_initialization = os.environ.get("D2GO_IMPORT_SKIP_INITIALIZATION", "0") == "1"
if not skip_initialization:
initialize_all()
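# A minimal usage sketch (hypothetical test harness, not part of this file):
# the environment variable must be set before `d2go` is imported for the first time.
#
#   import os
#   os.environ["D2GO_IMPORT_SKIP_INITIALIZATION"] = "1"
#   import d2go  # import-time initialization is skipped
#   from d2go.initializer import initialize_all
#   initialize_all()  # run the initialization manually when needed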
|
d2go-main
|
d2go/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.registry.bootstrap import bootstrap_registries
from mobile_cv.common.misc.oss_utils import fb_overwritable
_INITIALIZED = False
def initialize_all(bootstrap_registries: bool = False) -> None:
    global _INITIALIZED
    if _INITIALIZED:
        return
    _INITIALIZED = True
    _initialize_all(bootstrap_registries=bootstrap_registries)
def _initialize_all(bootstrap_registries: bool) -> None:
    _setup_env()
    _register_builtin_datasets()
    _populate_registries()
    if bootstrap_registries:
bootstrap_registries(enable_cache=True, catch_exception=True)
# fmt: off
@fb_overwritable()
def _setup_env():
# register torch vision ops
from torchvision.ops import nms # noqa
# setup Detectron2 environments
from detectron2.utils.env import setup_environment as setup_d2_environment # isort:skip
setup_d2_environment()
@fb_overwritable()
def _register_builtin_datasets():
# Register D2 builtin datasets
import detectron2.data # noqa F401
@fb_overwritable()
def _populate_registries():
from d2go import optimizer # noqa
from d2go.data import dataset_mappers # noqa
from d2go.modeling.backbone import fbnet_v2 # noqa
# fmt: on
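# Usage sketch: callers that skip bootstrapping at import time (as the unit
# tests above do via `initialize_all(bootstrap_registries=False)`) can trigger
# it later:
#
#   from d2go.registry.bootstrap import bootstrap_registries
#   bootstrap_registries(enable_cache=True, catch_exception=True)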
|
d2go-main
|
d2go/initializer.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Extend mobile_cv.torch.utils_pytorch.distributed_helper to add D2/D2Go-specific
features; functions in this module share the same signatures as the ones from mobile_cv.
"""
import logging
from dataclasses import dataclass
from datetime import timedelta
from typing import Any, Callable, Dict, Optional, Tuple, TypeVar
import detectron2.utils.comm as d2_comm
import mobile_cv.torch.utils_pytorch.comm as mcv_comm
import torch
from d2go.config import CfgNode, temp_defrost
from d2go.utils.launch_environment import get_launch_environment
from mobile_cv.torch.utils_pytorch.comm import ( # noqa
BaseSharedContext,
get_shared_context,
set_shared_context,
)
from mobile_cv.torch.utils_pytorch.distributed_helper import (
DEFAULT_TIMEOUT,
DistributedParams,
enable_dist_process_groups,
launch as _launch,
launch_deco as _launch_deco,
save_return_deco,
)
logger = logging.getLogger(__name__)
_RT = TypeVar("_RT") # return type
@dataclass
class D2GoSharedContext(BaseSharedContext):
"""
    Shared context that can be initialized before launching the workers and
    passed to all workers.
"""
runner_shared_context: Any
# BC-compatible
def get_local_rank() -> int:
return mcv_comm.get_local_rank()
# BC-compatible
def get_num_processes_per_machine() -> int:
return mcv_comm.get_local_size()
def _maybe_convert_to_cpu_run(args, backend):
if get_launch_environment() == "local" and not torch.cuda.is_available():
assert len(args) > 0, args
cfg = args[0]
if isinstance(cfg, CfgNode) and cfg.MODEL.DEVICE == "cuda":
logger.warning(
"Detected that CUDA is not available on this machine, set MODEL.DEVICE"
" to cpu and backend to GLOO"
)
with temp_defrost(cfg):
cfg.MODEL.DEVICE = "cpu"
backend = "GLOO"
return args, backend
# Modify mobile_cv's `default_distributed_worker` to also set up D2's comm module
def distributed_worker(
main_func: Callable[..., _RT],
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
backend: str,
init_method: Optional[str] = None,
dist_params: Optional[DistributedParams] = None,
return_save_file: Optional[str] = None,
timeout: timedelta = DEFAULT_TIMEOUT,
shared_context: Optional[BaseSharedContext] = None,
) -> _RT:
if shared_context:
set_shared_context(
shared_context
) # set the global shared context from the args passed in by mp spawn
dist_params = dist_params or DistributedParams.from_environ()
args, backend = _maybe_convert_to_cpu_run(args, backend)
with enable_dist_process_groups(backend, init_method, dist_params, timeout):
d2_comm._LOCAL_PROCESS_GROUP = mcv_comm._LOCAL_PROCESS_GROUP
        # Now D2's comm module should be fully functional
deco = save_return_deco(return_save_file, dist_params.global_rank)
return deco(main_func)(*args, **kwargs)
def launch_deco(**kwargs):
"""
launch_deco for d2go distributed worker
"""
return _launch_deco(launcher=launch, **kwargs)
def launch(
main_func: Callable[..., _RT],
num_processes_per_machine: int,
num_machines: int = 1,
machine_rank: int = 0,
dist_url: Optional[str] = None,
backend: str = "NCCL",
always_spawn: bool = False,
launch_method: str = "multiprocessing",
shared_context: Optional[D2GoSharedContext] = None,
timeout: timedelta = DEFAULT_TIMEOUT,
args: Tuple[Any, ...] = (),
    kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[int, _RT]:
"""
    D2Go's specialized launch method; it does a few more things on top of mcv's launch:
    - Automatically convert GPU to CPU if CUDA is not available.
    - Add D2Go-specific initialization in the _distributed_worker.
"""
args, backend = _maybe_convert_to_cpu_run(args, backend)
return _launch(
main_func=main_func,
num_processes_per_machine=num_processes_per_machine,
num_machines=num_machines,
machine_rank=machine_rank,
dist_url=dist_url,
backend=backend,
always_spawn=always_spawn,
launch_method=launch_method,
shared_context=shared_context,
timeout=timeout,
args=args,
kwargs=kwargs,
_distributed_worker=distributed_worker,
)
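# Usage sketch (toy function; return-value collection is assumed to follow the
# mobile_cv launch semantics, mapping global rank to return value):
#
#   def _toy_main(offset):
#       import detectron2.utils.comm as comm
#       return offset + comm.get_rank()
#
#   results = launch(
#       _toy_main,
#       num_processes_per_machine=2,
#       backend="GLOO",
#       args=(10,),
#   )
#   # results == {0: 10, 1: 11}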
|
d2go-main
|
d2go/distributed.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import os
import time
from typing import Callable, List, Optional, Tuple, Type, TypeVar, Union
import detectron2.utils.comm as comm
import torch
from d2go.config import (
auto_scale_world_size,
CfgNode,
load_full_config_from_file,
reroute_config_path,
temp_defrost,
)
from d2go.config.utils import get_diff_cfg
from d2go.distributed import (
D2GoSharedContext,
get_local_rank,
get_num_processes_per_machine,
)
from d2go.runner import import_runner
from d2go.runner.api import RunnerV2Mixin
from d2go.runner.default_runner import BaseRunner
from d2go.runner.lightning_task import DefaultTask
from d2go.utils.helper import run_once
from d2go.utils.launch_environment import get_launch_environment
from d2go.utils.logging import initialize_logging, replace_print_with_logging
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger as _setup_logger
from detectron2.utils.serialize import PicklableWrapper
from mobile_cv.common.misc.py import FolderLock, MultiprocessingPdb, post_mortem_if_fail
logger = logging.getLogger(__name__)
_RT = TypeVar("_RT")
@run_once()
def setup_root_logger(logging_level: int = logging.INFO) -> None:
"""
    Sets up the D2Go root logger. Loggers are organized in a tree; if a logger
    does not have a level explicitly set, it defaults to its parent's level. By
    setting the root logger level to the given level (INFO by default), we
    change the default behaviour for all loggers.
See https://docs.python.org/3/library/logging.html for a more in-depth
description
"""
initialize_logging(logging_level)
replace_print_with_logging()
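# Usage sketch: thanks to @run_once, repeated calls are no-ops, so different
# entry points can call this defensively without reconfiguring logging:
#
#   setup_root_logger(logging.DEBUG)  # first call configures the root logger
#   setup_root_logger()               # subsequent calls do nothing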
def basic_argument_parser(
distributed=True,
requires_output_dir=True,
):
"""Basic cli tool parser for Detectron2Go binaries"""
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--runner",
type=str,
default="d2go.runner.GeneralizedRCNNRunner",
help="Full class name, i.e. (package.)module.class",
)
parser.add_argument(
"--config-file",
help="path to config file",
default="",
metavar="FILE",
)
parser.add_argument(
"--output-dir",
help="When given, this will override the OUTPUT_DIR in the config-file",
required=requires_output_dir,
default=None,
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
parser.add_argument(
"--save-return-file",
help="When given, the main function outputs will be serialized and saved to this file",
default=None,
type=str,
)
parser.add_argument(
"--disable-post-mortem",
action="store_true",
help="whether to NOT connect pdb on failure, which only works locally",
)
if distributed:
parser.add_argument(
"--num-processes", type=int, default=1, help="number of gpus per machine"
)
parser.add_argument("--num-machines", type=int, default=1)
parser.add_argument("--run-as-worker", type=bool, default=False)
parser.add_argument(
"--machine-rank",
type=int,
default=0,
help="the rank of this machine (unique per machine)",
)
parser.add_argument(
"--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time())
)
parser.add_argument("--dist-backend", type=str, default="NCCL")
return parser
def build_basic_cli_args(
config_path: Optional[str] = None,
output_dir: Optional[str] = None,
runner_name: Optional[str] = None,
save_return_file: Optional[str] = None,
num_processes: Optional[Union[int, str]] = None,
num_machines: Optional[Union[int, str]] = None,
machine_rank: Optional[Union[int, str]] = None,
dist_url: Optional[str] = None,
dist_backend: Optional[str] = None,
disable_post_mortem: bool = False,
run_as_worker: bool = False,
# Evaluator args below
predictor_path: Optional[str] = None,
num_threads: Optional[int] = None,
caffe2_engine: Optional[int] = None,
caffe2_logging_print_net_summary: Optional[int] = None,
) -> List[str]:
"""
Returns parameters in the form of CLI arguments for the binary using
basic_argument_parser to set up its argument parser.
For the parameters definition and meaning, see basic_argument_parser.
"""
args: List[str] = []
if config_path is not None:
args += ["--config-file", config_path]
if output_dir is not None:
args += ["--output-dir", output_dir]
if runner_name is not None:
args += ["--runner", runner_name]
if save_return_file is not None:
args += ["--save-return-file", str(save_return_file)]
if disable_post_mortem:
args += ["--disable-post-mortem"]
if run_as_worker:
args += ["--run-as-worker", str(run_as_worker)]
if num_processes is not None:
args += ["--num-processes", str(num_processes)]
if num_machines is not None:
args += ["--num-machines", str(num_machines)]
if machine_rank is not None:
args += ["--machine-rank", str(machine_rank)]
if dist_url is not None:
args += ["--dist-url", str(dist_url)]
if dist_backend is not None:
args += ["--dist-backend", str(dist_backend)]
if predictor_path is not None:
args += ["--predictor-path", predictor_path]
if num_threads is not None:
args += ["--num-threads", int(num_threads)]
if caffe2_engine is not None:
args += ["--caffe2-engine", int(caffe2_engine)]
if caffe2_logging_print_net_summary is not None:
args += [
"--caffe2_logging_print_net_summary",
str(caffe2_logging_print_net_summary),
]
return args
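# Example (hypothetical paths) of the argv list this helper produces; it can be
# fed back into basic_argument_parser().parse_args():
#
#   argv = build_basic_cli_args(
#       config_path="configs/faster_rcnn.yaml",
#       output_dir="/tmp/d2go_output",
#       num_processes=2,
#   )
#   # argv == ["--config-file", "configs/faster_rcnn.yaml",
#   #          "--output-dir", "/tmp/d2go_output",
#   #          "--num-processes", "2"]
#   args = basic_argument_parser().parse_args(argv)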
def create_cfg_from_cli(
config_file: str,
overwrites: Optional[List[str]],
runner_class: Union[None, str, Type[BaseRunner], Type[DefaultTask]],
) -> CfgNode:
"""
Centralized function to load config object from config file. It currently supports:
- YACS based config (return yacs's CfgNode)
"""
config_file = reroute_config_path(config_file)
with PathManager.open(config_file, "r") as f:
        # TODO: switch to logger; note that we need to initialize the logger
        # outside of main for running locally.
print("Loaded config file {}:\n{}".format(config_file, f.read()))
if isinstance(runner_class, str):
print(f"Importing runner: {runner_class} ...")
runner_class = import_runner(runner_class)
if runner_class is None or issubclass(runner_class, RunnerV2Mixin):
# Runner-less API
cfg = load_full_config_from_file(config_file)
else:
# backward compatible for old API
cfg = runner_class.get_default_cfg()
cfg.merge_from_file(config_file)
cfg.merge_from_list(overwrites or [])
cfg.freeze()
return cfg
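# Usage sketch (hypothetical config file): load a YACS config with CLI-style
# overwrites; the returned cfg is frozen:
#
#   cfg = create_cfg_from_cli(
#       config_file="configs/my_model.yaml",
#       overwrites=["SOLVER.IMS_PER_BATCH", "2"],
#       runner_class="d2go.runner.GeneralizedRCNNRunner",
#   )
#   assert cfg.is_frozen()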
def prepare_for_launch(
args,
) -> Tuple[CfgNode, str, str]:
"""
Load config, figure out working directory, create runner.
- when args.config_file is empty, returned cfg will be the default one
- returned output_dir will always be non empty, args.output_dir has higher
priority than cfg.OUTPUT_DIR.
"""
logger.info(args)
cfg = create_cfg_from_cli(
config_file=args.config_file,
overwrites=args.opts,
runner_class=args.runner,
)
# overwrite the output_dir based on config if output is not set via cli
assert args.output_dir or args.config_file
output_dir = args.output_dir or cfg.OUTPUT_DIR
return cfg, output_dir, args.runner
def maybe_override_output_dir(cfg: CfgNode, output_dir: str):
if cfg.OUTPUT_DIR != output_dir:
with temp_defrost(cfg):
logger.warning(
"Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
cfg.OUTPUT_DIR, output_dir
)
)
cfg.OUTPUT_DIR = output_dir
def setup_before_launch(
cfg: CfgNode,
output_dir: str,
runner_class: Union[None, str, Type[BaseRunner], Type[DefaultTask]],
) -> Union[None, D2GoSharedContext]:
"""
    Setup logic before spawning workers, including:
    - Shared context initialization to be passed to all workers
"""
if isinstance(runner_class, str):
logger.info(f"Importing runner: {runner_class} ...")
runner_class = import_runner(runner_class)
if hasattr(runner_class, "create_shared_context"):
return runner_class.create_shared_context(cfg)
return None
def setup_after_launch(
cfg: CfgNode,
output_dir: str,
runner_class: Union[None, str, Type[BaseRunner], Type[DefaultTask]],
) -> Union[None, BaseRunner, Type[DefaultTask]]:
"""
Binary-level setup after entering DDP, including
- creating working directory
- setting up logger
- logging environment
- printing and dumping config
- (optional) initializing runner
"""
create_dir_on_global_main_process(output_dir)
setup_loggers(output_dir)
log_system_info()
cfg.freeze()
maybe_override_output_dir(cfg, output_dir)
logger.info("Running with full config:\n{}".format(cfg))
dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))
if isinstance(runner_class, str):
logger.info(f"Importing runner: {runner_class} ...")
runner_class = import_runner(runner_class)
if issubclass(runner_class, DefaultTask):
# TODO(T123679504): merge this with runner code path to return runner instance
logger.info(f"Importing lightning task: {runner_class} ...")
runner = runner_class
elif issubclass(runner_class, BaseRunner):
logger.info(f"Initializing runner: {runner_class} ...")
runner = runner_class()
runner = initialize_runner(runner, cfg)
logger.info("Running with runner: {}".format(runner))
else:
assert runner_class is None, f"Unsupported runner class: {runner_class}"
runner = None
# save the diff config
default_cfg = (
runner_class.get_default_cfg()
if runner_class and not issubclass(runner_class, RunnerV2Mixin)
else cfg.get_default_cfg()
)
dump_cfg(
get_diff_cfg(default_cfg, cfg),
os.path.join(output_dir, "diff_config.yaml"),
)
# scale the config after dumping so that dumped config files keep original world size
auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
# avoid random pytorch and CUDA algorithms during the training
if cfg.SOLVER.DETERMINISTIC:
logging.warning("Using deterministic training for the reproducibility")
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
# reference: https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
if cfg.SEED > 0:
seed_all_rng(cfg.SEED)
return runner
def setup_logger(
module_name: str,
output_dir: str,
abbrev_name: Optional[str] = None,
color: Optional[bool] = None,
) -> logging.Logger:
if not color:
color = get_launch_environment() == "local"
if not abbrev_name:
abbrev_name = module_name
logger = _setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name=module_name,
abbrev_name=abbrev_name,
enable_propagation=True,
configure_stdout=False,
)
return logger
@run_once()
def setup_loggers(output_dir):
# Setup logging in each of the distributed processes.
setup_root_logger()
setup_logger("detectron2", output_dir, abbrev_name="d2")
setup_logger("fvcore", output_dir)
setup_logger("d2go", output_dir)
setup_logger("mobile_cv", output_dir)
# NOTE: all above loggers have FileHandler pointing to the same file as d2_logger.
# Those files are opened upon creation, but it seems fine in 'a' mode.
def log_system_info():
num_processes = get_num_processes_per_machine()
logger.info(
"Using {} processes per machine. Rank of current process: {}".format(
num_processes, comm.get_rank()
)
)
wf_id = os.getenv("WORKFLOW_RUN_ID", None)
if wf_id is not None:
logger.info("FBLearner Flow Run ID: {}".format(wf_id))
logger.info("Environment info:\n" + collect_env_info())
try:
from detectron2.fb.utils import print_fbcode_info
print_fbcode_info()
except ImportError:
pass
def dump_cfg(cfg: CfgNode, path: str) -> None:
if comm.is_main_process():
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
logger.info("Full config saved to {}".format(path))
def create_dir_on_global_main_process(path: str) -> None:
if comm.get_rank() == 0 and path:
PathManager.mkdirs(path)
    # Add a barrier to make sure the dir exists for non-master processes
comm.synchronize()
def initialize_runner(runner: BaseRunner, cfg: CfgNode) -> BaseRunner:
assert runner is not None, "now always requires a runner instance"
runner._initialize(cfg)
return runner
def caffe2_global_init(logging_print_net_summary=0, num_threads=None):
if num_threads is None:
if get_num_processes_per_machine() > 1:
# by default use single thread when DDP with multiple processes
num_threads = 1
else:
# GlobalInit will clean PyTorch's num_threads and set it to 1,
# thus keep PyTorch's default value to make it truly default.
num_threads = torch.get_num_threads()
    if get_local_rank() != 0:
logging_print_net_summary = 0 # only enable for local main process
from caffe2.python import workspace
workspace.GlobalInit(
[
"caffe2",
"--caffe2_log_level=2",
"--caffe2_logging_print_net_summary={}".format(logging_print_net_summary),
"--caffe2_omp_num_threads={}".format(num_threads),
"--caffe2_mkl_num_threads={}".format(num_threads),
]
)
logger.info("Using {} threads after GlobalInit".format(torch.get_num_threads()))
def post_mortem_if_fail_for_main(main_func: Callable[..., _RT]) -> Callable[..., _RT]:
def new_main_func(cfg, output_dir, *args, **kwargs) -> _RT:
pdb_ = (
MultiprocessingPdb(FolderLock(output_dir))
if comm.get_world_size() > 1
else None # fallback to use normal pdb for single process
)
return post_mortem_if_fail(pdb_)(main_func)(cfg, output_dir, *args, **kwargs)
return PicklableWrapper(new_main_func)
|
d2go-main
|
d2go/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import uuid
from contextlib import ContextDecorator
from d2go.checkpoint.log_checkpoint import log_checkpoint
logger = logging.getLogger(__name__)
class instrument_checkpoint(ContextDecorator):
def __init__(
self,
checkpoint_type: str,
) -> None:
super().__init__()
self.unique_id = uuid.uuid1().int >> 97
self.checkpoint_type = checkpoint_type
def __enter__(self) -> "instrument_checkpoint":
log_checkpoint(
checkpoint_type=self.checkpoint_type,
unique_id=self.unique_id,
state="begin",
)
return self
def __exit__(self, exc_type, exc_value, tb) -> bool:
log_checkpoint(
checkpoint_type=self.checkpoint_type,
unique_id=self.unique_id,
state="end",
)
if exc_value is not None:
            # Re-raise the exception; otherwise returning True would swallow it
raise exc_value
return True
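# Usage sketch: as a ContextDecorator this can wrap either a block or a whole
# function; both emit paired "begin"/"end" log entries sharing one unique_id:
#
#   with instrument_checkpoint("save"):
#       save_model()  # hypothetical save routine
#
#   @instrument_checkpoint("load")
#   def load_model(path):
#       ...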
|
d2go-main
|
d2go/checkpoint/checkpoint_instrumentation.py
|
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import json
import os
from typing import Callable, cast, IO
import detectron2.utils.comm as comm
import torch
from d2go.checkpoint.checkpoint_instrumentation import instrument_checkpoint
from d2go.checkpoint.utils import (
gather_ema_state_dict,
gather_optimizer_state_dict,
scatter_ema_state_dict,
scatter_optimizer_state_dict,
)
from d2go.quantization.modeling import QATCheckpointer
from d2go.trainer.fsdp import FSDPWrapper
from d2go.utils.misc import _log_api_usage_on_main_process
from mobile_cv.torch.utils_pytorch.distributed_helper import interleave_by_rank
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
LOG_API_IDENTIFIER = "checkpointing.FSDPCheckpointer"
def get_max_checkpoint_concurrency() -> int:
return comm.get_world_size()
# TODO: replace FSDPCheckpointer with central D2GoCheckpointer
class FSDPCheckpointer(QATCheckpointer):
"""
Extend the Checkpointer to support saving/loading FSDP models
"""
def __init__(
self,
*args,
concurrency_limit_fetcher: Callable[[], int] = get_max_checkpoint_concurrency,
**kwargs,
):
super().__init__(*args, **kwargs)
self._concurrency_limit_fetcher: Callable[[], int] = concurrency_limit_fetcher
def is_distributed(self) -> bool:
return True
@instrument_checkpoint("load")
def load(self, path: str, checkpointables=None):
"""
Add support for loading sharded optimizer states in FSDP.
.. note:: Loading optimizer states from regular checkpoints into FSDP models is currently not supported.
In general users should not resume non-FSDP training with FSDP.
"""
if isinstance(self.model, FSDPWrapper):
load_path = path
if path:
# loading path is a directory: local or sharded state dict is used
if self.path_manager.isdir(path):
# Get state dict type from metadata file
metadata = self._load_metadata(path)
state_dict_type = (
metadata["state_dict_type"] if metadata else "LOCAL_STATE_DICT"
)
assert state_dict_type in ["LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
self.logger.info(
f"[FSDPCheckpointer] Loading from {state_dict_type} checkpoint ..."
)
self.model.load_state_dict_type = StateDictType[state_dict_type]
load_path = os.path.join(path, f"rank{comm.get_rank()}.pth")
# loading path is a file: full global state dict is used
else:
self.logger.info(
"[FSDPCheckpointer] Loading from FULL_STATE_DICT checkpoint ..."
)
self.model.load_state_dict_type = StateDictType.FULL_STATE_DICT
_log_api_usage_on_main_process(
f"{LOG_API_IDENTIFIER}.load.fsdp.{self.model.load_state_dict_type.name}" # pyre-ignore
)
# Convert local ckpt to global ckpt when we load from a local ckpt but want to save to global ckpt
convert_local_ckpt_to_global = (
path
and self.model.load_state_dict_type == StateDictType.LOCAL_STATE_DICT
and self.model.state_dict_type == StateDictType.FULL_STATE_DICT
)
# Load all checkpointables from local ckpt if we want to convert to global ckpt
checkpointables_iter = (
self.checkpointables.keys()
if checkpointables is None or convert_local_ckpt_to_global
else checkpointables
)
checkpointables_filtered = [
name
for name in checkpointables_iter
if name not in ["optimizer", "ema_state"]
]
checkpoint = super().load(
load_path, checkpointables=checkpointables_filtered
)
if "optimizer" in checkpointables_iter:
self.logger.info(
f"[FSDPCheckpointer] Loading optimizer from {load_path} ..."
)
optimizer = self.checkpointables["optimizer"]
osd = checkpoint.pop("optimizer")
scatter_optimizer_state_dict(optimizer, osd, self.model)
if "ema_state" in checkpointables_iter:
self.logger.info(
f"[FSDPCheckpointer] Loading ema_state from {load_path} ..."
)
ema_state = checkpoint.pop("ema_state")
scatter_ema_state_dict(ema_state, self.model)
# Convert local ckpt by resaving the current state
if convert_local_ckpt_to_global:
self.logger.info(
"[FSDPCheckpointer] Converting local FSDP checkpoint to global checkpoint ..."
)
self.save(os.path.basename(path), tag_last_ckpt=False, **checkpoint)
self.logger.info(
"[FSDPCheckpointer] Local-to-global checkpoint conversion finishes"
)
# return all remaining checkpoints
return checkpoint
else:
_log_api_usage_on_main_process(f"{LOG_API_IDENTIFIER}.load.ddp")
return super().load(path, checkpointables=checkpointables)
@instrument_checkpoint("save")
def save(self, name: str, tag_last_ckpt=True, **kwargs) -> None:
"""
Add support for saving sharding models and optimizers.
The rest of the code is copied from implementation in the superclass
"""
# checkpoint_type is used to annotate preemption checkpoints for internal checkpointer. Ignore it here
kwargs.pop("checkpoint_type", None)
# If no sharding, only the main process enters the saving codepath;
# otherwise, all processes need to call state_dict() to enable state broadcasting among ranks
if not isinstance(self.model, FSDPWrapper):
_log_api_usage_on_main_process(f"{LOG_API_IDENTIFIER}.save.ddp")
if comm.is_main_process():
return super().save(name, **kwargs)
return
_log_api_usage_on_main_process(
f"{LOG_API_IDENTIFIER}.save.fsdp.{self.model.state_dict_type.name}"
)
data = {}
# FSDP: model.state_dict() needs to be called by all ranks before saving
data["model"] = self.model.state_dict()
for key, obj in self.checkpointables.items():
if key == "optimizer":
data[key] = gather_optimizer_state_dict(obj, self.model)
elif key == "ema_state":
data[key] = gather_ema_state_dict(obj, self.model)
else:
data[key] = obj.state_dict()
data.update(kwargs)
# If using full state dict, only the main process does checkpoint saving; Otherwise, all processes do
if self.model.state_dict_type != StateDictType.FULL_STATE_DICT:
# Main process creates directory for local saves
new_save_dir = os.path.join(self.save_dir, name)
if comm.is_main_process():
if not self.path_manager.exists(new_save_dir):
self.path_manager.mkdirs(new_save_dir)
comm.synchronize()
# Saving checkpoints
basename = "rank{}.pth".format(comm.get_rank())
save_file = os.path.join(new_save_dir, basename)
assert os.path.basename(save_file) == basename, basename
# Limit the write concurrency to avoid QPS overload
with interleave_by_rank(
concurrency_limit=self._concurrency_limit_fetcher()
):
self._save_file(data, save_file)
# Main process tags last checkpoint if no errors in all processes
comm.synchronize()
if comm.is_main_process():
self._save_metadata(new_save_dir)
if tag_last_ckpt:
self.tag_last_checkpoint(name)
elif comm.is_main_process():
basename = "{}.pth".format(name)
save_file = os.path.join(self.save_dir, basename)
assert os.path.basename(save_file) == basename, basename
self._save_file(data, save_file)
if tag_last_ckpt:
self.tag_last_checkpoint(basename)
def _save_file(self, data, filename):
self.logger.info("Saving checkpoint to {}".format(filename))
with self.path_manager.open(filename, "wb") as f:
torch.save(data, cast(IO[bytes], f))
def _load_file(self, f: str):
# Limit the read concurrency to avoid QPS overload
with interleave_by_rank(concurrency_limit=self._concurrency_limit_fetcher()):
return super()._load_file(f)
def _save_metadata(self, path):
metadata_file = os.path.join(path, "metadata.json")
obj = {"state_dict_type": self.model.state_dict_type.name}
with self.path_manager.open(metadata_file, "w") as f:
json.dump(obj, f)
def _load_metadata(self, path):
metadata_file = os.path.join(path, "metadata.json")
if self.path_manager.exists(metadata_file):
with self.path_manager.open(metadata_file, "r") as f:
return json.load(f)
else:
return None
|
d2go-main
|
d2go/checkpoint/fsdp_checkpoint.py
|
from d2go.checkpoint.api import is_distributed_checkpoint
from d2go.checkpoint.fsdp_checkpoint import FSDPCheckpointer
__all__ = [
"is_distributed_checkpoint",
"FSDPCheckpointer",
]
|
d2go-main
|
d2go/checkpoint/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from mobile_cv.common.misc.oss_utils import fb_overwritable
logger = logging.getLogger(__name__)
@fb_overwritable()
def log_checkpoint(checkpoint_type: str, unique_id: int, state: str) -> None:
    logger.info(f"Checkpoint:{unique_id} {checkpoint_type} {state}")
|
d2go-main
|
d2go/checkpoint/log_checkpoint.py
|
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
from fvcore.common.checkpoint import Checkpointer
def is_distributed_checkpoint(checkpointer: Checkpointer) -> bool:
"""
Check if checkpointer supports distributed checkpointing,
in which case all ops need to be invoked in every rank.
"""
if hasattr(checkpointer, "is_distributed"):
return checkpointer.is_distributed()
return False
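# Usage sketch (assuming `comm` is detectron2.utils.comm and `checkpointer` is
# already constructed): distributed checkpointers must be invoked on every
# rank, while regular ones should only save on the main process:
#
#   if is_distributed_checkpoint(checkpointer) or comm.is_main_process():
#       checkpointer.save("model_final")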
|
d2go-main
|
d2go/checkpoint/api.py
|
import copy
from d2go.modeling.ema import EMAState
from d2go.trainer.fsdp import FSDPWrapper
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
StateDictType,
)
def gather_optimizer_state_dict(optimizer, model: FSDPWrapper):
"""
Get full/local optimizer state dict from an FSDP model.
"""
# FSDP: full_optim_state_dict() needs to be called by all ranks
if model.state_dict_type == StateDictType.FULL_STATE_DICT:
return FSDP.full_optim_state_dict(
model, optim=optimizer, rank0_only=model.rank0_only
)
elif model.state_dict_type == StateDictType.SHARDED_STATE_DICT:
return FSDP.sharded_optim_state_dict(model, optim=optimizer)
return optimizer.state_dict()
def scatter_optimizer_state_dict(optimizer, optim_state_dict, model: FSDPWrapper):
"""
Load a full/local optimizer state dict into an FSDP model.
If using a full state dict, shard and scatter the optimizer state dict before loading.
"""
if model.load_state_dict_type == StateDictType.FULL_STATE_DICT:
optim_state_dict = FSDP.shard_full_optim_state_dict(
optim_state_dict, model, optim=optimizer
)
elif model.load_state_dict_type == StateDictType.SHARDED_STATE_DICT:
optim_state_dict = FSDP.flatten_sharded_optim_state_dict(
optim_state_dict, model, optim=optimizer
)
optimizer.load_state_dict(optim_state_dict)
def gather_ema_state_dict(ema_state, model: FSDPWrapper):
"""
Get full/local EMA state dict from an FSDP model.
If using a full state dict, gather the local sharded EMA states from all FSDP ranks and aggregate them into a full EMA state dict.
"""
if model.state_dict_type == StateDictType.FULL_STATE_DICT:
# Apply local ema states to the model and unshard them
with ema_state.apply_and_restore(model):
with FSDP.summon_full_params(
model,
writeback=False,
offload_to_cpu=model.offload_to_cpu,
rank0_only=model.rank0_only,
):
state = EMAState.FromModel(model)
return state.state
elif model.state_dict_type == StateDictType.SHARDED_STATE_DICT:
with ema_state.apply_and_restore(model):
# must deepcopy the state dict, else we return a reference to the model state
return dict(copy.deepcopy(model.state_dict()))
else:
return ema_state.state_dict()
def scatter_ema_state_dict(ema_state_dict, model: FSDPWrapper):
"""
Load a full/sharded/local EMA state dict into an FSDP model.
If loading a full state dict, ema_state_dict needs to be properly sharded for each FSDP process to store locally.
Note that, at load-time, model.state_dict_type is automatically set to the type of the state dict being loaded
by accessing metadata, so there's no possibility of a save-load mismatch
"""
if model.load_state_dict_type == StateDictType.FULL_STATE_DICT:
# Store the current model state.
old_local_state = EMAState.FromModel(model)
# Apply ema_state as a FULL state dict to the model so it can be properly sharded
# Currently only [offload_to_cpu=False, rank0_only=False] is supported
with FSDP.summon_full_params(
model,
writeback=True,
offload_to_cpu=False,
rank0_only=False,
):
ema_state = EMAState()
ema_state.load_state_dict(ema_state_dict)
ema_state.apply_to(model)
# Load ema_state from model
model.ema_state.save_from(model)
# Restore the old model state
old_local_state.apply_to(model)
elif model.load_state_dict_type == StateDictType.SHARDED_STATE_DICT:
# Store current model state temporarily
old_state = EMAState.FromModel(model)
# Load the ema state dict into the model
model.load_state_dict(ema_state_dict)
# Save ema state with correct FQNs via EMAState.save_from
model.ema_state.save_from(model)
# restore old model state
old_state.apply_to(model)
else:
model.ema_state.load_state_dict(ema_state_dict)
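def _example_ema_round_trip(model: FSDPWrapper) -> None:
    """
    A minimal sketch, not part of the d2go API: gather the EMA state dict
    from an FSDP-wrapped model and scatter it back, assuming the model has
    the `ema_state` attribute set up by d2go.modeling.ema.
    """
    ema_state_dict = gather_ema_state_dict(model.ema_state, model)
    scatter_ema_state_dict(ema_state_dict, model)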
|
d2go-main
|
d2go/checkpoint/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import logging
import os
from typing import Any, Dict, List, Optional, Union
import detectron2.utils.comm as comm
import torch
from d2go.utils.parse_module_params import iterate_module_named_parameters
from detectron2.solver.build import (
maybe_add_gradient_clipping as d2_maybe_add_gradient_clipping,
reduce_param_groups,
)
from detectron2.utils.file_io import PathManager
from detectron2.utils.registry import Registry
D2GO_OPTIM_MAPPER_REGISTRY = Registry("D2GO_OPTIM_MAPPER")
logger = logging.getLogger(__name__)
OptimizerModelsType = Union[torch.nn.Module, torch.nn.parallel.DistributedDataParallel]
def get_optimizer_param_groups(model: OptimizerModelsType, cfg):
"""
Get the optimizer parameter groups, composed of:
* all default parameters that require gradient
* parameter groups for LR (bias factor and per-module multipliers)
* parameter groups for weight decay (normalization, bias, embedding)
* parameter groups from the model, if it implements `get_optimizer_param_groups()`
Parameters appearing later override those appearing earlier.
"""
# get all parameters that require gradient
params = get_optimizer_param_groups_default(model)
# parameter groups for lr
params += get_optimizer_param_groups_lr(
model,
base_lr=cfg.SOLVER.BASE_LR,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
lr_multipliers_overwrite=_merge_dict(cfg.SOLVER.LR_MULTIPLIER_OVERWRITE),
)
# parameter groups for normalization, bias, and embedding
params += get_optimizer_param_groups_weight_decay(
model,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
weight_decay_embed=cfg.SOLVER.WEIGHT_DECAY_EMBED,
weight_decay_overwrite=_merge_dict(cfg.SOLVER.WEIGHT_DECAY_OVERWRITE),
)
# parameter groups from model function `model.get_optimizer_param_groups(opts)`
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model = model.module
if hasattr(model, "get_optimizer_param_groups"):
logger.info(
"Getting optimizer parameter groups from model.get_optimizer_param_groups()"
)
params += model.get_optimizer_param_groups(cfg)
return reduce_param_groups(params)
def get_optimizer_param_groups_default(model: OptimizerModelsType):
ret = [
{
"params": list(
filter(
lambda x: x.requires_grad,
model.parameters(),
)
),
"param_names": [
name
for name, _param in filter(
lambda x: x[1].requires_grad, model.named_parameters()
)
],
}
]
return ret
def get_optimizer_param_groups_lr(
model: OptimizerModelsType,
base_lr: float,
bias_lr_factor: float = 1.0,
lr_multipliers_overwrite: Optional[Dict[str, float]] = None,
):
"""
Allow setting up LR for modules.
base_lr: LR for all modules
bias_lr_factor: scale factor of LR for bias terms
lr_multipliers_overwrite (dict: str -> float):
Apply different LR multipliers to parameters whose names contain
certain keys. For example, with lr_multipliers_overwrite={'backbone': 0.1},
the LR for parameters whose names contain 'backbone' is scaled to 0.1x.
Set lr_multipliers_overwrite=None if no multipliers are required.
"""
params: List[Dict[str, Any]] = []
for (
module_name,
_module,
module_param_name,
value,
) in iterate_module_named_parameters(model):
cur_lr = base_lr
if module_param_name == "bias":
cur_lr = base_lr * bias_lr_factor
if lr_multipliers_overwrite is not None:
for kname, mult in lr_multipliers_overwrite.items():
if kname in module_name:
# apply multiplier for the params containing kname, e.g. backbone
cur_lr = cur_lr * mult
params += [
{
"param_names": [module_name + "." + module_param_name],
"params": [value],
"lr": cur_lr,
}
]
return params
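# Example arithmetic (illustrative values): with base_lr=0.01,
# bias_lr_factor=2.0 and lr_multipliers_overwrite={"backbone": 0.1}, the bias
# of "backbone.stem.conv" gets lr = 0.01 * 2.0 * 0.1 = 0.002, while a head
# weight keeps lr = 0.01.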
def get_optimizer_param_groups_weight_decay(
model: OptimizerModelsType,
weight_decay: Optional[float],
weight_decay_norm: Optional[float] = None,
weight_decay_bias: Optional[float] = None,
weight_decay_embed: Optional[float] = None,
weight_decay_overwrite: Optional[Dict[str, float]] = None,
):
"""
Allow setting up weight decay for normalization, embedding and bias
"""
if weight_decay_norm is None:
weight_decay_norm = weight_decay
if weight_decay_bias is None:
weight_decay_bias = weight_decay
if weight_decay_embed is None:
weight_decay_embed = weight_decay
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
for (
module_name,
module,
module_param_name,
value,
) in iterate_module_named_parameters(model):
cur_wd = weight_decay
if isinstance(module, norm_module_types):
cur_wd = weight_decay_norm
elif isinstance(module, torch.nn.Embedding):
cur_wd = weight_decay_embed
elif module_param_name == "bias":
cur_wd = weight_decay_bias
if weight_decay_overwrite is not None:
for kname, wd in weight_decay_overwrite.items():
if kname in module_param_name:
cur_wd = wd
if cur_wd is not None:
params += [
{
"param_names": [module_name + "." + module_param_name],
"params": [value],
"weight_decay": cur_wd,
}
]
return params
def get_optimizer_param_groups_override(
model: OptimizerModelsType,
overrides: Optional[Dict[str, Dict[str, float]]] = None,
):
"""
Allow setting up overrides for parameter groups
overrides (dict: str -> (dict: str -> float)):
if not `None`, provides values for optimizer hyperparameters
(LR, weight decay) for module parameters with a given name; e.g.
{"embedding": {"lr": 0.01, "weight_decay": 0.1}} will set the LR and
weight decay values for all module parameters named `embedding` (default: None)
"""
params: List[Dict[str, Any]] = []
if overrides is None:
return params
for (
_module_name,
_module,
module_param_name,
value,
) in iterate_module_named_parameters(model):
schedule_params = {}
if module_param_name in overrides:
schedule_params.update(overrides[module_param_name])
params += [{"params": [value], **schedule_params}]
return params
def maybe_add_gradient_clipping(cfg, optim): # optim: the optimizer class
# detectron2 doesn't support full-model gradient clipping yet
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
and clip_norm_val > 0.0
)
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
if enable:
return FullModelGradientClippingOptimizer
return d2_maybe_add_gradient_clipping(cfg, optim)
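# A minimal usage sketch (cfg values are illustrative): with
# CLIP_GRADIENTS.ENABLED=True, CLIP_TYPE="full_model" and CLIP_VALUE=1.0,
# the returned class is a subclass of the given optimizer whose step() first
# clips the global grad norm across all param groups:
#
#     opt_cls = maybe_add_gradient_clipping(cfg, torch.optim.SGD)
#     optimizer = opt_cls(model.parameters(), lr=0.1)
#     loss.backward()
#     optimizer.step()  # clip_grad_norm_ runs before the SGD update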
def _merge_dict(in_dict):
ret_dict = {}
assert all(isinstance(x, dict) for x in in_dict)
for dic in in_dict:
ret_dict.update(dic)
return ret_dict
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def sgd(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params=params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
foreach=True,
)
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def adam(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim.Adam)(
params=params, lr=cfg.SOLVER.BASE_LR, betas=cfg.SOLVER.BETAS, eps=cfg.SOLVER.EPS
)
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def adamw(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim.AdamW)(
params=params,
lr=cfg.SOLVER.BASE_LR,
betas=cfg.SOLVER.BETAS,
eps=cfg.SOLVER.EPS,
foreach=not cfg.SOLVER.FUSED,
fused=cfg.SOLVER.FUSED,
)
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def sgd_mt(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build a multi_tensor SGD optimizer that works significantly faster.
This version is expected to be the default implementation for SGD
optimizer by end of H1'21. To benefit from the speedup, the number
of parameter groups needs to be reduced using `reduce_param_groups`.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim._multi_tensor.SGD)(
params=params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
)
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def adamw_mt(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build a multi_tensor adamw optimizer that works significantly faster.
This version is expected to be the default implementation for adamw
optimizer by end of H1'21. To benefit from the speedup, the number
of parameter groups needs to be reduced using `reduce_param_groups`.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim._multi_tensor.AdamW)(
params=params, lr=cfg.SOLVER.BASE_LR, eps=cfg.SOLVER.EPS
)
def build_optimizer_mapper(cfg, model):
name = cfg.SOLVER.OPTIMIZER
optimizer = D2GO_OPTIM_MAPPER_REGISTRY.get(name.lower())(cfg, model)
def _param_group_str(group, verbose=False):
ret = {x: y for x, y in group.items() if x != "params" and x != "param_names"}
ret["params"] = len(group["params"])
ret = sorted(ret.items())
ret = [f"{x[0]}: {x[1]}" for x in ret]
if verbose and "param_names" in group:
param_name_str = "\n" + "\n".join(group["param_names"]) + "\n"
ret.append(f"param_names: {param_name_str}")
ret = "{" + ", ".join(ret) + "}"
return ret
def _param_groups_str(groups, verbose=False):
ret = ""
for idx, group in enumerate(groups):
ret += f"Param group {idx}: {_param_group_str(group, verbose=verbose)}\n"
return ret
logger.info(f"Using optimizer:\n{optimizer}")
logger.info(
f"optimizer parameter groups:\n{_param_groups_str(optimizer.param_groups)}"
)
if (
comm.is_main_process()
and hasattr(cfg, "OUTPUT_DIR")
and PathManager.isdir(cfg.OUTPUT_DIR)
):
param_groups_str_verbose = _param_groups_str(
optimizer.param_groups, verbose=True
)
output_file = os.path.join(cfg.OUTPUT_DIR, "param_groups.txt")
if PathManager.isfile(output_file):
logger.warning("param_groups.txt already exists")
else:
logger.info(f"Write parameter groups to file: {output_file}")
with PathManager.open(output_file, "w") as f:
f.write(param_groups_str_verbose)
return optimizer
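# A minimal usage sketch (config values are illustrative): the optimizer is
# selected by name through the registry,
#
#     cfg.SOLVER.OPTIMIZER = "adamw"
#     optimizer = build_optimizer_mapper(cfg, model)
#
# and custom optimizers can be added via @D2GO_OPTIM_MAPPER_REGISTRY.register()
# on a function taking (cfg, model) and returning a torch.optim.Optimizer.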
|
d2go-main
|
d2go/optimizer/build.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.optimizer.build import build_optimizer_mapper
__all__ = ["build_optimizer_mapper"]
|
d2go-main
|
d2go/optimizer/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import copy
import logging
from typing import List
from unittest import mock
import yaml
from d2go.config.utils import reroute_config_path, resolve_default_config
from detectron2.config import CfgNode as _CfgNode
from fvcore.common.registry import Registry
logger = logging.getLogger(__name__)
CONFIG_CUSTOM_PARSE_REGISTRY = Registry("CONFIG_CUSTOM_PARSE")
def _opts_to_dict(opts: List[str]):
ret = {}
for full_key, v in zip(opts[0::2], opts[1::2]):
keys = full_key.split(".")
cur = ret
for key in keys[:-1]:
if key not in cur:
cur[key] = {}
cur = cur[key]
cur[keys[-1]] = v
return ret
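# A minimal check of the expected mapping (values stay strings here; yacs
# coerces types later during merge_from_other_cfg):
assert _opts_to_dict(["SOLVER.BASE_LR", "0.01", "MODEL.DEVICE", "cpu"]) == {
    "SOLVER": {"BASE_LR": "0.01"},
    "MODEL": {"DEVICE": "cpu"},
}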
class CfgNode(_CfgNode):
@classmethod
def cast_from_other_class(cls, other_cfg):
"""Cast an instance of other CfgNode to D2Go's CfgNode (or its subclass)"""
new_cfg = cls(other_cfg)
# copy all fields inside __dict__, this will preserve fields like __deprecated_keys__
for k, v in other_cfg.__dict__.items():
new_cfg.__dict__[k] = v
return new_cfg
def merge_from_file(self, cfg_filename: str, *args, **kwargs):
cfg_filename = reroute_config_path(cfg_filename)
with reroute_load_yaml_with_base():
res = super().merge_from_file(cfg_filename, *args, **kwargs)
self._run_custom_processing(is_dump=False)
return res
def merge_from_list(self, cfg_list: List[str]):
# NOTE: YACS's original merge_from_list can't handle non-existent keys even if
# new_allowed is set; override the method to support this.
override_cfg = _opts_to_dict(cfg_list)
res = super().merge_from_other_cfg(CfgNode(override_cfg))
self._run_custom_processing(is_dump=False)
return res
def dump(self, *args, **kwargs):
cfg = copy.deepcopy(self)
cfg._run_custom_processing(is_dump=True)
return super(CfgNode, cfg).dump(*args, **kwargs)
@staticmethod
def load_yaml_with_base(filename: str, *args, **kwargs):
filename = reroute_config_path(filename)
with reroute_load_yaml_with_base():
return _CfgNode.load_yaml_with_base(filename, *args, **kwargs)
def __hash__(self):
# dump follows alphabetical order, thus good for hash use
return hash(self.dump())
def _run_custom_processing(self, is_dump=False):
"""Apply config load post custom processing from registry"""
frozen = self.is_frozen()
self.defrost()
for name, process_func in CONFIG_CUSTOM_PARSE_REGISTRY:
logger.info(f"Apply config processing: {name}, is_dump={is_dump}")
process_func(self, is_dump)
if frozen:
self.freeze()
def get_default_cfg(self):
"""Return the defaults for this instance of CfgNode"""
return resolve_default_config(self)
@contextlib.contextmanager
def temp_defrost(cfg):
is_frozen = cfg.is_frozen()
if is_frozen:
cfg.defrost()
yield cfg
if is_frozen:
cfg.freeze()
@contextlib.contextmanager
def temp_new_allowed(cfg: CfgNode):
is_new_allowed = cfg.is_new_allowed()
cfg.set_new_allowed(True)
yield cfg
cfg.set_new_allowed(is_new_allowed)
@contextlib.contextmanager
def reroute_load_yaml_with_base():
BASE_KEY = "_BASE_"
_safe_load = yaml.safe_load
_unsafe_load = yaml.unsafe_load
def _reroute_base(cfg):
if BASE_KEY in cfg:
if isinstance(cfg[BASE_KEY], list):
cfg[BASE_KEY] = [reroute_config_path(x) for x in cfg[BASE_KEY]]
else:
cfg[BASE_KEY] = reroute_config_path(cfg[BASE_KEY])
return cfg
def mock_safe_load(f):
cfg = _safe_load(f)
cfg = _reroute_base(cfg)
return cfg
def mock_unsafe_load(f):
cfg = _unsafe_load(f)
cfg = _reroute_base(cfg)
return cfg
with mock.patch("yaml.safe_load", side_effect=mock_safe_load):
with mock.patch("yaml.unsafe_load", side_effect=mock_unsafe_load):
yield
CONFIG_SCALING_METHOD_REGISTRY = Registry("CONFIG_SCALING_METHOD")
def auto_scale_world_size(cfg, new_world_size):
"""
Usually the config file is written for a specific number of devices, this method
scales the config (in-place!) according to the actual world size using the
pre-registered scaling methods specified as cfg.SOLVER.AUTO_SCALING_METHODS.
Note for registering scaling methods:
- The method will only be called when scaling is needed. It won't be called
if SOLVER.REFERENCE_WORLD_SIZE is 0 or equal to target world size. Thus
cfg.SOLVER.REFERENCE_WORLD_SIZE will always be positive.
- The method updates cfg in-place, no return is required.
- No need for changing SOLVER.REFERENCE_WORLD_SIZE.
Args:
cfg (CfgNode): original config which contains SOLVER.REFERENCE_WORLD_SIZE and
SOLVER.AUTO_SCALING_METHODS.
new_world_size: the target world size
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == new_world_size:
return cfg
if len(cfg.SOLVER.AUTO_SCALING_METHODS) == 0:
return cfg
original_cfg = cfg.clone()
frozen = original_cfg.is_frozen()
cfg.defrost()
assert len(cfg.SOLVER.AUTO_SCALING_METHODS) > 0, cfg.SOLVER.AUTO_SCALING_METHODS
for scaling_method in cfg.SOLVER.AUTO_SCALING_METHODS:
logger.info("Applying auto scaling method: {}".format(scaling_method))
CONFIG_SCALING_METHOD_REGISTRY.get(scaling_method)(cfg, new_world_size)
assert (
cfg.SOLVER.REFERENCE_WORLD_SIZE == old_world_size
), "Scaling methods shouldn't change SOLVER.REFERENCE_WORLD_SIZE"
cfg.SOLVER.REFERENCE_WORLD_SIZE = new_world_size
if frozen:
cfg.freeze()
from d2go.config.utils import get_cfg_diff_table
table = get_cfg_diff_table(cfg, original_cfg)
logger.info("Auto-scaled the config according to the actual world size: \n" + table)
def load_full_config_from_file(filename: str) -> CfgNode:
loaded_cfg = CfgNode.load_yaml_with_base(filename)
loaded_cfg = CfgNode(loaded_cfg) # cast Dict to CfgNode
cfg = loaded_cfg.get_default_cfg()
cfg.merge_from_other_cfg(loaded_cfg)
return cfg
def convert_cfg_to_dict(cfg):
if not isinstance(cfg, CfgNode):
return cfg
else:
cfg_dict = dict(cfg)
for k, v in cfg_dict.items():
cfg_dict[k] = convert_cfg_to_dict(v)
return cfg_dict
|
d2go-main
|
d2go/config/config.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# forward the namespace so users can import from `d2go.config` instead of `d2go.config.config`
from d2go.config.config import (
auto_scale_world_size,
CfgNode,
CONFIG_CUSTOM_PARSE_REGISTRY,
CONFIG_SCALING_METHOD_REGISTRY,
convert_cfg_to_dict,
load_full_config_from_file,
temp_defrost,
temp_new_allowed,
)
from d2go.config.utils import reroute_config_path
__all__ = [
"CONFIG_CUSTOM_PARSE_REGISTRY",
"CONFIG_SCALING_METHOD_REGISTRY",
"CfgNode",
"auto_scale_world_size",
"convert_cfg_to_dict",
"load_full_config_from_file",
"reroute_config_path",
"temp_defrost",
"temp_new_allowed",
]
|
d2go-main
|
d2go/config/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from enum import Enum
from typing import Any, Dict, List
import pkg_resources
from d2go.registry.builtin import CONFIG_UPDATER_REGISTRY
from mobile_cv.common.misc.oss_utils import fb_overwritable
logger = logging.getLogger(__name__)
DEFAULTS_KEY = "_DEFAULTS_"
def reroute_config_path(path: str) -> str:
"""
Supporting rerouting the config files for convenience:
d2go:// -> mobile-vision/d2go/...
detectron2go:// -> mobile-vision/d2go/configs/...
detectron2:// -> vision/fair/detectron2/configs/...
These configs are considered code, so they'll reflect your current checkout;
try using canary if you have local changes.
"""
assert isinstance(path, str), path
if path.startswith("d2go://"):
rel_path = path[len("d2go://") :]
return pkg_resources.resource_filename("d2go", rel_path)
elif path.startswith("detectron2go://"):
rel_path = path[len("detectron2go://") :]
return pkg_resources.resource_filename(
"d2go", os.path.join("configs", rel_path)
)
elif path.startswith("detectron2://"):
rel_path = path[len("detectron2://") :]
return pkg_resources.resource_filename(
"detectron2.model_zoo", os.path.join("configs", rel_path)
)
else:
return reroute_fb_config_path(path)
@fb_overwritable()
def reroute_fb_config_path(path: str) -> str:
return path
def flatten_config_dict(dic, reorder=True):
"""
Flattens a nested dict into a single-layer dict, for example:
flatten_config_dict({
"MODEL": {
"FBNET_V2": {
"ARCH_DEF": "val0",
"ARCH": "val1",
},
}
})
=> {"MODEL.FBNET_V2.ARCH_DEF": "val0", "MODEL.FBNET_V2.ARCH": "val1"}
Args:
dic (dict or CfgNode): a nested dict whose keys are strings.
reorder (bool): if True, the returned dict will be sorted according to the keys;
otherwise original order will be preserved.
Returns:
dic: a single-layer dict
"""
return _flatten_config_dict(dic, reorder=reorder, prefix="")
def _flatten_config_dict(x, reorder, prefix):
if not isinstance(x, dict):
return {prefix: x}
d = {}
for k in sorted(x.keys()) if reorder else x.keys():
v = x[k]
new_key = f"{prefix}.{k}" if prefix else k
d.update(_flatten_config_dict(v, reorder, new_key))
return d
def config_dict_to_list_str(config_dict: Dict) -> List[str]:
"""Creates a list of str given configuration dict
This can be useful to generate pretraining or overwrite opts
in D2Go when a user has config_dict
"""
d = flatten_config_dict(config_dict)
str_list = []
for k, v in d.items():
str_list.append(k)
str_list.append(str(v))
return str_list
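# A minimal check of the two helpers above (illustrative values):
assert flatten_config_dict({"MODEL": {"DEVICE": "cpu"}}) == {"MODEL.DEVICE": "cpu"}
assert config_dict_to_list_str({"MODEL": {"DEVICE": "cpu"}, "SEED": 1}) == [
    "MODEL.DEVICE",
    "cpu",
    "SEED",
    "1",
]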
def get_from_flattened_config_dict(dic, flattened_key, default=None):
"""
Reads out a value from the nested config dict using flattened config key (i.e. all
keys from each level put together with "." separator), the default value is returned
if the flattened key doesn't exist.
e.g. if the config dict is
MODEL:
TEST:
SCORE_THRESHOLD: 0.7
Then to access the value of SCORE_THRESHOLD, this API should be called
>> score_threshold = get_from_flattened_config_dict(cfg, "MODEL.TEST.SCORE_THRESHOLD")
"""
for k in flattened_key.split("."):
if k not in dic:
return default
dic = dic[k]
return dic
def get_cfg_diff_table(cfg, original_cfg):
"""
Return a table showing the differences between two config dicts side by side
"""
all_old_keys = list(flatten_config_dict(original_cfg, reorder=True).keys())
all_new_keys = list(flatten_config_dict(cfg, reorder=True).keys())
diff_table = []
if all_old_keys != all_new_keys:
logger = logging.getLogger(__name__)
mismatched_old_keys = set(all_old_keys) - set(all_new_keys)
mismatched_new_keys = set(all_new_keys) - set(all_old_keys)
logger.warning(
"Config key mismatched.\n"
f"Mismatched old keys: {mismatched_old_keys}\n"
f"Mismatched new keys: {mismatched_new_keys}"
)
for old_key in mismatched_old_keys:
old_value = get_from_flattened_config_dict(original_cfg, old_key)
diff_table.append([old_key, old_value, "Key does not exist"])
for new_key in mismatched_new_keys:
new_value = get_from_flattened_config_dict(cfg, new_key)
diff_table.append([new_key, "Key does not exist", new_value])
# filter out mis-matched keys
all_old_keys = [x for x in all_old_keys if x not in mismatched_old_keys]
all_new_keys = [x for x in all_new_keys if x not in mismatched_new_keys]
for full_key in all_new_keys:
old_value = get_from_flattened_config_dict(original_cfg, full_key)
new_value = get_from_flattened_config_dict(cfg, full_key)
if old_value != new_value:
diff_table.append([full_key, old_value, new_value])
from tabulate import tabulate
table = tabulate(
diff_table,
tablefmt="pipe",
headers=["config key", "old value", "new value"],
)
return table
def get_diff_cfg(old_cfg, new_cfg):
"""
outputs a CfgNode containing keys/values that appear in new_cfg but not in old_cfg.
old_cfg: CfgNode, the original config, usually the default
new_cfg: CfgNode, the full config being passed by the user
if `new_allowed` is not set on old_cfg, new keys raise a KeyError
returns: CfgNode, a config containing only the key/value changes between old_cfg and new_cfg
example:
Cfg1:
SYSTEM:
NUM_GPUS: 2
TRAIN:
SCALES: (1, 2)
DATASETS:
train_2017:
17: 1
18: 1
Cfg2:
SYSTEM:
NUM_GPUS: 2
TRAIN:
SCALES: (4, 5, 8)
DATASETS:
train_2017:
17: 1
18: 1
get_diff_cfg(Cfg1, Cfg2) gives:
TRAIN:
SCALES: (4, 5, 8)
"""
def get_diff_cfg_rec(old_cfg, new_cfg, out):
for key in new_cfg.keys():
if key not in old_cfg.keys() and old_cfg.is_new_allowed():
out[key] = new_cfg[key]
elif old_cfg[key] != new_cfg[key]:
if type(new_cfg[key]) is type(out):
out[key] = out.__class__()
out[key] = get_diff_cfg_rec(old_cfg[key], new_cfg[key], out[key])
else:
out[key] = new_cfg[key]
return out
out = new_cfg.__class__()
diff_cfg = get_diff_cfg_rec(old_cfg, new_cfg, out)
# Keep the `_DEFAULTS_` even though they should be the same
old_defaults = old_cfg.get(DEFAULTS_KEY, None)
new_defaults = new_cfg.get(DEFAULTS_KEY, None)
assert (
old_defaults == new_defaults
), f"{DEFAULTS_KEY} doesn't match! old ({old_defaults}) vs new ({new_defaults})"
if new_defaults is not None:
diff_cfg[DEFAULTS_KEY] = new_defaults
return diff_cfg
def namedtuple_to_dict(obj: Any):
"""Convert NamedTuple or dataclass to dict so it can be used as config"""
res = {}
for k, v in obj.__dict__.items():
if isinstance(v, Enum):
# in case of enum, serialize the enum value
res[k] = v.value
else:
res[k] = v
return res
def resolve_default_config(cfg):
if DEFAULTS_KEY not in cfg:
raise ValueError(
f"Can't resolved default config because `{DEFAULTS_KEY}` is"
f" missing from cfg: \n{cfg}"
)
updater_names: List[str] = cfg[DEFAULTS_KEY]
assert isinstance(updater_names, list), updater_names
assert all(isinstance(x, str) for x in updater_names), updater_names
logger.info(f"Resolving default config by applying updaters: {updater_names} ...")
# starting from an empty CfgNode, sequentially apply the updaters
cfg = type(cfg)()
for name in updater_names:
updater = CONFIG_UPDATER_REGISTRY.get(name)
cfg = updater(cfg)
# the resolved default config should keep the same `_DEFAULTS_` updaters
cfg[DEFAULTS_KEY] = updater_names
return cfg
|
d2go-main
|
d2go/config/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import List
from d2go.config import CfgNode
from d2go.utils.gpu_memory_profiler import log_memory_snapshot, record_memory_history
from detectron2.engine.train_loop import HookBase
from detectron2.utils.registry import Registry
logger = logging.getLogger(__name__)
# List of functions to add hooks for trainer, all functions in the registry will
# be called to add hooks
# func(hooks: List[HookBase]) -> None
TRAINER_HOOKS_REGISTRY = Registry("TRAINER_HOOKS_REGISTRY")
def update_hooks_from_registry(hooks: List[HookBase], cfg: CfgNode):
for name, hook_func in TRAINER_HOOKS_REGISTRY:
logger.info(f"Update trainer hooks from {name}...")
hook_func(hooks, cfg)
class D2GoGpuMemorySnapshot(HookBase):
"""
A profiler that logs GPU memory snapshot during training.
There are three places that logging could happen:
1. start of training
d2go records memory snapshots before model instantiation and logs snapshots after `log_n_steps` iterations.
This is to capture the typical memory peak at model instantiation and the first few iterations
2. during training
d2go records memory snapshots at `log_during_train_at` iteration and logs snapshots after `log_n_steps` iterations.
This is to capture the stabilized memory utilization during training.
3. OOM
Right before OOM, the GPU memory snapshot will be logged to help diagnose OOM issues.
"""
def __init__(
self,
output_dir,
log_n_steps: int = 3,
log_during_train_at: int = 550,
trace_max_entries: int = 1000000,
) -> None:
self.output_dir = output_dir
self.step = 0
self.log_n_steps = log_n_steps
self.log_during_train_at = log_during_train_at
self.trace_max_entries = trace_max_entries
logger.warning(
"WARNING: Memory snapshot profiler is enabled. This may cause ranks to die and training jobs to get stuck. Please use with caution."
)
def before_step(self):
if self.trainer.iter == self.log_during_train_at:
record_memory_history(self.trace_max_entries)
def after_step(self):
if self.step == self.log_n_steps - 1:
log_memory_snapshot(self.output_dir, file_prefix=f"iter{self.trainer.iter}")
if self.trainer.iter == self.log_during_train_at + self.log_n_steps - 1:
log_memory_snapshot(self.output_dir, file_prefix=f"iter{self.trainer.iter}")
self.step += 1
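def _example_add_snapshot_hook(hooks: List[HookBase], cfg: CfgNode) -> None:
    """
    A hypothetical, unregistered helper showing the hook-function signature
    expected by TRAINER_HOOKS_REGISTRY / update_hooks_from_registry.
    """
    hooks.append(D2GoGpuMemorySnapshot(cfg.OUTPUT_DIR))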
|
d2go-main
|
d2go/runner/training_hooks.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
from typing import Optional, Type, Union
from d2go.runner.api import RunnerV2Mixin
from d2go.runner.default_runner import (
BaseRunner,
Detectron2GoRunner,
GeneralizedRCNNRunner,
)
from d2go.runner.lightning_task import DefaultTask
from d2go.runner.training_hooks import TRAINER_HOOKS_REGISTRY
__all__ = [
"RunnerV2Mixin",
"BaseRunner",
"Detectron2GoRunner",
"GeneralizedRCNNRunner",
"TRAINER_HOOKS_REGISTRY",
"create_runner",
"import_runner",
]
# TODO: remove this function
def create_runner(
class_full_name: Optional[str], *args, **kwargs
) -> Union[BaseRunner, Type[DefaultTask]]:
"""Constructs a runner instance if class is a d2go runner. Returns class
type if class is a Lightning module.
"""
if class_full_name is None:
runner_class = GeneralizedRCNNRunner
else:
runner_class = import_runner(class_full_name)
if issubclass(runner_class, DefaultTask):
# Return runner class for Lightning module since it requires config
# to construct
return runner_class
return runner_class(*args, **kwargs)
def import_runner(
class_full_name: str, check: bool = True
) -> Type[Union[BaseRunner, DefaultTask]]:
runner_module_name, runner_class_name = class_full_name.rsplit(".", 1)
runner_module = importlib.import_module(runner_module_name)
runner_class = getattr(runner_module, runner_class_name)
if check and not (
issubclass(runner_class, BaseRunner) ^ issubclass(runner_class, DefaultTask)
):
raise ValueError(
f"The runner must be subclass of either `BaseRunner` or `DefaultTaks`,"
f" found: {runner_class}"
)
return runner_class
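def _example_create_runner() -> BaseRunner:
    """
    A minimal sketch: resolve a runner class by its full name and construct
    it (d2go runners take no required constructor arguments).
    """
    runner_class = import_runner("d2go.runner.Detectron2GoRunner")
    return runner_class()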
|
d2go-main
|
d2go/runner/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import final
from d2go.config import CfgNode
class RunnerV2Mixin(object):
"""
Interface for (V2) Runner:
- `get_default_cfg` is not a runner method anymore.
"""
@classmethod
@final
def get_default_cfg(cls) -> CfgNode:
raise NotImplementedError("")
|
d2go-main
|
d2go/runner/api.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.config import CfgNode as CN
from d2go.data.build import (
add_random_subset_training_sampler_default_configs,
add_weighted_training_sampler_default_configs,
)
from d2go.data.config import add_d2go_data_default_configs
from d2go.modeling.backbone.fbnet_cfg import add_fbnet_v2_default_configs
from d2go.modeling.ema import add_model_ema_configs
from d2go.modeling.kmeans_anchors import add_kmeans_anchors_cfg
from d2go.modeling.meta_arch.fcos import add_fcos_configs
from d2go.modeling.model_freezing_utils import add_model_freezing_configs
from d2go.modeling.subclass import add_subclass_configs
from d2go.quantization.modeling import add_quantization_default_configs
from d2go.registry.builtin import CONFIG_UPDATER_REGISTRY
from d2go.trainer.activation_checkpointing import add_activation_checkpoint_configs
from d2go.trainer.fsdp import add_fsdp_configs
from d2go.utils.gpu_memory_profiler import (
add_memory_profiler_configs,
add_zoomer_default_config,
)
from d2go.utils.visualization import add_tensorboard_default_configs
from detectron2.config import get_cfg as get_d2_cfg
from mobile_cv.common.misc.oss_utils import fb_overwritable
def _add_abnormal_checker_configs(_C: CN) -> None:
_C.ABNORMAL_CHECKER = CN()
# check and log the iteration with bad losses if enabled
_C.ABNORMAL_CHECKER.ENABLED = False
@fb_overwritable()
def _add_detectron2go_runner_default_fb_cfg(_C: CN) -> None:
pass
@fb_overwritable()
def _add_base_runner_default_fb_cfg(_C: CN) -> None:
pass
def add_distillation_configs(_C: CN) -> None:
"""Add default parameters to config
The TEACHER.CONFIG_FNAME field allows us to build a PyTorch model using an
existing config. We can build any model that is normally supported by
D2Go (e.g., FBNet) because we just use the same config
"""
_C.DISTILLATION = CN()
_C.DISTILLATION.ALGORITHM = "LabelDistillation"
_C.DISTILLATION.HELPER = "BaseDistillationHelper"
_C.DISTILLATION.TEACHER = CN()
_C.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = ""
_C.DISTILLATION.TEACHER.DEVICE = ""
_C.DISTILLATION.TEACHER.TYPE = "torchscript"
_C.DISTILLATION.TEACHER.CONFIG_FNAME = ""
_C.DISTILLATION.TEACHER.RUNNER_NAME = "d2go.runner.GeneralizedRCNNRunner"
_C.DISTILLATION.TEACHER.OVERWRITE_OPTS = []
def _add_detectron2go_runner_default_cfg(_C: CN) -> None:
# _C.MODEL.FBNET_V2...
add_fbnet_v2_default_configs(_C)
# _C.MODEL.FROZEN_LAYER_REG_EXP
add_model_freezing_configs(_C)
# _C.MODEL other models
add_model_ema_configs(_C)
# _C.D2GO_DATA...
add_d2go_data_default_configs(_C)
# _C.TENSORBOARD...
add_tensorboard_default_configs(_C)
# _C.MODEL.KMEANS...
add_kmeans_anchors_cfg(_C)
# _C.QUANTIZATION
add_quantization_default_configs(_C)
# _C.DATASETS.TRAIN_REPEAT_FACTOR
add_weighted_training_sampler_default_configs(_C)
# _C.DATALOADER.RANDOM_SUBSET_RATIO
add_random_subset_training_sampler_default_configs(_C)
# _C.ABNORMAL_CHECKER
_add_abnormal_checker_configs(_C)
# _C.MODEL.SUBCLASS
add_subclass_configs(_C)
# _C.MODEL.FCOS
add_fcos_configs(_C)
# _C.DISTILLATION
add_distillation_configs(_C)
# _C.FSDP
add_fsdp_configs(_C)
# _C.ACTIVATION_CHECKPOINT
add_activation_checkpoint_configs(_C)
# Set find_unused_parameters for DistributedDataParallel.
_C.MODEL.DDP_FIND_UNUSED_PARAMETERS = False
# Set FP16 gradient compression for DistributedDataParallel.
_C.MODEL.DDP_FP16_GRAD_COMPRESS = False
# Specify the gradients as views
_C.MODEL.DDP_GRADIENT_AS_BUCKET_VIEW = False
# Set default optimizer
_C.SOLVER.OPTIMIZER = "sgd"
_C.SOLVER.LR_MULTIPLIER_OVERWRITE = []
_C.SOLVER.WEIGHT_DECAY_EMBED = 0.0
_C.SOLVER.WEIGHT_DECAY_OVERWRITE = []
assert not _C.SOLVER.AMP.ENABLED
# AMP precision is used by both D2 and lightning backend. Can be "float16" or "bfloat16".
_C.SOLVER.AMP.PRECISION = "float16"
# log the grad scalar to the output
_C.SOLVER.AMP.LOG_GRAD_SCALER = False
# Betas are used in the AdamW optimizer
_C.SOLVER.BETAS = (0.9, 0.999)
_C.SOLVER.EPS = 1e-08
_C.SOLVER.FUSED = False
_C.SOLVER.DETERMINISTIC = False
# RECOMPUTE_BOXES for LSJ Training
_C.INPUT.RECOMPUTE_BOXES = False
# Default world size in D2 is 0, which means scaling is not applied. For D2Go,
# auto scaling is encouraged, so set it to 8
assert _C.SOLVER.REFERENCE_WORLD_SIZE == 0
_C.SOLVER.REFERENCE_WORLD_SIZE = 8
# Besides scaling default D2 configs, also scale quantization configs
_C.SOLVER.AUTO_SCALING_METHODS = [
"default_scale_d2_configs",
"default_scale_quantization_configs",
]
# Modeling hooks
# List of modeling hook names
_C.MODEL.MODELING_HOOKS = []
# Profiler
_C.PROFILERS = ["default_flop_counter"]
# GPU memory profiler
add_memory_profiler_configs(_C)
# Zoomer memory profiling
add_zoomer_default_config(_C)
# Checkpointing-specific config
_C.LOAD_CKPT_TO_GPU = False
# Add FB specific configs
_add_detectron2go_runner_default_fb_cfg(_C)
# Specify whether to perform NUMA binding
_C.NUMA_BINDING = False
# Specify whether to zero the gradients before forward
_C.ZERO_GRAD_BEFORE_FORWARD = False
# Whether to enforce rebuilding data loaders for datasets that have expiration
_C.DATALOADER.ENFORE_EXPIRATION = False
def _add_rcnn_default_config(_C: CN) -> None:
_C.EXPORT_CAFFE2 = CN()
_C.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False
# Options about how to export the model
_C.RCNN_EXPORT = CN()
# whether or not to include the postprocess (GeneralizedRCNN._postprocess) step
# inside the exported model
_C.RCNN_EXPORT.INCLUDE_POSTPROCESS = False
_C.RCNN_PREPARE_FOR_EXPORT = "default_rcnn_prepare_for_export"
_C.RCNN_PREPARE_FOR_QUANT = "default_rcnn_prepare_for_quant"
_C.register_deprecated_key("RCNN_PREPARE_FOR_QUANT_CONVERT")
@CONFIG_UPDATER_REGISTRY.register("BaseRunner")
def get_base_runner_default_cfg(cfg: CN) -> CN:
assert len(cfg) == 0, f"start from scratch, but previous cfg is non-empty: {cfg}"
cfg = get_d2_cfg()
# upgrade from D2's CfgNode to D2Go's CfgNode
cfg = CN.cast_from_other_class(cfg)
cfg.SOLVER.AUTO_SCALING_METHODS = ["default_scale_d2_configs"]
# Frequency of metric gathering in trainer.
cfg.GATHER_METRIC_PERIOD = 1
# Frequency of metric printer, tensorboard writer, etc.
cfg.WRITER_PERIOD = 20
# Enable async writing metrics to tensorboard and logs to speed up training
cfg.ASYNC_WRITE_METRICS = False
# train_net specific arguments, defined in the runner but used in train_net
# run evaluation after training is done
cfg.TEST.FINAL_EVAL = True
_add_base_runner_default_fb_cfg(cfg)
return cfg
@CONFIG_UPDATER_REGISTRY.register("Detectron2GoRunner")
def get_detectron2go_runner_default_cfg(cfg: CN) -> CN:
assert len(cfg) == 0, f"start from scratch, but previous cfg is non-empty: {cfg}"
cfg = get_base_runner_default_cfg(cfg)
_add_detectron2go_runner_default_cfg(cfg)
return cfg
@CONFIG_UPDATER_REGISTRY.register("GeneralizedRCNNRunner")
def get_generalized_rcnn_runner_default_cfg(cfg: CN) -> CN:
assert len(cfg) == 0, f"start from scratch, but previous cfg is non-empty: {cfg}"
cfg = get_detectron2go_runner_default_cfg(cfg)
_add_rcnn_default_config(cfg)
return cfg
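def _example_resolve_rcnn_defaults() -> CN:
    """
    A minimal sketch mirroring `_DEFAULTS_` resolution: build the full
    GeneralizedRCNNRunner default config from an empty one via the registry.
    """
    updater = CONFIG_UPDATER_REGISTRY.get("GeneralizedRCNNRunner")
    return updater(CN())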
|
d2go-main
|
d2go/runner/config_defaults.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import logging
import os
from collections import OrderedDict
from functools import lru_cache
from typing import Any, List, Optional, Type, Union
import detectron2.utils.comm as comm
import torch
from d2go.checkpoint.api import is_distributed_checkpoint
from d2go.checkpoint.fsdp_checkpoint import FSDPCheckpointer
from d2go.config import CfgNode, CONFIG_SCALING_METHOD_REGISTRY, temp_defrost
from d2go.config.utils import get_cfg_diff_table
from d2go.data.build import build_d2go_train_loader
from d2go.data.dataset_mappers.build import build_dataset_mapper
from d2go.data.datasets import inject_coco_datasets, register_dynamic_datasets
from d2go.data.transforms.build import build_transform_gen
from d2go.data.utils import (
configure_dataset_creation,
maybe_subsample_n_images,
update_cfg_if_using_adhoc_dataset,
)
from d2go.distributed import D2GoSharedContext
from d2go.evaluation.evaluator import inference_on_dataset
from d2go.modeling import ema
from d2go.modeling.api import build_d2go_model
from d2go.modeling.kmeans_anchors import compute_kmeans_anchors_hook
from d2go.modeling.model_freezing_utils import freeze_matched_bn, set_requires_grad
from d2go.optimizer.build import build_optimizer_mapper
from d2go.quantization.modeling import QATHook, setup_qat_model
from d2go.runner.config_defaults import (
get_base_runner_default_cfg,
get_detectron2go_runner_default_cfg,
get_generalized_rcnn_runner_default_cfg,
)
from d2go.runner.training_hooks import (
D2GoGpuMemorySnapshot,
TRAINER_HOOKS_REGISTRY,
update_hooks_from_registry,
)
from d2go.trainer.fsdp import get_grad_scaler
from d2go.trainer.helper import parse_precision_from_string
from d2go.utils.abnormal_checker import (
AbnormalLossChecker,
AbnormalLossCheckerWrapper,
get_writers,
)
from d2go.utils.flop_calculator import attach_profilers
from d2go.utils.gpu_memory_profiler import attach_oom_logger
from d2go.utils.helper import D2Trainer, TensorboardXWriter
from d2go.utils.misc import get_tensorboard_log_dir
from d2go.utils.visualization import DataLoaderVisWrapper, VisualizationEvaluator
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.data import (
build_detection_test_loader as d2_build_detection_test_loader,
build_detection_train_loader as d2_build_detection_train_loader,
MetadataCatalog,
)
from detectron2.engine import hooks
from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer
from detectron2.evaluation import (
COCOEvaluator,
DatasetEvaluators,
LVISEvaluator,
print_csv_format,
RotatedCOCOEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
from detectron2.solver import build_lr_scheduler as d2_build_lr_scheduler
from detectron2.utils.events import CommonMetricPrinter, JSONWriter
from mobile_cv.common.misc.oss_utils import fb_overwritable
from mobile_cv.predictor.api import PredictorWrapper
from torch import nn
logger = logging.getLogger(__name__)
ALL_TB_WRITERS = []
@lru_cache()
def _get_tbx_writer(log_dir, window_size=20):
ret = TensorboardXWriter(log_dir, window_size=window_size)
ALL_TB_WRITERS.append(ret)
return ret
def _close_all_tbx_writers():
for x in ALL_TB_WRITERS:
x.close()
ALL_TB_WRITERS.clear()
@CONFIG_SCALING_METHOD_REGISTRY.register()
def default_scale_d2_configs(cfg, new_world_size):
gpu_scale = new_world_size / cfg.SOLVER.REFERENCE_WORLD_SIZE
base_lr = cfg.SOLVER.BASE_LR
base_lr_end = cfg.SOLVER.BASE_LR_END
max_iter = cfg.SOLVER.MAX_ITER
steps = cfg.SOLVER.STEPS
eval_period = cfg.TEST.EVAL_PERIOD
ims_per_batch_train = cfg.SOLVER.IMS_PER_BATCH
warmup_iters = cfg.SOLVER.WARMUP_ITERS
# lr scale
lr_scales = {
"sgd": gpu_scale,
"sgd_mt": gpu_scale,
}
optim_name = cfg.SOLVER.OPTIMIZER.lower()
# only scale the lr for the optimizers specified in `lr_scales`
lr_scale = lr_scales.get(optim_name, 1.0)
# default configs in D2
cfg.SOLVER.BASE_LR = base_lr * lr_scale
cfg.SOLVER.BASE_LR_END = base_lr_end * lr_scale
cfg.SOLVER.MAX_ITER = int(round(max_iter / gpu_scale))
cfg.SOLVER.STEPS = tuple(int(round(s / gpu_scale)) for s in steps)
cfg.TEST.EVAL_PERIOD = int(round(eval_period / gpu_scale))
cfg.SOLVER.IMS_PER_BATCH = int(round(ims_per_batch_train * gpu_scale))
cfg.SOLVER.WARMUP_ITERS = int(round(warmup_iters / gpu_scale))
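# Example arithmetic (illustrative values): with REFERENCE_WORLD_SIZE=8 and
# new_world_size=2, gpu_scale=0.25, so MAX_ITER 90000 -> 360000,
# IMS_PER_BATCH 64 -> 16, WARMUP_ITERS 1000 -> 4000, and, since "sgd" is in
# lr_scales, BASE_LR 0.02 -> 0.005.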
@CONFIG_SCALING_METHOD_REGISTRY.register()
def default_scale_quantization_configs(cfg, new_world_size):
gpu_scale = new_world_size / cfg.SOLVER.REFERENCE_WORLD_SIZE
# Scale QUANTIZATION related configs
cfg.QUANTIZATION.QAT.START_ITER = int(
round(cfg.QUANTIZATION.QAT.START_ITER / gpu_scale)
)
cfg.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER = int(
round(cfg.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER / gpu_scale)
)
cfg.QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER = int(
round(cfg.QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER / gpu_scale)
)
cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER = int(
round(cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER / gpu_scale)
)
cfg.QUANTIZATION.QAT.FREEZE_BN_ITER = int(
round(cfg.QUANTIZATION.QAT.FREEZE_BN_ITER / gpu_scale)
)
@TRAINER_HOOKS_REGISTRY.register()
def add_memory_profiler_hook(hooks, cfg: CfgNode):
# Add GPU memory snapshot profiler to diagnose GPU OOM issues and benchmark memory usage during model training
if cfg.get("MEMORY_PROFILER", CfgNode()).get("ENABLED", False):
hooks.append(
D2GoGpuMemorySnapshot(
cfg.OUTPUT_DIR,
log_n_steps=cfg.MEMORY_PROFILER.LOG_N_STEPS,
log_during_train_at=cfg.MEMORY_PROFILER.LOG_DURING_TRAIN_AT,
trace_max_entries=cfg.MEMORY_PROFILER.TRACE_MAX_ENTRIES,
)
)
@fb_overwritable()
def prepare_fb_model(cfg: CfgNode, model: torch.nn.Module) -> torch.nn.Module:
return model
@fb_overwritable()
def get_monitoring_service() -> Any:
return contextlib.nullcontext()
class BaseRunner(object):
def __init__(self):
identifier = f"D2Go.Runner.{self.__class__.__name__}"
torch._C._log_api_usage_once(identifier)
def _initialize(self, cfg):
"""Runner should be initialized in the sub-process in ddp setting"""
if getattr(self, "_has_initialized", False):
logger.warning("Runner has already been initialized, skip initialization.")
return
self._has_initialized = True
self.register(cfg)
def register(self, cfg):
"""
Override `register` in order to run customized code before other things like:
- registering datasets.
- registering model using Registry.
"""
pass
@classmethod
def create_shared_context(cls, cfg) -> D2GoSharedContext:
"""
Override `create_shared_context` in order to run customized code to create distributed shared context that can be accessed by all workers
"""
pass
@classmethod
def get_default_cfg(cls):
return get_base_runner_default_cfg(CfgNode())
def build_model(self, cfg, eval_only=False) -> nn.Module:
# cfg may need to be reused to build trace model again, thus clone
model = build_d2go_model(cfg.clone()).model
if eval_only:
checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
checkpointer.load(cfg.MODEL.WEIGHTS)
model.eval()
return model
def do_test(self, *args, **kwargs):
raise NotImplementedError()
def do_train(self, *args, **kwargs):
raise NotImplementedError()
@classmethod
def build_detection_test_loader(cls, *args, **kwargs):
return d2_build_detection_test_loader(*args, **kwargs)
@classmethod
def build_detection_train_loader(cls, *args, **kwargs):
return d2_build_detection_train_loader(*args, **kwargs)
class D2GoDataAPIMixIn:
@staticmethod
def get_mapper(cfg, is_train):
tfm_gens = build_transform_gen(cfg, is_train)
mapper = build_dataset_mapper(cfg, is_train, tfm_gens=tfm_gens)
return mapper
@classmethod
def build_detection_test_loader(
cls, cfg, dataset_name: Union[str, List[str]], mapper=None, collate_fn=None
):
logger.info(
"Building detection test loader for dataset: {} ...".format(dataset_name)
)
with configure_dataset_creation(cfg):
mapper = mapper or cls.get_mapper(cfg, is_train=False)
logger.info("Using dataset mapper:\n{}".format(mapper))
return d2_build_detection_test_loader(
cfg, dataset_name, mapper=mapper, collate_fn=collate_fn
)
@classmethod
def build_detection_train_loader(cls, cfg, *args, mapper=None, **kwargs):
with configure_dataset_creation(cfg):
mapper = mapper or cls.get_mapper(cfg, is_train=True)
data_loader = build_d2go_train_loader(cfg, mapper)
return cls._attach_visualizer_to_data_loader(cfg, data_loader)
@classmethod
def _attach_visualizer_to_data_loader(cls, cfg, data_loader):
if comm.is_main_process():
data_loader_type = cls.get_data_loader_vis_wrapper()
if data_loader_type is not None:
tbx_writer = cls.get_tbx_writer(cfg)
data_loader = data_loader_type(cfg, tbx_writer, data_loader)
return data_loader
@classmethod
def get_tbx_writer(cls, cfg):
return _get_tbx_writer(
get_tensorboard_log_dir(cfg.OUTPUT_DIR),
window_size=cfg.get("WRITER_PERIOD", 20),
)
@staticmethod
def get_data_loader_vis_wrapper() -> Optional[Type[DataLoaderVisWrapper]]:
return DataLoaderVisWrapper
@staticmethod
def get_visualization_evaluator() -> Optional[Type[VisualizationEvaluator]]:
return VisualizationEvaluator
class Detectron2GoRunner(D2GoDataAPIMixIn, BaseRunner):
def register(self, cfg):
super().register(cfg)
self.original_cfg = cfg.clone()
inject_coco_datasets(cfg)
register_dynamic_datasets(cfg)
update_cfg_if_using_adhoc_dataset(cfg)
@classmethod
def get_default_cfg(cls):
return get_detectron2go_runner_default_cfg(CfgNode())
# temporary API
def _build_model(self, cfg, eval_only=False):
# build_model might modify the cfg, thus clone
cfg = cfg.clone()
model = build_d2go_model(cfg).model
ema.may_build_model_ema(cfg, model)
if cfg.QUANTIZATION.QAT.ENABLED:
# Disable fake_quant and observer so that the model will be trained normally
# before QAT being turned on (controlled by QUANTIZATION.QAT.START_ITER).
if hasattr(model, "get_rand_input"):
imsize = cfg.INPUT.MAX_SIZE_TRAIN
rand_input = model.get_rand_input(imsize)
example_inputs = (rand_input, {})
model = setup_qat_model(
cfg,
model,
enable_fake_quant=eval_only,
enable_observer=True,
)
model(*example_inputs)
else:
imsize = cfg.INPUT.MAX_SIZE_TRAIN
model = setup_qat_model(
cfg,
model,
enable_fake_quant=eval_only,
enable_observer=False,
)
if cfg.MODEL.FROZEN_LAYER_REG_EXP:
set_requires_grad(model, cfg.MODEL.FROZEN_LAYER_REG_EXP, False)
model = freeze_matched_bn(model, cfg.MODEL.FROZEN_LAYER_REG_EXP)
if eval_only:
checkpointer = self.build_checkpointer(cfg, model, save_dir=cfg.OUTPUT_DIR)
checkpointer.load(cfg.MODEL.WEIGHTS)
model.eval()
if cfg.MODEL_EMA.ENABLED and cfg.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY:
ema.apply_model_ema(model)
return model
def build_model(self, cfg, eval_only=False):
# Attach memory profiler to GPU OOM events
if cfg.get("MEMORY_PROFILER", CfgNode()).get("ENABLED", False):
attach_oom_logger(
cfg.OUTPUT_DIR, trace_max_entries=cfg.MEMORY_PROFILER.TRACE_MAX_ENTRIES
)
model = self._build_model(cfg, eval_only)
model = prepare_fb_model(cfg, model)
# Note: the _visualize_model API is experimental
if comm.is_main_process():
if hasattr(model, "_visualize_model"):
logger.info("Adding model visualization ...")
tbx_writer = self.get_tbx_writer(cfg)
model._visualize_model(tbx_writer)
return model
def build_checkpointer(self, cfg, model, save_dir, **kwargs):
kwargs.update(ema.may_get_ema_checkpointer(cfg, model))
checkpointer = FSDPCheckpointer(model, save_dir=save_dir, **kwargs)
return checkpointer
def build_optimizer(self, cfg, model):
return build_optimizer_mapper(cfg, model)
def build_lr_scheduler(self, cfg, optimizer):
return d2_build_lr_scheduler(cfg, optimizer)
def _create_evaluators(
self,
cfg,
dataset_name,
output_folder,
train_iter,
model_tag,
model=None,
):
evaluator = self.get_evaluator(cfg, dataset_name, output_folder=output_folder)
if not isinstance(evaluator, DatasetEvaluators):
evaluator = DatasetEvaluators([evaluator])
if comm.is_main_process():
# Add evaluator for visualization only to rank 0
tbx_writer = self.get_tbx_writer(cfg)
logger.info("Adding visualization evaluator ...")
mapper = self.get_mapper(cfg, is_train=False)
vis_eval_type = self.get_visualization_evaluator()
if vis_eval_type is not None:
evaluator._evaluators.append(
vis_eval_type(
cfg,
tbx_writer,
mapper,
dataset_name,
train_iter=train_iter,
tag_postfix=model_tag,
)
)
return evaluator
def _do_test(self, cfg, model, train_iter=None, model_tag="default"):
"""train_iter: Current iteration of the model, None means final iteration"""
assert len(cfg.DATASETS.TEST)
assert cfg.OUTPUT_DIR
is_final = (train_iter is None) or (train_iter == cfg.SOLVER.MAX_ITER - 1)
logger.info(
f"Running evaluation for model tag {model_tag} at iter {train_iter}..."
)
def _get_inference_dir_name(base_dir, inference_type, dataset_name):
return os.path.join(
base_dir,
inference_type,
model_tag,
str(train_iter) if train_iter is not None else "final",
dataset_name,
)
attach_profilers(cfg, model)
results = OrderedDict()
results[model_tag] = OrderedDict()
for dataset_name in cfg.DATASETS.TEST:
# Evaluator will create output folder, no need to create here
output_folder = _get_inference_dir_name(
cfg.OUTPUT_DIR, "inference", dataset_name
)
# NOTE: creating evaluator after dataset is loaded as there might be dependency. # noqa
data_loader = self.build_detection_test_loader(cfg, dataset_name)
evaluator = self._create_evaluators(
cfg,
dataset_name,
output_folder,
train_iter,
model_tag,
model.module
if isinstance(model, nn.parallel.DistributedDataParallel)
else model,
)
results_per_dataset = inference_on_dataset(model, data_loader, evaluator)
if comm.is_main_process():
results[model_tag][dataset_name] = results_per_dataset
if is_final:
print_csv_format(results_per_dataset)
if is_final and cfg.TEST.AUG.ENABLED:
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
output_folder = _get_inference_dir_name(
cfg.OUTPUT_DIR, "inference_TTA", dataset_name
)
logger.info("Running inference with test-time augmentation ...")
data_loader = self.build_detection_test_loader(
cfg, dataset_name, mapper=lambda x: x
)
evaluator = self.get_evaluator(
cfg, dataset_name, output_folder=output_folder
)
inference_on_dataset(
GeneralizedRCNNWithTTA(cfg, model), data_loader, evaluator
)
if is_final and cfg.TEST.EXPECTED_RESULTS and comm.is_main_process():
assert len(results) == 1, "Results verification only supports one dataset!"
verify_results(cfg, results[model_tag][cfg.DATASETS.TEST[0]])
# write results to tensorboard
if comm.is_main_process() and results:
from detectron2.evaluation.testing import flatten_results_dict
flattened_results = flatten_results_dict(results)
for k, v in flattened_results.items():
tbx_writer = self.get_tbx_writer(cfg)
tbx_writer._writer.add_scalar("eval_{}".format(k), v, train_iter)
if comm.is_main_process():
tbx_writer = self.get_tbx_writer(cfg)
tbx_writer._writer.flush()
return results
def do_test(self, cfg, model, train_iter=None):
"""do_test does not load the weights of the model.
If you want to use it outside the regular training routine,
you will have to load the weights through a checkpointer.
"""
results = OrderedDict()
with maybe_subsample_n_images(cfg) as new_cfg:
# default model
cur_results = self._do_test(
new_cfg, model, train_iter=train_iter, model_tag="default"
)
results.update(cur_results)
# model with ema weights
if cfg.MODEL_EMA.ENABLED and not isinstance(model, PredictorWrapper):
logger.info("Run evaluation with EMA.")
with ema.apply_model_ema_and_restore(model):
cur_results = self._do_test(
new_cfg, model, train_iter=train_iter, model_tag="ema"
)
results.update(cur_results)
return results
def _get_trainer_hooks(
self, cfg, model, optimizer, scheduler, periodic_checkpointer, trainer
):
return [
hooks.IterationTimer(),
ema.EMAHook(cfg, model) if cfg.MODEL_EMA.ENABLED else None,
self._create_data_loader_hook(cfg),
self._create_after_step_hook(
cfg, model, optimizer, scheduler, periodic_checkpointer
),
hooks.EvalHook(
cfg.TEST.EVAL_PERIOD,
lambda: self.do_test(cfg, model, train_iter=trainer.iter),
eval_after_train=False, # done by a separate do_test call in tools/train_net.py
),
compute_kmeans_anchors_hook(self, cfg),
self._create_qat_hook(cfg) if cfg.QUANTIZATION.QAT.ENABLED else None,
]
def do_train(self, cfg, model, resume):
with get_monitoring_service():
# Note that flops at the beginning of training is often inaccurate,
# if a model has input-dependent logic
attach_profilers(cfg, model)
if cfg.NUMA_BINDING is True:
import numa
num_gpus_per_node = comm.get_local_size()
num_sockets = numa.get_max_node() + 1
socket_id = torch.cuda.current_device() // (
max(num_gpus_per_node // num_sockets, 1)
)
node_mask = set([socket_id])
numa.bind(node_mask)
optimizer = self.build_optimizer(cfg, model)
scheduler = self.build_lr_scheduler(cfg, optimizer)
checkpointer = self.build_checkpointer(
cfg,
model,
save_dir=cfg.OUTPUT_DIR,
load_ckpt_to_gpu=cfg.LOAD_CKPT_TO_GPU,
optimizer=optimizer,
scheduler=scheduler,
)
checkpoint = checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume)
start_iter = (
checkpoint.get("iteration", -1)
if resume and checkpointer.has_checkpoint()
else -1
)
del checkpoint
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
start_iter += 1
max_iter = cfg.SOLVER.MAX_ITER
periodic_checkpointer = PeriodicCheckpointer(
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
)
data_loader = self.build_detection_train_loader(cfg)
def _get_model_with_abnormal_checker(model):
if not cfg.ABNORMAL_CHECKER.ENABLED:
return model
tbx_writer = self.get_tbx_writer(cfg)
writers = get_writers(cfg, tbx_writer)
checker = AbnormalLossChecker(start_iter, writers)
ret = AbnormalLossCheckerWrapper(model, checker)
return ret
if cfg.SOLVER.AMP.ENABLED:
trainer = AMPTrainer(
_get_model_with_abnormal_checker(model),
data_loader,
optimizer,
gather_metric_period=cfg.GATHER_METRIC_PERIOD,
zero_grad_before_forward=cfg.ZERO_GRAD_BEFORE_FORWARD,
grad_scaler=get_grad_scaler(cfg),
precision=parse_precision_from_string(
cfg.SOLVER.AMP.PRECISION, lightning=False
),
log_grad_scaler=cfg.SOLVER.AMP.LOG_GRAD_SCALER,
async_write_metrics=cfg.ASYNC_WRITE_METRICS,
)
else:
trainer = SimpleTrainer(
_get_model_with_abnormal_checker(model),
data_loader,
optimizer,
gather_metric_period=cfg.GATHER_METRIC_PERIOD,
zero_grad_before_forward=cfg.ZERO_GRAD_BEFORE_FORWARD,
async_write_metrics=cfg.ASYNC_WRITE_METRICS,
)
if cfg.SOLVER.AMP.ENABLED and torch.cuda.is_available():
                # Allow use of the TensorFloat32 (TF32) tensor cores, available on A100 GPUs.
# For more details https://pytorch.org/docs/stable/notes/cuda.html#tf32-on-ampere.
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
elif cfg.SOLVER.DETERMINISTIC:
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
trainer_hooks = self._get_trainer_hooks(
cfg, model, optimizer, scheduler, periodic_checkpointer, trainer
)
if comm.is_main_process():
                assert (
                    cfg.GATHER_METRIC_PERIOD <= cfg.WRITER_PERIOD
                    and cfg.WRITER_PERIOD % cfg.GATHER_METRIC_PERIOD == 0
                ), "WRITER_PERIOD must be a positive multiple of GATHER_METRIC_PERIOD"
tbx_writer = self.get_tbx_writer(cfg)
writers = [
CommonMetricPrinter(max_iter, window_size=cfg.WRITER_PERIOD),
JSONWriter(
os.path.join(cfg.OUTPUT_DIR, "metrics.json"),
window_size=cfg.WRITER_PERIOD,
),
tbx_writer,
]
trainer_hooks.append(hooks.PeriodicWriter(writers, cfg.WRITER_PERIOD))
update_hooks_from_registry(trainer_hooks, cfg)
trainer.register_hooks(trainer_hooks)
trainer.train(start_iter, max_iter)
if hasattr(self, "original_cfg"):
table = get_cfg_diff_table(cfg, self.original_cfg)
logger.info(
"GeneralizeRCNN Runner ignoring training config change: \n" + table
)
trained_cfg = self.original_cfg.clone()
else:
trained_cfg = cfg.clone()
with temp_defrost(trained_cfg):
trained_cfg.MODEL.WEIGHTS = checkpointer.get_checkpoint_file()
return {"model_final": trained_cfg}
@staticmethod
def get_evaluator(cfg, dataset_name, output_folder):
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["coco", "coco_panoptic_seg"]:
# D2 is in the process of reducing the use of cfg.
dataset_evaluators = COCOEvaluator(
dataset_name,
output_dir=output_folder,
kpt_oks_sigmas=cfg.TEST.KEYPOINT_OKS_SIGMAS,
max_dets_per_image=cfg.TEST.DETECTIONS_PER_IMAGE,
)
elif evaluator_type in ["rotated_coco"]:
dataset_evaluators = DatasetEvaluators(
[RotatedCOCOEvaluator(dataset_name, cfg, True, output_folder)]
)
elif evaluator_type in ["lvis"]:
dataset_evaluators = LVISEvaluator(
dataset_name,
output_dir=output_folder,
max_dets_per_image=cfg.TEST.DETECTIONS_PER_IMAGE,
)
else:
dataset_evaluators = D2Trainer.build_evaluator(
cfg, dataset_name, output_folder
)
if not isinstance(dataset_evaluators, DatasetEvaluators):
dataset_evaluators = DatasetEvaluators([dataset_evaluators])
return dataset_evaluators
@staticmethod
def final_model_name():
return "model_final"
def _create_after_step_hook(
self, cfg, model, optimizer, scheduler, periodic_checkpointer
):
"""
Create a hook that performs some pre-defined tasks used in this script
(evaluation, LR scheduling, checkpointing).
"""
def after_step_callback(trainer):
trainer.storage.put_scalar(
"lr", optimizer.param_groups[0]["lr"], smoothing_hint=False
)
if trainer.iter < cfg.SOLVER.MAX_ITER - 1:
                # Since scheduler.step() is called after the backward pass at each
                # iteration, "where = 1.0" is reached in the scheduler after the last
                # iteration, which triggers "IndexError: list index out of range" in
                # StepParamScheduler.
                # See test_warmup_stepwithfixedgamma in
                # vision/fair/detectron2/tests:test_scheduler for an example.
scheduler.step()
# Note: when precise BN is enabled, some checkpoints will have more precise
# statistics than others, if they are saved immediately after eval.
# Note: FSDP requires all ranks to execute saving/loading logic
if comm.is_main_process() or is_distributed_checkpoint(
periodic_checkpointer.checkpointer
):
periodic_checkpointer.step(trainer.iter)
return hooks.CallbackHook(after_step=after_step_callback)
def _create_data_loader_hook(self, cfg):
"""
Create a hook for manipulating data loader
"""
return None
def _create_qat_hook(self, cfg) -> Optional[QATHook]:
"""
Create a hook to start QAT (during training) and/or change the phase of QAT.
"""
if not cfg.QUANTIZATION.QAT.ENABLED:
return None
return QATHook(cfg, self.build_detection_train_loader)
class GeneralizedRCNNRunner(Detectron2GoRunner):
@classmethod
def get_default_cfg(cls):
return get_generalized_rcnn_runner_default_cfg(CfgNode())
|
d2go-main
|
d2go/runner/default_runner.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
import pytorch_lightning as pl
import torch
from d2go.config import CfgNode
from d2go.data.datasets import inject_coco_datasets, register_dynamic_datasets
from d2go.data.utils import update_cfg_if_using_adhoc_dataset
from d2go.modeling.api import build_meta_arch
from d2go.modeling.model_freezing_utils import set_requires_grad
from d2go.optimizer.build import build_optimizer_mapper
from d2go.runner.api import RunnerV2Mixin
from d2go.runner.callbacks.quantization import maybe_prepare_for_quantization, PREPARED
from d2go.runner.default_runner import (
_get_tbx_writer,
D2GoDataAPIMixIn,
Detectron2GoRunner,
GeneralizedRCNNRunner,
)
from d2go.utils.ema_state import EMAState
from d2go.utils.misc import get_tensorboard_log_dir
from detectron2.engine.train_loop import HookBase
from detectron2.solver import build_lr_scheduler as d2_build_lr_scheduler
from mobile_cv.common.misc.oss_utils import fb_overwritable
from pytorch_lightning.strategies import DDPStrategy, SingleDeviceStrategy
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.utilities.logger import _flatten_dict
_STATE_DICT_KEY = "state_dict"
_OLD_STATE_DICT_KEY = "model"
_OLD_EMA_KEY = "ema_state"
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _is_lightning_checkpoint(checkpoint: Dict[str, Any]) -> bool:
"""Returns true if we believe this checkpoint to be a Lightning checkpoint."""
return _STATE_DICT_KEY in checkpoint
def _is_d2go_checkpoint(checkpoint: Dict[str, Any]) -> bool:
"""Returns true if we believe this to be a D2Go checkpoint."""
d2_go_keys = [_OLD_STATE_DICT_KEY, "iteration"]
for key in d2_go_keys:
if key not in checkpoint:
return False
return True
def _convert_to_lightning(d2_checkpoint: Dict[str, Any]) -> None:
"""Converst a D2Go Checkpoint to Lightning in-place by renaming keys."""
prefix = "model" # based on DefaultTask.model.
old_keys = list(d2_checkpoint[_OLD_STATE_DICT_KEY])
for key in old_keys:
d2_checkpoint[_OLD_STATE_DICT_KEY][f"{prefix}.{key}"] = d2_checkpoint[
_OLD_STATE_DICT_KEY
][key]
del d2_checkpoint[_OLD_STATE_DICT_KEY][key]
if "model.pixel_mean" in d2_checkpoint[_OLD_STATE_DICT_KEY]:
del d2_checkpoint[_OLD_STATE_DICT_KEY]["model.pixel_mean"]
if "model.pixel_std" in d2_checkpoint[_OLD_STATE_DICT_KEY]:
del d2_checkpoint[_OLD_STATE_DICT_KEY]["model.pixel_std"]
for old, new in zip(
[_OLD_STATE_DICT_KEY, "iteration"], [_STATE_DICT_KEY, "global_step"]
):
d2_checkpoint[new] = d2_checkpoint[old]
del d2_checkpoint[old]
for old, new in zip(
["optimizer", "scheduler"], ["optimizer_states", "lr_schedulers"]
):
if old not in d2_checkpoint:
continue
d2_checkpoint[new] = [d2_checkpoint[old]]
del d2_checkpoint[old]
if _OLD_EMA_KEY in d2_checkpoint:
d2_checkpoint["model_ema"] = d2_checkpoint[_OLD_EMA_KEY]
del d2_checkpoint[_OLD_EMA_KEY]
d2_checkpoint["epoch"] = 0
class ModelTag(str, Enum):
DEFAULT = "default"
EMA = "ema"
@fb_overwritable()
def get_gpu_profiler(cfg: CfgNode) -> Optional[HookBase]:
return None
class DefaultTask(D2GoDataAPIMixIn, pl.LightningModule):
def __init__(self, cfg: CfgNode):
super().__init__()
self.register(cfg)
self.cfg = cfg
self.model = self._build_model()
self.storage = None
# evaluators for validation datasets, split by model tag(default, ema),
# in the order of DATASETS.TEST
self.dataset_evaluators = {ModelTag.DEFAULT: []}
self.save_hyperparameters()
self.eval_res = None
# Support custom training step in meta arch
if hasattr(self.model, "training_step"):
# activate manual optimization for custom training step
self.automatic_optimization = False
self.ema_state: Optional[EMAState] = None
if cfg.MODEL_EMA.ENABLED:
self.ema_state = EMAState(
decay=cfg.MODEL_EMA.DECAY,
device=cfg.MODEL_EMA.DEVICE or cfg.MODEL.DEVICE,
)
self.dataset_evaluators[ModelTag.EMA] = []
self.gpu_profiler: Optional[HookBase] = get_gpu_profiler(cfg)
def _build_model(self) -> torch.nn.Module:
model = build_meta_arch(self.cfg)
if self.cfg.MODEL.FROZEN_LAYER_REG_EXP:
set_requires_grad(model, self.cfg.MODEL.FROZEN_LAYER_REG_EXP, value=False)
return model
@classmethod
def from_config(cls, cfg: CfgNode, eval_only=False):
"""Builds Lightning module including model from config.
To load weights from a pretrained checkpoint, please specify checkpoint
path in `MODEL.WEIGHTS`.
Args:
cfg: D2go config node.
eval_only: True if module should be in eval mode.
"""
if eval_only and not cfg.MODEL.WEIGHTS:
logger.warning("MODEL.WEIGHTS is missing for eval only mode.")
if cfg.MODEL.WEIGHTS:
# only load model weights from checkpoint
logger.info(f"Load model weights from checkpoint: {cfg.MODEL.WEIGHTS}.")
task = cls.load_from_checkpoint(cfg.MODEL.WEIGHTS, cfg=cfg, strict=False)
else:
task = cls(cfg)
if cfg.MODEL_EMA.ENABLED and cfg.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY:
assert task.ema_state, "EMA state is not loaded from checkpoint."
task.ema_state.apply_to(task.model)
if eval_only:
task.eval()
return task
def training_step(self, batch, batch_idx):
if hasattr(self.model, "training_step"):
return self._meta_arch_training_step(batch, batch_idx)
return self._standard_training_step(batch, batch_idx)
def _standard_training_step(self, batch, batch_idx):
loss_dict = self.forward(batch)
losses = sum(loss_dict.values())
loss_dict["total_loss"] = losses
self.storage.step()
self.log_dict(loss_dict, prog_bar=True)
return losses
def _meta_arch_training_step(self, batch, batch_idx):
opt = self.optimizers()
loss_dict = self.model.training_step(
batch, batch_idx, opt, self.manual_backward
)
sch = self.lr_schedulers()
sch.step()
self.storage.step()
self.log_dict(loss_dict, prog_bar=True)
return loss_dict
def test_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
self._evaluation_step(batch, batch_idx, dataloader_idx)
def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
self._evaluation_step(batch, batch_idx, dataloader_idx)
def _evaluation_step(self, batch, batch_idx: int, dataloader_idx: int) -> None:
if not isinstance(batch, List):
batch = [batch]
outputs = self.forward(batch)
self.dataset_evaluators[ModelTag.DEFAULT][dataloader_idx].process(
batch, outputs
)
if self.ema_state:
ema_outputs = self.model_ema(batch)
self.dataset_evaluators[ModelTag.EMA][dataloader_idx].process(
batch, ema_outputs
)
def _log_dataset_evaluation_results(self) -> None:
nested_res = {}
for tag, evaluators in self.dataset_evaluators.items():
res = {}
for idx, evaluator in enumerate(evaluators):
dataset_name = self.cfg.DATASETS.TEST[idx]
res[dataset_name] = evaluator.evaluate()
nested_res[tag.lower()] = res
self.eval_res = nested_res
flattened = _flatten_dict(nested_res)
if self.trainer.global_rank:
assert (
len(flattened) == 0
), "evaluation results should have been reduced on rank 0."
self.log_dict(flattened, rank_zero_only=True)
def test_epoch_end(self, _outputs) -> None:
self._evaluation_epoch_end()
def validation_epoch_end(self, _outputs) -> None:
self._evaluation_epoch_end()
def _evaluation_epoch_end(self) -> None:
self._log_dataset_evaluation_results()
self._reset_dataset_evaluators()
def configure_optimizers(
self,
) -> Tuple[List[torch.optim.Optimizer], List]:
model = self.model
if hasattr(self, PREPARED):
# train the prepared model for FX quantization
model = getattr(self, PREPARED)
optim = build_optimizer_mapper(self.cfg, model)
lr_scheduler = d2_build_lr_scheduler(self.cfg, optim)
return [optim], [{"scheduler": lr_scheduler, "interval": "step"}]
def train_dataloader(self):
return self.build_detection_train_loader(self.cfg)
def _reset_dataset_evaluators(self):
"""reset validation dataset evaluator to be run in EVAL_PERIOD steps"""
assert isinstance(self.trainer.strategy, (SingleDeviceStrategy, DDPStrategy)), (
"Only Single Device or DDP strategies are supported,"
f" instead found: {self.trainer.strategy}"
)
def _get_inference_dir_name(
base_dir, inference_type, dataset_name, model_tag: ModelTag
):
next_eval_iter = self.trainer.global_step + self.cfg.TEST.EVAL_PERIOD
if self.trainer.global_step == 0:
next_eval_iter -= 1
return os.path.join(
base_dir,
inference_type,
model_tag,
str(next_eval_iter),
dataset_name,
)
@rank_zero_only
def _setup_visualization_evaluator(
evaluator,
dataset_name: str,
model_tag: ModelTag,
) -> None:
logger.info("Adding visualization evaluator ...")
mapper = self.get_mapper(self.cfg, is_train=False)
vis_eval_type = self.get_visualization_evaluator()
            # TODO: replace tbx_writer with Lightning's self.logger.experiment
            tbx_writer = _get_tbx_writer(get_tensorboard_log_dir(self.cfg.OUTPUT_DIR))
            if vis_eval_type is not None:
                evaluator._evaluators.append(
                    vis_eval_type(
                        self.cfg,
                        tbx_writer,
mapper,
dataset_name,
train_iter=self.trainer.global_step,
tag_postfix=model_tag,
)
)
for tag, dataset_evaluators in self.dataset_evaluators.items():
dataset_evaluators.clear()
assert self.cfg.OUTPUT_DIR, "Expect output_dir to be specified in config"
for dataset_name in self.cfg.DATASETS.TEST:
# setup evaluator for each dataset
output_folder = _get_inference_dir_name(
self.cfg.OUTPUT_DIR, "inference", dataset_name, tag
)
evaluator = self.get_evaluator(
self.cfg, dataset_name, output_folder=output_folder
)
evaluator.reset()
dataset_evaluators.append(evaluator)
_setup_visualization_evaluator(evaluator, dataset_name, tag)
def _evaluation_dataloader(self):
# TODO: Support subsample n images
assert len(self.cfg.DATASETS.TEST)
dataloaders = []
for dataset_name in self.cfg.DATASETS.TEST:
dataloaders.append(self.build_detection_test_loader(self.cfg, dataset_name))
self._reset_dataset_evaluators()
return dataloaders
def test_dataloader(self):
return self._evaluation_dataloader()
def val_dataloader(self):
return self._evaluation_dataloader()
def forward(self, input):
return self.model(input)
# ---------------------------------------------------------------------------
# Runner methods
# ---------------------------------------------------------------------------
def register(self, cfg: CfgNode):
inject_coco_datasets(cfg)
register_dynamic_datasets(cfg)
update_cfg_if_using_adhoc_dataset(cfg)
@classmethod
def build_model(cls, cfg: CfgNode, eval_only=False):
"""Builds D2go model instance from config.
        NOTE: Kept for backward compatibility with existing D2Go tools. Prefer
        `from_config` in other use cases.
Args:
cfg: D2go config node.
eval_only: True if model should be in eval mode.
"""
task = cls.from_config(cfg, eval_only)
if hasattr(task, PREPARED):
task = getattr(task, PREPARED)
return task.model
@classmethod
def get_default_cfg(cls):
return Detectron2GoRunner.get_default_cfg()
@staticmethod
def _initialize(cfg: CfgNode):
pass
@staticmethod
def get_evaluator(cfg: CfgNode, dataset_name: str, output_folder: str):
return Detectron2GoRunner.get_evaluator(
cfg=cfg, dataset_name=dataset_name, output_folder=output_folder
)
# ---------------------------------------------------------------------------
# Hooks
# ---------------------------------------------------------------------------
def on_fit_start(self) -> None:
if self.cfg.MODEL_EMA.ENABLED:
if self.ema_state and self.ema_state.has_inited():
# ema_state could have been loaded from checkpoint
# move to the current CUDA device if not on CPU
self.ema_state.to(self.ema_state.device)
return
self.ema_state = EMAState.from_model(
self.model,
decay=self.cfg.MODEL_EMA.DECAY,
device=self.cfg.MODEL_EMA.DEVICE or self.cfg.MODEL.DEVICE,
)
def on_train_batch_start(self, *_) -> None:
if self.gpu_profiler is not None:
self.gpu_profiler.before_step()
def on_train_batch_end(self, *_) -> None:
if self.ema_state:
self.ema_state.update(self.model)
if self.gpu_profiler is not None:
# NOTE: keep this last in function to include all ops in this iteration of the trace
self.gpu_profiler.after_step()
def on_test_epoch_start(self):
self._on_evaluation_epoch_start()
def on_validation_epoch_start(self):
self._on_evaluation_epoch_start()
def _on_evaluation_epoch_start(self):
if self.ema_state:
self.model_ema = deepcopy(self.model)
self.ema_state.apply_to(self.model_ema)
def on_validation_epoch_end(self):
if self.ema_state and hasattr(self, "model_ema"):
del self.model_ema
def on_test_epoch_end(self):
if self.ema_state and hasattr(self, "model_ema"):
del self.model_ema
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
if self.ema_state:
checkpoint["model_ema"] = self.ema_state.state_dict()
def on_load_checkpoint(self, checkpointed_state: Dict[str, Any]) -> None:
"""
Called before model state is restored. Explicitly handles old model
states so we can resume training from D2Go checkpoints transparently.
Args:
checkpointed_state: The raw checkpoint state as returned by torch.load
or equivalent.
"""
# If this is a non-Lightning checkpoint, we need to convert it.
if not _is_lightning_checkpoint(checkpointed_state) and not _is_d2go_checkpoint(
checkpointed_state
):
raise ValueError(
f"Invalid checkpoint state with keys: {checkpointed_state.keys()}"
)
if not _is_lightning_checkpoint(checkpointed_state):
_convert_to_lightning(checkpointed_state)
maybe_prepare_for_quantization(self, checkpointed_state)
if self.ema_state:
if "model_ema" not in checkpointed_state:
rank_zero_info(
"EMA is enabled but EMA state is not found in given checkpoint"
)
else:
self.ema_state = EMAState(
decay=self.cfg.MODEL_EMA.DECAY,
device=self.cfg.MODEL_EMA.DEVICE or self.cfg.MODEL.DEVICE,
)
self.ema_state.load_state_dict(checkpointed_state["model_ema"])
rank_zero_info("Loaded EMA state from checkpoint.")
# TODO(T123654122): subclass of DefaultTask will be refactored
class GeneralizedRCNNTask(DefaultTask):
@classmethod
def get_default_cfg(cls):
return GeneralizedRCNNRunner.get_default_cfg()
# TODO(T123654122): subclass of DefaultTask will be refactored
class GeneralizedRCNNTaskNoDefaultConfig(RunnerV2Mixin, DefaultTask):
"""
    Similar to `GeneralizedRCNNTask`, but allows specifying the default config in YAML via `_defaults_`.
"""
pass
|
d2go-main
|
d2go/runner/lightning_task.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import random
import torch
import torch.nn as nn
from d2go.quantization.modeling import QATCheckpointer
from d2go.runner.default_runner import BaseRunner
from d2go.utils.visualization import add_tensorboard_default_configs
from detectron2.utils.file_io import PathManager
class DebugRunner(BaseRunner):
@classmethod
def get_default_cfg(cls):
_C = super().get_default_cfg()
# _C.TENSORBOARD...
add_tensorboard_default_configs(_C)
# target metric
_C.TEST.TARGET_METRIC = "dataset0:dummy0:metric1"
return _C
def build_model(self, cfg, eval_only=False):
return nn.Sequential()
def do_test(self, cfg, model, train_iter=None):
return {
"dataset0": {
"dummy0": {"metric0": random.random(), "metric1": random.random()}
}
}
def do_train(self, cfg, model, resume):
# save a dummy checkpoint file
save_file = os.path.join(cfg.OUTPUT_DIR, "model_123.pth")
with PathManager.open(save_file, "wb") as f:
torch.save({"model": model.state_dict()}, f)
save_file = os.path.join(cfg.OUTPUT_DIR, "model_12345.pth")
with PathManager.open(save_file, "wb") as f:
torch.save({"model": model.state_dict()}, f)
save_file = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
with PathManager.open(save_file, "wb") as f:
torch.save({"model": model.state_dict()}, f)
def build_checkpointer(self, cfg, model, save_dir, **kwargs):
checkpointer = QATCheckpointer(model, save_dir=save_dir, **kwargs)
return checkpointer
@staticmethod
def final_model_name():
return "model_final"
|
d2go-main
|
d2go/runner/debug_runner.py
|
# pyre-ignore-all-errors
import functools
from abc import ABC
from copy import deepcopy
from dataclasses import dataclass
from types import MethodType
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import torch
from d2go.config import CfgNode
from d2go.quantization.modeling import prepare_fake_quant_model
from d2go.utils.misc import mode
from mobile_cv.arch.quantization.observer import update_stat as observer_update_stat
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities import rank_zero_info
from torch.ao.quantization.qconfig import (
get_default_qat_qconfig,
get_default_qconfig,
QConfig,
QConfigDynamic,
)
from torch.ao.quantization.quant_type import QuantType
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
from torch.ao.quantization.utils import get_fqn_to_example_inputs, get_quant_type
QConfigDicts = Dict[str, Dict[str, Union[QConfig, QConfigDynamic]]]
PREPARED = "_prepared"
def rsetattr(obj: Any, attr: str, val: Any) -> None:
"""Same as setattr but supports deeply nested objects."""
pre, _, post = attr.rpartition(".")
return setattr(rgetattr(obj, pre) if pre else obj, post, val)
def rgetattr(obj: Any, attr: str, *args) -> Any:
"""Same as getattr but supports deeply nested objects."""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
def rhasattr(obj: Any, attr: str, *args) -> bool:
"""Same as hasattr but supports deeply nested objects."""
try:
_ = rgetattr(obj, attr, *args)
except AttributeError:
return False
return True
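# Illustrative sketch: the r*attr helpers take dotted paths into nested
# modules. The "backbone.stem.conv" path below is a hypothetical example, not
# an attribute guaranteed to exist on any particular model.
def _example_swap_nested_module(root: torch.nn.Module) -> torch.nn.Module:
    if rhasattr(root, "backbone.stem.conv"):
        original = rgetattr(root, "backbone.stem.conv")
        rank_zero_info(f"Replacing {type(original).__name__} with Identity")
        rsetattr(root, "backbone.stem.conv", torch.nn.Identity())
    return root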
def _quantized_forward(self, *args, **kwargs):
"""Forward method for a quantized module."""
if not self.training and hasattr(self, "_quantized"):
return self._quantized(*args, **kwargs)
return self._prepared(*args, **kwargs)
def _requires_calibration(config_dicts: QConfigDicts) -> bool:
"""Returns whether the given config_dicts for quantization requires calibration.
A config_dicts requires calibration if at least one of the configs in the
dictioary is a QConfig with an activation observer.
Args:
config: The config dictionary to check.
Returns:
Boolean as described.
"""
for qconfig_dict in config_dicts.values():
for qconfig in qconfig_dict.values():
qtype = get_quant_type(qconfig)
if qtype == QuantType.STATIC:
return True
return False
def checkpoint_has_prepared(checkpoint: Dict[str, Any]) -> bool:
return any(k.startswith(PREPARED) for k in checkpoint["state_dict"].keys())
def maybe_prepare_for_quantization(model: LightningModule, checkpoint: Dict[str, Any]):
if checkpoint_has_prepared(checkpoint) and not hasattr(model, PREPARED):
# model has been prepared for QAT before saving into checkpoint
copied = deepcopy(model)
prepared = prepare_fake_quant_model(copied.cfg, copied.model, is_qat=True)
copied.model = prepared
setattr(model, PREPARED, copied)
class QuantizationMixin(ABC):
"""Mixin defining an overrideable API for quantization customization.
For example, suppose our model contains traceable and non-traceable modules:
>>> class MyNonTraceableModel(LightningModule):
... def __init__(self):
... self.traceable = ...
... self.non_traceable = ...
...
... def forward(self, x):
... x = self.traceable(x)
... return self.non_traceable(x)
Then using FX-mode quantization, we can only quantize the traceable pieces.
As such, we could do something like the below, shown here for QAT.
    >>> class MyQuantizationCallback(QuantizationAwareTraining):
... def prepare(self, model, config, attrs):
... model.traceable = prepare_qat_fx(model.traceable, config)
... return model
...
... def convert(self, model, attr):
... model.traceable = convert_fx(model.traceable)
... return model
    We can then use this callback as with any other:
Example::
>>> model = MyNonTraceableModel(...)
>>> quantization = MyQuantizationCallback()
>>> trainer = Trainer(
... callbacks=[quantization],
... )
>>> trainer.fit(model)
"""
def prepare(
self, root: LightningModule, configs: QConfigDicts, attrs: Set[str]
) -> torch.nn.Module:
"""Prepares the root user modules for quantization.
By default, this tries to prepare the entire LightningModule. If this is
        not possible (e.g., due to traceability, etc.), the recommended approach
        is to override the `prepare` method to prepare the root as appropriate,
        and also override the `convert` method to only convert the prepared
        pieces of the root.
Args:
root: The LightningModule as given to the lightning Trainer in train mode.
configs: Specification to be used when preparing the model, as provided by the user.
It is guaranteed that no key is a suffix of another.
attrs: The list of attributes to maintain for the module.
Returns:
The prepared Module to be used for quantized aware training.
"""
is_qat = isinstance(self, QuantizationAwareTraining)
self._convert_fx_callback = None
if hasattr(root.model, "custom_prepare_fx"):
prepared, convert_fx_callback = root.model.custom_prepare_fx(
root.cfg, is_qat
)
self._convert_fx_callback = convert_fx_callback
root.model = prepared
return root
prep_fn = prepare_qat_fx if is_qat else prepare_fx
old_attrs = {
attr: rgetattr(root, attr) for attr in attrs if rhasattr(root, attr)
}
prepared = root
if "" in configs:
prepared = prep_fn(root, configs[""], root.example_input_array)
else:
fqn_to_example_inputs = get_fqn_to_example_inputs(
root, root.example_input_array
)
for name, config in configs.items():
submodule = rgetattr(root, name)
rsetattr(
root, name, prep_fn(submodule, config, fqn_to_example_inputs[name])
)
for attr, value in old_attrs.items():
rsetattr(prepared, attr, value)
return prepared
def convert(
self, root: torch.nn.Module, submodules: Set[str], attrs: Set[str]
) -> torch.nn.Module:
"""Quantizes a previously prepared module (as returned by `prepare`).
By default, this simply quantizes the entire root. If the `prepare`
method was customized, this will need to be changed as well.
Args:
root: The prepared model as returned by `prepare`, after training.
            submodules: An iterable of fully qualified submodule names that
                require converting.
attrs: The list of attributes to maintain for the module across this call.
Returns:
The quantized model.
"""
if self._convert_fx_callback is not None:
return self._convert_fx_callback(root)
old_attrs = {
attr: rgetattr(root, attr) for attr in attrs if rhasattr(root, attr)
}
converted = root
if "" in submodules:
converted = convert_fx(root)
else:
for name in submodules:
prepared = rgetattr(root, name)
rsetattr(root, name, convert_fx(prepared))
for attr, value in old_attrs.items():
rsetattr(converted, attr, value)
rsetattr(root, attr, value)
return converted
@dataclass(frozen=True)
class ModelTransform:
"""Defines a step or interval at which fn should be .apply(fn)'ed and a message to log.
Properties:
fn: The function to apply. Must be passable to torch.nn.Module.apply(fn).
step: Only one of `step` or `interval` must be defined. If step is defined,
`fn` will be applied exactly once right before `step` step begins.
interval: Only one of `step` or `interval` must be defined. If `interval`
is defined, the transform will be applied periodically every
`interval` steps.
message: A short non-punctuated message to log in the master worker when
this transform is triggered.
"""
fn: Callable[[torch.nn.Module], None]
message: str
step: Optional[int] = None
interval: Optional[int] = None
def __post_init__(self) -> None:
"""Validate a few properties for early failure."""
if (self.step is None and self.interval is None) or (
self.step is not None and self.interval is not None
):
raise TypeError("Exactly one of step or interval must be defined.")
if self.step is not None and self.step < 0:
raise ValueError("step must be non-negative.")
if self.interval is not None and self.interval <= 0:
raise ValueError("interval must be positive.")
class QuantizationAwareTraining(Callback, QuantizationMixin):
"""Enable QAT of a model using the STL Trainer.
    Note that this callback makes changes during training in order to properly
quantize the provided LightningModule.
Example::
>>> from stl.lightning.callbacks.quantization import QuantizationAwareTraining
>>> from pytorch_lightning import Trainer
>>> from stl.lightning.utilities.model import mode
...
# MyLightningModule must define val_dataloader() which is used both for
# validation as well as calibration of the quantized model.
>>> model = MyLightningModule(...)
>>> qat = QuantizationAwareTraining()
>>> trainer = Trainer(
... callbacks=[qat],
... )
# This will convert the model into one that is quantizeable, train it,
# and then quantize it after training is done.
>>> trainer.fit(model)
# You can use the model directly.
>>> input = ...
>>> with mode(model, training=False) as m:
... quantized_out = m(input)
If you only wish to quantize parts of your model, please see QuantizationMixin
for an example of how to do this.
Properties:
        transforms: A list of ModelTransforms applied to the model exactly once
            as specified during training. Example transforms are enabling/disabling
            observers/quants, which are added to this list based on the init
            parameters to this callback. Users can further augment the list
            with more custom transforms.
prepared: If set, this is the prepared model. Only available
after .fit() starts.
qconfig_dicts:
This is a map from the `module_qualified_name` to the corresponding QConfigDict
to apply to that module. For example, suppose your LightningModule contains
two submodules module.scriptable and module.not_scriptable. You'd provide
a qconfig_dicts like:
{
"scriptable": ...
}
This will quantize just module.scriptable using the provided QConfigDict,
or a default one. If you wish to quantize the entire LightningModule,
simply use "" as the qualified name. The name should match the names
returned by module.named_modules().
quantized: If set, this is the fully quantized model. Only available
after .fit() finishes.
"""
def __init__(
self,
start_step: int = 0,
enable_observer: Tuple[int, Optional[int]] = (0, None),
freeze_bn_step: Optional[int] = None,
qconfig_dicts: Optional[
Dict[str, Optional[Dict[str, Union[QConfig, QConfigDynamic]]]]
] = None,
preserved_attrs: Optional[List[str]] = None,
skip_conversion: bool = False,
) -> None:
"""
Args:
start_step: The training step at which QAT is enabled. The model is
always mutated with the appropriate stubs, but they are disabled
until the start of this training step.
See FakeQuantizeBase.fake_quant_enabled
enable_observer: The half-open interval [a, b) in steps during which the
observers are enabled. See FakeQuantizeBase.observer_enabled. If
b is None, the observer is never disabled once enabled.
            freeze_bn_step: If specified, the step at which we freeze the
                collection of batch normalization layer statistics for QAT.
qconfig_dicts: If given, used for quantization of the model during training.
preserved_attrs: If provided, a list of attributes to preserve across
                quantized modules. These are preserved only if they already exist.
"""
if start_step < 0:
raise ValueError(
f"The starting step of QAT must be non-negative. Got {start_step}."
)
start_observer, end_observer = enable_observer
if start_observer < 0:
raise ValueError(
f"The starting step for the observer must be non-negative. Got {start_observer}."
)
if end_observer and end_observer <= start_observer:
raise ValueError(
f"The observation interval must contain at least one step. Got [{start_step}, {end_observer})."
)
if freeze_bn_step and freeze_bn_step < 0:
raise ValueError(
f"The step at which batch norm layers are frozen must be non-negative. Got {freeze_bn_step}."
)
self.transforms: List[ModelTransform] = []
if start_step > 0:
self.transforms.extend(
[
# Enabled by default, so the assumption for > 0 is that the
# user wants it disabled then enabled.
ModelTransform(
fn=torch.ao.quantization.disable_fake_quant,
step=0,
message="Disable fake quant",
),
ModelTransform(
fn=torch.ao.quantization.enable_fake_quant,
step=start_step,
message="Enable fake quant to start QAT",
),
]
)
if start_observer > 0:
self.transforms.extend(
# See comment for start_step above.
[
ModelTransform(
fn=torch.ao.quantization.disable_observer,
step=0,
message="Disable observer",
),
ModelTransform(
fn=torch.ao.quantization.enable_observer,
step=start_observer,
message="Start observer",
),
]
)
if end_observer is not None:
self.transforms.append(
ModelTransform(
fn=torch.ao.quantization.disable_observer,
step=end_observer,
message="End observer",
)
)
if freeze_bn_step is not None:
self.transforms.append(
ModelTransform(
fn=torch.nn.intrinsic.qat.freeze_bn_stats,
step=freeze_bn_step,
message="Freeze BN",
)
)
self.prepared: Optional[torch.nn.Module] = None
self.preserved_attrs = set([] if preserved_attrs is None else preserved_attrs)
if not qconfig_dicts:
self.qconfig_dicts: QConfigDicts = {"": {"": get_default_qat_qconfig()}}
else:
self.qconfig_dicts: QConfigDicts = {
key: value if value else {"": get_default_qat_qconfig()}
for key, value in qconfig_dicts.items()
}
self.quantized: Optional[torch.nn.Module] = None
self.skip_conversion = skip_conversion
@classmethod
def from_config(cls, cfg: CfgNode):
qat = cfg.QUANTIZATION.QAT
callback = cls(
qconfig_dicts={submodule: None for submodule in cfg.QUANTIZATION.MODULES}
if cfg.QUANTIZATION.MODULES
else None,
# We explicitly pass this to maintain properties for now.
preserved_attrs=["model.backbone.size_divisibility"],
start_step=qat.START_ITER,
enable_observer=(qat.ENABLE_OBSERVER_ITER, qat.DISABLE_OBSERVER_ITER),
freeze_bn_step=qat.FREEZE_BN_ITER,
skip_conversion=True, # convert_fx will be handled by D2Go exporter
)
if qat.UPDATE_OBSERVER_STATS_PERIODICALLY:
callback.transforms.append(
ModelTransform(
interval=qat.UPDATE_OBSERVER_STATS_PERIOD,
fn=observer_update_stat,
message="Updating observers.",
)
)
return callback
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None:
"""Override the model with a quantized-aware version on setup.
This is the earliest place we can override this model which allows for
appropriate behavior when restoring from checkpoints, as well as connecting
to accelerators, etc.
The model is only prepared once.
"""
# Only prepare the model once.
if hasattr(pl_module, "_prepared"):
return
with mode(pl_module, training=True) as train:
prepared = self.prepare(
deepcopy(train),
configs=self.qconfig_dicts,
attrs=self.preserved_attrs,
)
# freeze the original model since only the prepared model will
# participate in forward.
for x in train.parameters():
x.requires_grad = False
pl_module._prepared = prepared
pl_module.forward = MethodType(_quantized_forward, pl_module)
self.prepared = pl_module._prepared
def on_train_batch_start(
self,
trainer: Trainer,
pl_module: LightningModule,
batch: Any,
batch_idx: int,
) -> None:
"""Applies model transforms at as specified during training."""
apply_only_once = []
current_step = trainer.global_step
for i, transform in enumerate(self.transforms):
if (transform.step is not None and transform.step <= current_step) or (
transform.interval is not None
and current_step % transform.interval == 0
):
self.prepared.apply(transform.fn)
rank_zero_info(
f"[QAT] {transform.message} at step={trainer.global_step}."
)
if transform.step is not None and transform.step <= current_step:
apply_only_once.append(i)
if apply_only_once:
self.transforms = [
transform
for i, transform in enumerate(self.transforms)
if i not in set(apply_only_once)
]
def on_fit_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Quantize the weights since training has finalized."""
if hasattr(pl_module, "_quantized") or self.skip_conversion:
return
pl_module._quantized = self.convert(
pl_module._prepared, self.qconfig_dicts.keys(), attrs=self.preserved_attrs
)
self.quantized = pl_module._quantized
def on_test_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Make sure we have a quantized version.
This handles the edge case where a user does .test() without .fit() first.
"""
if hasattr(pl_module, "_quantized"):
return
pl_module._quantized = self.convert(
pl_module._prepared, self.qconfig_dicts.keys(), attrs=self.preserved_attrs
)
self.quantized = pl_module._quantized
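# Illustrative sketch (assumptions: a D2Go CfgNode with the QUANTIZATION.* keys
# and a LightningModule that defines its own dataloaders): wiring the QAT
# callback from a config into a Lightning Trainer.
def _example_qat_fit(cfg: CfgNode, module: LightningModule) -> None:
    qat = QuantizationAwareTraining.from_config(cfg)
    trainer = Trainer(max_steps=cfg.SOLVER.MAX_ITER, callbacks=[qat])
    trainer.fit(module)
    # After fit(), qat.prepared holds the fake-quantized training model;
    # conversion is skipped here because from_config sets skip_conversion=True.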
class PostTrainingQuantization(Callback, QuantizationMixin):
"""Enable post-training quantization, such as dynamic, static, and weight-only.
This is an idempotent callback (to contrast with QuantizationAwareTraining).
If calibration is required, we will use the validation data set provided to
the STL Trainer, and this occurs on each validation run.
The quantized model is made available as a property of the callback.
Example::
>>> from stl.lightning.callbacks.quantization import PostTrainingQuantization
>>> from pytorch_lightning import Trainer
>>> from stl.lightning.utilities.model import mode
...
# MyLightningModule must define val_dataloader() which is used both for
# validation as well as calibration of the quantized model.
>>> model = MyLightningModule(...)
>>> post_training_quant = PostTrainingQuantization()
>>> trainer = Trainer(
... callbacks=[post_training_quant],
... )
# This will both train the model + create a *separate* quantized version.
    # The original model is left unchanged.
>>> trainer.fit(model)
# You can access the quantized version of the model directly.
>>> input = ...
>>> with mode(post_training_quant.quantized, training=False) as m:
... quantized_out = m(input)
If you only wish to quantize parts of your model, please see QuantizationMixin
for an example of how to do this.
Properties:
prepared: If set, this is the prepared model which can be used for
calibration. Only available after validation start.
        qconfig_dicts: See `QuantizationAwareTraining` for the full description.
quantized: If set, this is the fully quantized model calibrated using
the validation data. Only available after validation has ended.
"""
def __init__(
self,
qconfig_dicts: Optional[QConfigDicts] = None,
preserved_attrs: Optional[List[str]] = None,
) -> None:
"""Initialize the callback."""
self.qconfig_dicts = qconfig_dicts or {"": {"": get_default_qconfig()}}
self.preserved_attrs = set([] if preserved_attrs is None else preserved_attrs)
self.prepared: Optional[torch.nn.Module] = None
self.quantized: Optional[torch.nn.Module] = None
self.should_calibrate = _requires_calibration(self.qconfig_dicts)
@classmethod
def from_config(cls, cfg: CfgNode):
return cls(
qconfig_dicts={submodule: None for submodule in cfg.QUANTIZATION.MODULES}
if cfg.QUANTIZATION.MODULES
else None,
# We explicitly pass this to maintain properties for now.
preserved_attrs=["model.backbone.size_divisibility"],
)
def on_validation_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""
On validation start, prepare a module for quantization by adding
observers and loading weights from current model.
"""
# Pass a copy to quantization APIs.
self.prepared = self.prepare(
deepcopy(pl_module).eval(),
configs=self.qconfig_dicts,
attrs=self.preserved_attrs,
)
def on_validation_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Convert the calibrated model to its finalized quantized version."""
self.quantized = self.convert(
self.prepared, self.qconfig_dicts.keys(), attrs=self.preserved_attrs
)
def on_validation_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Any,
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
"""Also run the validation batch through the quantized model for calibration."""
if self.should_calibrate:
with torch.no_grad():
self.prepared(batch)
|
d2go-main
|
d2go/runner/callbacks/quantization.py
|
d2go-main
|
d2go/runner/callbacks/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import builtins
import logging
import sys
import time
import uuid
from functools import wraps
from typing import Any, Callable, Optional, TypeVar
from mobile_cv.common.misc.oss_utils import fb_overwritable
# Saving the builtin print to wrap it up later.
BUILTIN_PRINT = builtins.print
_T = TypeVar("_T")
@fb_overwritable()
def initialize_logging(logging_level: int) -> None:
root_logger = logging.getLogger()
root_logger.setLevel(logging_level)
def replace_print_with_logging() -> None:
builtins.print = _print_to_logging
def _print_to_logging(
*objects: Any,
sep: Optional[str] = " ",
end: Optional[str] = "\n",
file: Optional[Any] = None,
flush: bool = False,
) -> None:
"""Wraps built-in print to replace it with using the logging module. Only
writing to stdout and stderr are replaced, printing to a file will be
executed unmodified.
This function is on the module level because otherwise numba breaks.
"""
# Mimicking the behavior of Python's built-in print function.
if sep is None:
sep = " "
if end is None:
end = "\n"
# Don't replace prints to files.
if file is not None and file != sys.stdout and file != sys.stderr:
BUILTIN_PRINT(*objects, sep=sep, end=end, file=file, flush=flush)
return
logging.info(sep.join(map(str, objects)), stacklevel=3)
@fb_overwritable()
def _log_enter(category: str, name: str, unique_id: str) -> None:
logging.info(f"Entering logging context, {category=}, {name=}, {unique_id=}")
@fb_overwritable()
def _log_exit(category: str, name: str, unique_id: str, duration: float) -> None:
logging.info(
f"Exiting logging context, {category=}, {name=}, {unique_id=}, {duration=}"
)
def log_interval(
category: Optional[str] = None, name: Optional[str] = None
) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
_unique_id = uuid.uuid1().int >> 97
_overwrite_category = category
_overwrite_name = name
def log_interval_deco(func: Callable[..., _T]) -> Callable[..., _T]:
_category = _overwrite_category or func.__qualname__.split(".")[0]
_name = _overwrite_name or func.__name__
@wraps(func)
def wrapper(*args, **kwargs) -> _T:
_log_enter(_category, _name, _unique_id)
_start = time.perf_counter()
ret = func(*args, **kwargs)
_log_exit(_category, _name, _unique_id, time.perf_counter() - _start)
return ret
return wrapper
return log_interval_deco
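# Illustrative sketch: decorating a function logs matching enter/exit records
# together with the measured wall-clock duration. `category` and `name` fall
# back to the qualified name of the wrapped function when omitted; the names
# below are arbitrary examples.
@log_interval(category="ExampleRunner", name="do_train")
def _example_do_train() -> None:
    time.sleep(0.01)  # stand-in for real work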
|
d2go-main
|
d2go/utils/logging.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
import warnings
from contextlib import contextmanager
from typing import Any, Callable, Dict, Iterator, Optional
import detectron2.utils.comm as comm
import torch
from d2go.config.config import CfgNode
from d2go.utils.tensorboard_log_util import get_tensorboard_log_dir # noqa: forwarding
from detectron2.utils.file_io import PathManager
from tabulate import tabulate
logger = logging.getLogger(__name__)
# Subdirectory with model configurations dumped by the training binary.
TRAINED_MODEL_CONFIGS_DIR: str = "trained_model_configs"
def check_version(library, min_version, warning_only=False):
"""Check the version of the library satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version : str
Minimum version
warning_only : bool
Printing a warning instead of throwing an exception.
"""
from distutils.version import LooseVersion
version = library.__version__
bad_version = LooseVersion(version) < LooseVersion(min_version)
if bad_version:
msg = (
f"Installed {library.__name__} version {version} does not satisfy the "
f"minimum required version {min_version}"
)
if warning_only:
warnings.warn(msg)
else:
raise AssertionError(msg)
return False
return True
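# Illustrative sketch: verify that the installed torch satisfies a minimum
# version, emitting a warning instead of raising on failure. The version
# string is an arbitrary example.
def _example_check_torch_version() -> bool:
    return check_version(torch, "1.10.0", warning_only=True)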
def metrics_dict_to_metrics_table(dic):
assert isinstance(dic, dict)
ret = []
for key in sorted(dic.keys()):
value = dic[key]
if isinstance(value, dict):
for sub_metrics in metrics_dict_to_metrics_table(value):
ret.append([key] + sub_metrics)
else:
ret.append([key, value])
return ret
def print_metrics_table(metrics_dic):
metrics_table = metrics_dict_to_metrics_table(metrics_dic)
metrics_tabulate = tabulate(
metrics_table,
tablefmt="pipe",
headers=["model", "dataset", "task", "metric", "score"],
)
logger.info("Metrics table: \n" + metrics_tabulate)
def dump_trained_model_configs(
output_dir: str, trained_cfgs: Dict[str, CfgNode]
) -> Dict[str, str]:
"""Writes trained model config files to output_dir.
Args:
output_dir: output file directory.
trained_cfgs: map from model name to the config of trained model.
Returns:
A map of model name to model config path.
"""
trained_model_configs = {}
trained_model_config_dir = os.path.join(output_dir, TRAINED_MODEL_CONFIGS_DIR)
PathManager.mkdirs(trained_model_config_dir)
for name, trained_cfg in trained_cfgs.items():
config_file = os.path.join(trained_model_config_dir, "{}.yaml".format(name))
trained_model_configs[name] = config_file
if comm.is_main_process():
logger.info("Dumping trained config file: {}".format(config_file))
with PathManager.open(config_file, "w") as f:
f.write(trained_cfg.dump())
comm.synchronize()
logger.info("Finished dumping trained config file")
return trained_model_configs
def save_binary_outputs(filename: str, outputs: Any) -> None:
"""Helper function to serialize and save function outputs in binary format."""
with PathManager.open(filename, "wb") as f:
torch.save(outputs, f)
def load_binary_outputs(filename: str) -> Any:
"""Helper function to load and deserialize function outputs saved in binary format."""
with PathManager.open(filename, "rb") as f:
return torch.load(f)
@contextmanager
def mode(net: torch.nn.Module, training: bool) -> Iterator[torch.nn.Module]:
"""Temporarily switch to training/evaluation mode."""
istrain = net.training
try:
net.train(training)
yield net
finally:
net.train(istrain)
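# Illustrative sketch: run a forward pass in eval mode, restoring the previous
# training flag on exit.
def _example_eval_forward(net: torch.nn.Module, inputs: Any) -> Any:
    with mode(net, training=False) as m:
        with torch.no_grad():
            return m(inputs)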
def _log_api_usage(identifier: str):
"""
Internal function used to log the usage of different d2go components
inside facebook's infra.
"""
torch._C._log_api_usage_once("d2go." + identifier)
def _log_api_usage_on_main_process(identifier: str):
"""
Log the usage of d2go API only on the main process.
"""
if comm.is_main_process():
_log_api_usage(identifier)
|
d2go-main
|
d2go/utils/misc.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
def iterate_module_named_parameters(model, check_requires_grad=True):
"""Iterate over all parameters for the model"""
memo = set()
for module_name, module in model.named_modules():
for module_param_name, value in module.named_parameters(recurse=False):
if check_requires_grad and not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
yield module_name, module, module_param_name, value
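# Illustrative sketch: collect the fully qualified names of all trainable
# parameters, e.g. as a starting point for per-parameter optimizer groups.
def _example_trainable_param_names(model):
    names = []
    for module_name, _module, param_name, _value in iterate_module_named_parameters(
        model
    ):
        names.append(f"{module_name}.{param_name}" if module_name else param_name)
    return names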
|
d2go-main
|
d2go/utils/parse_module_params.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import deque
import cv2
import detectron2.data.transforms as T
import torch
from d2go.model_zoo import model_zoo
from detectron2.data import MetadataCatalog
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class DemoPredictor:
def __init__(self, model, min_size_test=224, max_size_test=320, input_format="RGB"):
self.model = model
self.model.eval()
self.aug = T.ResizeShortestEdge([min_size_test, min_size_test], max_size_test)
self.input_format = input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
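# Illustrative sketch: run DemoPredictor on a single OpenCV (BGR) frame. The
# model-zoo config name is a placeholder and may differ in practice.
def _example_predict(image_bgr):
    model = model_zoo.get("faster_rcnn_fbnetv3a_C4.yaml", trained=True)
    predictor = DemoPredictor(model)
    return predictor(image_bgr)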
class VisualizationDemo(object):
def __init__(self, cfg, config_file, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
model = model_zoo.get(config_file, trained=True) # runner.build_model(cfg)
self.predictor = DemoPredictor(model)
def run_on_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(
frame, predictions
)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
|
d2go-main
|
d2go/utils/demo_predictor.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
import detectron2.utils.comm as comm
import torch
from d2go.utils.visualization import VisualizerWrapper
from detectron2.utils.file_io import PathManager
logger = logging.getLogger(__name__)
def get_rel_loss_checker(rel_thres=1.0):
def _loss_delta_exceeds_thresh(prev_loss, loss):
if prev_loss is None:
return True
prev_sum = sum(prev_loss.values())
cur_sum = sum(loss.values())
if prev_sum <= 0:
return True
if (cur_sum - prev_sum) / prev_sum >= rel_thres:
return False
return True
return _loss_delta_exceeds_thresh
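# Illustrative sketch: a stricter checker that flags a step whenever the summed
# loss grows by 50% or more relative to the previous step; the threshold is an
# arbitrary example, and no writers are attached here.
def _example_build_strict_checker():
    return AbnormalLossChecker(
        start_iter=0, writers=[], valid_loss_checker=get_rel_loss_checker(0.5)
    )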
class TrainImageWriter(object):
def __init__(self, cfg, tbx_writer, max_count=5):
"""max_count: max number of data written to tensorboard, additional call
will be ignored
"""
self.visualizer = VisualizerWrapper(cfg)
self.writer = tbx_writer
self.max_count = max_count
self.counter = 0
def __call__(self, all_data):
if self.max_count > 0 and self.counter >= self.max_count:
return
data = all_data["data"]
step = all_data["step"]
for idx, cur_data in enumerate(data):
name = f"train_abnormal_losses/{step}/img_{idx}/{cur_data['file_name']}"
vis_img = self.visualizer.visualize_train_input(cur_data)
self.writer._writer.add_image(name, vis_img, step, dataformats="HWC")
logger.warning(
"Train images with bad losses written to tensorboard 'train_abnormal_losses'"
)
self.counter += 1
class FileWriter(object):
def __init__(self, output_dir, max_count=5):
"""max_count: max number of data written to tensorboard, additional call
will be ignored
"""
self.output_dir = output_dir
self.max_count = max_count
self.counter = 0
def __call__(self, all_data):
if self.max_count > 0 and self.counter >= self.max_count:
return
output_dir = self.output_dir
step = all_data["step"]
losses = all_data["losses"]
file_name = f"train_abnormal_losses_{step}_{comm.get_rank()}.pth"
out_file = os.path.join(output_dir, file_name)
with PathManager.open(out_file, "wb") as fp:
torch.save(all_data, fp)
logger.warning(
f"Iteration {step} has bad losses {losses}. "
f"all information saved to {out_file}."
)
self.counter += 1
def get_writers(cfg, tbx_writer):
writers = [TrainImageWriter(cfg, tbx_writer), FileWriter(cfg.OUTPUT_DIR)]
return writers
class AbnormalLossChecker(object):
def __init__(self, start_iter, writers, valid_loss_checker=None):
self.valid_loss_checker = valid_loss_checker or get_rel_loss_checker()
self.writers = writers or []
assert isinstance(self.writers, list)
self.prev_index = start_iter
self.prev_loss = None
def check_step(self, losses, data=None, model=None):
with torch.no_grad():
is_valid = self.valid_loss_checker(self.prev_loss, losses)
if not is_valid:
self._write_invalid_info(losses, data, model)
self.prev_index += 1
self.prev_loss = losses
return is_valid
def _write_invalid_info(self, losses, data, model):
all_info = {
"losses": losses,
"data": data,
"model": getattr(model, "module", model),
"prev_loss": self.prev_loss,
"step": self.prev_index + 1,
}
for writer in self.writers:
writer(all_info)
class AbnormalLossCheckerWrapper(torch.nn.Module):
def __init__(self, model, checker):
super().__init__()
self.checker = checker
self.model = model
self.training = model.training
def forward(self, x):
losses = self.model(x)
self.checker.check_step(losses, data=x, model=self.model)
return losses
|
d2go-main
|
d2go/utils/abnormal_checker.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from mobile_cv.common.misc.oss_utils import fb_overwritable
@fb_overwritable()
def get_model_zoo_storage_prefix() -> str:
return "https://mobile-cv.s3-us-west-2.amazonaws.com/d2go/models/"
@fb_overwritable()
def get_launch_environment():
return "local"
MODEL_ZOO_STORAGE_PREFIX = get_model_zoo_storage_prefix()
|
d2go-main
|
d2go/utils/launch_environment.py
|
#!/usr/bin/env python3
import itertools
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
class EMAState(object):
"""Stores Exponential Moving Average state for a model.
Args:
decay: EMA decay factor, should be in [0, 1]. A decay of 0 corresponds to
always using the latest value (no EMA) and a decay of 1 corresponds to
            never updating the weights after initialization. Defaults to 0.999.
device: If not None, move model EMA state to device.
"""
def __init__(self, decay: float = 0.999, device: Optional[str] = None):
if decay < 0 or decay > 1.0:
raise ValueError(f"Decay should be in [0, 1], {decay} was given.")
self.decay: float = decay
self.state: Dict[str, Any] = {}
self.device: Optional[str] = device
@classmethod
def from_model(
cls,
model: nn.Module,
decay: float = 0.999,
device: Optional[str] = None,
) -> "EMAState":
"""Constructs model state from the model and move to device if given."""
ret = cls(decay, device)
ret.load_from(model)
return ret
def load_from(self, model: nn.Module) -> None:
"""Load state from the model."""
self.state.clear()
for name, val in self._get_model_state_iterator(model):
val = val.detach().clone()
self.state[name] = val.to(self.device) if self.device else val
def has_inited(self) -> bool:
return len(self.state) > 0
def apply_to(self, model: nn.Module) -> None:
"""Apply EMA state to the model."""
with torch.no_grad():
for name, val in self._get_model_state_iterator(model):
assert (
name in self.state
), f"Name {name} does not exist, available names are {self.state.keys()}"
val.copy_(self.state[name])
def state_dict(self) -> Dict[str, Any]:
return self.state
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.state.clear()
for name, val in state_dict.items():
self.state[name] = val.to(self.device) if self.device else val
def to(self, device: torch.device) -> None:
"""moves EMA state to device."""
for name, val in self.state.items():
self.state[name] = val.to(device)
def _get_model_state_iterator(self, model: nn.Module):
param_iter = model.named_parameters()
# pyre-fixme[16]: `nn.Module` has no attribute `named_buffers`.
buffer_iter = model.named_buffers()
return itertools.chain(param_iter, buffer_iter)
def update(self, model: nn.Module) -> None:
with torch.no_grad():
for name, val in self._get_model_state_iterator(model):
ema_val = self.state[name]
if self.device:
val = val.to(self.device)
ema_val.copy_(ema_val * self.decay + val * (1.0 - self.decay))
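# --- Usage sketch (editor's addition, a minimal example assuming a plain
# nn.Module): with decay=0.999 each update moves the shadow weights 0.1% of the
# way toward the live weights, i.e. ema <- 0.999 * ema + 0.001 * val.
def _example_ema_usage():
    model = nn.Linear(4, 2)
    ema = EMAState.from_model(model, decay=0.999)
    for _ in range(10):
        # an optimizer.step() would update `model` here
        ema.update(model)
    ema.apply_to(model)  # overwrite the live weights with the EMA weights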
|
d2go-main
|
d2go/utils/ema_state.py
|
import logging
import os
import pickle
import torch
from d2go.config import CfgNode as CN
from detectron2.utils.file_io import PathManager
from mobile_cv.torch.utils_pytorch import comm
from torch.cuda._memory_viz import segment_plot, trace_plot
logger: logging.Logger = logging.getLogger(__name__)
def add_memory_profiler_configs(_C: CN):
_C.MEMORY_PROFILER = CN()
_C.MEMORY_PROFILER.ENABLED = False
# max number of trace entries in memory snapshot
_C.MEMORY_PROFILER.TRACE_MAX_ENTRIES = 1000000
# Configs to be used by d2go.utils.gpu_memory_profiler.D2GoGpuMemorySnapshot
# determine the number of iterations to log memory snapshots for
_C.MEMORY_PROFILER.LOG_N_STEPS = 3
# determine at what iteration to start recording gpu memory
_C.MEMORY_PROFILER.LOG_DURING_TRAIN_AT = 550
def add_zoomer_default_config(_C: CN):
_C.ZOOMER = CN()
_C.ZOOMER.ENABLE_STACK_TRACING = (
False # Do not enable by default, since it may cause performance regression
)
_C.ZOOMER.ENABLE_MEMORY_PROFILING = False
def oom_logger_wrapper(output_dir):
def oom_logger(
device: int, alloc: int, device_alloc: int, device_free: int
) -> None:
"""
Log memory snapshot in the event of CUDA OOM.
"""
logger.info(
f"Saving memory snapshot device: {device}, alloc: {alloc}, device_alloc: {device_alloc}, device_free: {device_free}"
)
try:
log_memory_snapshot(output_dir, file_prefix="oom")
except Exception as e:
logger.error(f"Failed to log memory snapshot during OOM {e}")
return oom_logger
def log_memory_snapshot(output_dir: str, file_prefix: str = "") -> None:
"""
Log memory snapshots to output_dir
"""
if not torch.cuda.is_available():
logger.info("CUDA unavailable. Not logging snapshot")
return
    rank = comm.get_rank()
    save_dir = os.path.join(output_dir, "memory_snapshot", f"{file_prefix}_rank{rank}")
    try:
        logger.info(f"Logging memory snapshot to {save_dir}")
        snapshot = torch.cuda.memory._snapshot()
        dump_snapshot(save_dir, snapshot)
    except Exception as e:
        # save_dir is computed before the try block so this message can't raise NameError
        logger.error(f"Failed to log memory snapshot to {save_dir}: {e}")
def dump_snapshot(save_dir: str, snapshot):
"""
Dump memory snapshot and useful plots to save_dir.
This is a rewrite of torch.cuda.memory._dump_snapshot() with PathManager.
"""
if not PathManager.exists(save_dir):
PathManager.mkdirs(save_dir)
with PathManager.open(os.path.join(save_dir, "snapshot.pickle"), "wb") as f:
pickle.dump(snapshot, f)
with PathManager.open(os.path.join(save_dir, "trace_plot.html"), "w") as f:
f.write(trace_plot(snapshot))
with PathManager.open(os.path.join(save_dir, "segment_plot.html"), "w") as f:
f.write(segment_plot(snapshot))
logger.info(f"Saved memory snapshot to {save_dir}")
def record_memory_history(trace_max_entries=1000000) -> None:
"""
Start recording memory history and stack traces.
"""
if not torch.cuda.is_available():
logger.info("CUDA unavailable. Not recording memory history")
return
torch.cuda.memory._record_memory_history(
enabled="all", max_entries=trace_max_entries
)
logger.info("Started recording memory history")
def attach_oom_logger(output_dir, trace_max_entries=1000000) -> None:
"""
Start recording memory history and attach the OOM logger.
"""
if not torch.cuda.is_available():
logger.info("CUDA unavailable. Not attaching OOM logger")
return
record_memory_history(trace_max_entries)
    torch._C._cuda_attach_out_of_memory_observer(oom_logger_wrapper(output_dir))
logger.info("Attached GPU OOM logger")
|
d2go-main
|
d2go/utils/gpu_memory_profiler.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.utils import flop_calculator as _flop_calculator # noqa
# @fb-only: from d2go.utils import fb as _fb # isort:skip # noqa
|
d2go-main
|
d2go/utils/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import os
import traceback
import detectron2.utils.comm as comm
import mobile_cv.lut.lib.pt.flops_utils as flops_utils
import torch
from d2go.utils.helper import run_once
from detectron2.utils.analysis import FlopCountAnalysis
from detectron2.utils.file_io import PathManager
from detectron2.utils.registry import Registry
from fvcore.nn import flop_count_str, flop_count_table
PROFILER_REGISTRY = Registry("PROFILER")
logger = logging.getLogger(__name__)
@torch.no_grad()
def dump_flops_info(model, inputs, output_dir, use_eval_mode=True):
"""
    Dump flops information about the model, using the given model inputs.
    Information is dumped to output_dir using various flop-counting tools
    in different formats. Only a simple table is printed to the terminal.
Args:
inputs: a tuple of positional arguments used to call model with.
use_eval_mode: turn the model into eval mode for flop counting. Otherwise,
will use the original mode. It's recommended to use eval mode, because
training mode typically follows a different codepath.
"""
if not comm.is_main_process():
return
logger.info("Evaluating model's number of parameters and FLOPS")
try:
model = copy.deepcopy(model)
except Exception:
logger.info("Failed to deepcopy the model and skip FlopsEstimation.")
return
# Delete other forward_pre_hooks so they are not simultaneously called.
# The keys are wrapped in a list to avoid mutating ordered_dict during iteration.
# See https://github.com/pytorch/pytorch/issues/49739 for more details.
for hook_key in list(model._forward_pre_hooks.keys()):
logger.warning(f"Forward hook with key {hook_key} was removed in flop counter.")
model._forward_pre_hooks.pop(hook_key)
if use_eval_mode:
model.eval()
inputs = copy.deepcopy(inputs)
# 1. using mobile_cv flop counter
try:
fest = flops_utils.FlopsEstimation(model)
with fest.enable():
model(*inputs)
fest.add_flops_info()
model_str = str(model)
output_file = os.path.join(output_dir, "flops_str_mobilecv.txt")
with PathManager.open(output_file, "w") as f:
f.write(model_str)
logger.info(f"Flops info written to {output_file}")
except Exception:
logger.exception("Failed to estimate flops using mobile_cv's FlopsEstimation")
# 2. using d2/fvcore's flop counter
output_file = os.path.join(output_dir, "flops_str_fvcore.txt")
try:
flops = FlopCountAnalysis(model, inputs)
# 2.1: dump as model str
model_str = flop_count_str(flops)
with PathManager.open(output_file, "w") as f:
f.write(model_str)
logger.info(f"Flops info written to {output_file}")
# 2.2: dump as table
flops_table = flop_count_table(flops, max_depth=10)
output_file = os.path.join(output_dir, "flops_table_fvcore.txt")
with PathManager.open(output_file, "w") as f:
f.write(flops_table)
logger.info(f"Flops table (full version) written to {output_file}")
# 2.3: print a table with a shallow depth
flops_table = flop_count_table(flops, max_depth=3)
logger.info("Flops table:\n" + flops_table)
except Exception:
with PathManager.open(output_file, "w") as f:
traceback.print_exc(file=f)
logger.warning(
"Failed to estimate flops using detectron2's FlopCountAnalysis. "
f"Error written to {output_file}."
)
flops = float("nan")
return flops
def add_flop_printing_hook(
model,
output_dir: str,
):
"""
Add a pytorch module forward hook that will print/save flops of the whole model
at the first time the model is called.
Args:
output_dir: directory to save more detailed flop info
"""
def hook(module, input):
handle.remove()
dump_flops_info(module, input, output_dir)
return input
handle = model.register_forward_pre_hook(hook)
@PROFILER_REGISTRY.register()
def default_flop_counter(model, cfg):
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
)
# TODO: deepcopy() not supported for FSDP yet (https://github.com/pytorch/pytorch/issues/82070), so we disable flop counter for now
if isinstance(model, FSDP):
        logger.warning(
            "Default flop counter is disabled because it's not supported for FSDP yet."
        )
return
return add_flop_printing_hook(model, cfg.OUTPUT_DIR)
# NOTE: the logging can be too long and messy when printing flops multiple
# times, especially when running eval during training, thus using `run_once`
# to limit it. `dump_flops_info` can log flops more concisely.
@run_once()
def add_print_flops_callback(cfg, model, disable_after_callback=True):
def _print_flops_callback(self, model, model_data):
self.add_flops_info()
logger.info("Callback: model flops info:\n{}".format(model))
def _guess_batch_size():
# Inputs are meta-arch dependent, the most general solution will be
# adding a function like `get_batch_size()` to each meta arch
ret = 1
try:
model_input_shapes = model_data(model)["input_shapes"]
assert isinstance(model_input_shapes, list)
assert len(model_input_shapes) > 0
# assuming the first input is a list of images
ret = len(model_input_shapes[0])
            except Exception:
                ret = cfg.SOLVER.IMS_PER_BATCH // comm.get_world_size()
                logger.warning(
                    "Could not infer the batch size from model inputs, falling back"
                    f" to `cfg.SOLVER.IMS_PER_BATCH // world_size` = {ret}"
                )
return ret
nparams, nflops = self.get_flops()
batch_size = _guess_batch_size()
nflops_single = nflops / batch_size
logger.info(
f"Model parameters (M): {nparams}, "
f"MFlops (batch_size={batch_size}): {nflops}, "
f"MFlops (batch_size=1): {nflops_single}"
)
if disable_after_callback:
self.set_enable(False)
fest = flops_utils.FlopsEstimation(model).set_callback(_print_flops_callback)
logger.info("Added callback to log flops info after the first inference")
fest.set_enable(True)
return fest
def attach_profiler(profiler_name):
return PROFILER_REGISTRY.get(profiler_name)
def attach_profilers(cfg, model):
for profiler in cfg.PROFILERS:
attach_profiler(profiler)(model, cfg)
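# --- Usage sketch (editor's addition): attaching the registered default flop
# counter by name, which is equivalent to listing it in cfg.PROFILERS and calling
# attach_profilers(cfg, model). Flops are then printed/saved on the model's first
# forward call.
def _example_attach_default_flop_counter(model, cfg):
    attach_profiler("default_flop_counter")(model, cfg)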
|
d2go-main
|
d2go/utils/flop_calculator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from functools import lru_cache
from mobile_cv.common.misc.oss_utils import fb_overwritable
@fb_overwritable()
def get_tensorboard_log_dir(output_dir):
return output_dir
|
d2go-main
|
d2go/utils/tensorboard_log_util.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Optional, Type
from d2go.config import CfgNode as CN
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.data import DatasetCatalog, detection_utils as utils, MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.events import get_event_storage
from detectron2.utils.visualizer import Visualizer
def add_tensorboard_default_configs(_C):
_C.TENSORBOARD = CN()
# Output from dataloader will be written to tensorboard at this frequency
_C.TENSORBOARD.TRAIN_LOADER_VIS_WRITE_PERIOD = 20
    # This controls the max number of images over all batches; be considerate
    # when increasing it, because it takes disk space and slows down the training
_C.TENSORBOARD.TRAIN_LOADER_VIS_MAX_IMAGES = 16
# This controls the max number of images to visualize each write period
_C.TENSORBOARD.TRAIN_LOADER_VIS_MAX_BATCH_IMAGES = 16
# Max number of images per dataset to visualize in tensorboard during evaluation
_C.TENSORBOARD.TEST_VIS_MAX_IMAGES = 16
# Frequency of sending data to tensorboard during evaluation
_C.TENSORBOARD.TEST_VIS_WRITE_PERIOD = 1
# TENSORBOARD.LOG_DIR will be determined solely by OUTPUT_DIR
_C.register_deprecated_key("TENSORBOARD.LOG_DIR")
return _C
class VisualizerWrapper(object):
"""
D2's Visualizer provides low-level APIs to draw common structures, such as
draw_instance_predictions/draw_sem_seg/overlay_instances. This class provides
the high-level interface for visualizing.
"""
def __init__(self, cfg, custom_visualizer: Optional[Type[Visualizer]] = None):
self.cfg = cfg
self.visualizer = custom_visualizer or Visualizer
def _get_meta_arch_class(self):
return META_ARCH_REGISTRY.get(self.cfg.MODEL.META_ARCHITECTURE)
def visualize_train_input(self, input_dict):
"""
        Visualize a single input image of the model (i.e. the output from the
        train loader) used for training; this includes the data augmentation.
"""
per_image = input_dict
cfg = self.cfg
# customization
if hasattr(self._get_meta_arch_class(), "visualize_train_input"):
return self._get_meta_arch_class().visualize_train_input(self, input_dict)
img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy()
img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)
if "dataset_name" in input_dict:
metadata = MetadataCatalog.get(input_dict["dataset_name"])
else:
metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
scale = 2.0
visualizer = self.visualizer(img, metadata=metadata, scale=scale)
if "instances" in per_image:
target_fields = per_image["instances"].get_fields()
labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
visualizer.overlay_instances(
labels=labels,
boxes=target_fields.get("gt_boxes", None),
masks=target_fields.get("gt_masks", None),
keypoints=target_fields.get("gt_keypoints", None),
)
if "sem_seg" in per_image:
visualizer.draw_sem_seg(per_image["sem_seg"], area_threshold=0, alpha=0.5)
return visualizer.get_output().get_image()
def visualize_test_output(
self, dataset_name, dataset_mapper, input_dict, output_dict
):
"""
        Visualize the output of the model.
"""
# customization
if hasattr(self._get_meta_arch_class(), "visualize_test_output"):
return self._get_meta_arch_class().visualize_test_output(
self, dataset_name, dataset_mapper, input_dict, output_dict
)
image = dataset_mapper._read_image(input_dict, "RGB")
visualizer = self.visualizer(image, metadata=MetadataCatalog.get(dataset_name))
if "panoptic_seg" in output_dict:
panoptic_seg, segments_info = output_dict["panoptic_seg"]
visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to("cpu"), segments_info
)
if "instances" in output_dict:
visualizer.draw_instance_predictions(output_dict["instances"].to("cpu"))
if "sem_seg" in output_dict:
visualizer.draw_sem_seg(
output_dict["sem_seg"].argmax(dim=0).to("cpu"),
area_threshold=0,
alpha=0.5,
)
return visualizer.get_output().get_image()
def visualize_dataset_dict(self, dataset_name, dataset_mapper, dataset_dict):
"""
Visualize the dataset_dict
"""
image = dataset_mapper._read_image(dataset_dict, "RGB")
visualizer = self.visualizer(image, metadata=MetadataCatalog.get(dataset_name))
visualizer.draw_dataset_dict(dataset_dict)
return visualizer.get_output().get_image()
class DataLoaderVisWrapper:
"""
Wrap the data loader to visualize its output via TensorBoardX at given frequency.
"""
def __init__(
self,
cfg,
tbx_writer,
data_loader,
visualizer: Optional[Type[VisualizerWrapper]] = None,
):
self.tbx_writer = tbx_writer
self.data_loader = data_loader
self._visualizer = visualizer(cfg) if visualizer else VisualizerWrapper(cfg)
self.log_frequency = cfg.TENSORBOARD.TRAIN_LOADER_VIS_WRITE_PERIOD
self.log_limit = cfg.TENSORBOARD.TRAIN_LOADER_VIS_MAX_IMAGES
self.batch_log_limit = cfg.TENSORBOARD.TRAIN_LOADER_VIS_MAX_BATCH_IMAGES
assert self.log_frequency >= 0
assert self.log_limit >= 0
assert self.batch_log_limit >= 0
self._remaining = self.log_limit
def __iter__(self):
for data in self.data_loader:
self._maybe_write_vis(data)
yield data
def _maybe_write_vis(self, data):
try:
storage = get_event_storage()
except AssertionError:
# wrapped data loader might be used outside EventStorage, don't visualize
# anything
return
if (
self.log_frequency == 0
            or storage.iter % self.log_frequency != 0
or self._remaining <= 0
):
return
length = min(len(data), min(self.batch_log_limit, self._remaining))
data = data[:length]
self._remaining -= length
for i, per_image in enumerate(data):
vis_image = self._visualizer.visualize_train_input(per_image)
tag = [f"train_loader_batch_{storage.iter}"]
if "dataset_name" in per_image:
tag += [per_image["dataset_name"]]
if "file_name" in per_image:
tag += [f"img_{i}", per_image["file_name"]]
if isinstance(vis_image, dict):
for k in vis_image:
self.tbx_writer._writer.add_image(
tag="/".join(tag + [k]),
img_tensor=vis_image[k],
global_step=storage.iter,
dataformats="HWC",
)
else:
self.tbx_writer._writer.add_image(
tag="/".join(tag),
img_tensor=vis_image,
global_step=storage.iter,
dataformats="HWC",
)
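# --- Usage sketch (editor's addition): wrap a train loader so the first few
# batches are written to TensorBoard. `tbx_writer` is assumed to be a detectron2
# TensorboardXWriter (only its underlying `_writer` SummaryWriter is used), and
# iteration must happen inside an EventStorage so the current iter is known.
def _example_wrap_train_loader(cfg, tbx_writer, train_loader):
    vis_loader = DataLoaderVisWrapper(cfg, tbx_writer, train_loader)
    return vis_loader  # iterate it exactly like the original loader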
class VisualizationEvaluator(DatasetEvaluator):
"""
Visualize GT and prediction during evaluation. It doesn't calculate any
metrics, just uses evaluator's interface as hook.
"""
# NOTE: the evaluator will be created for every eval (during training and
# after training), so the images will be logged multiple times, use a global
# counter to differentiate them in TB.
_counter = 0
def __init__(
self,
cfg,
tbx_writer,
dataset_mapper,
dataset_name,
train_iter=None,
tag_postfix=None,
visualizer: Optional[Type[VisualizerWrapper]] = None,
):
self.tbx_writer = tbx_writer
self.dataset_mapper = dataset_mapper
self.dataset_name = dataset_name
self._visualizer = visualizer(cfg) if visualizer else VisualizerWrapper(cfg)
self.train_iter = train_iter or VisualizationEvaluator._counter
self.tag_postfix = tag_postfix or ""
self.log_limit = max(cfg.TENSORBOARD.TEST_VIS_MAX_IMAGES, 0)
self.log_frequency = cfg.TENSORBOARD.TEST_VIS_WRITE_PERIOD
self._metadata = None
self._dataset_dict = None
self._file_name_to_dataset_dict = None
if self.log_limit > 0:
self._initialize_dataset_dict(dataset_name)
VisualizationEvaluator._counter += 1
self.reset()
def _initialize_dataset_dict(self, dataset_name: str) -> None:
# Enable overriding defaults in case the dataset hasn't been registered.
self._metadata = MetadataCatalog.get(dataset_name)
# NOTE: Since there's no GT from test loader, we need to get GT from
# the dataset_dict, this assumes the test data loader uses the item from
# dataset_dict in the default way.
self._dataset_dict = DatasetCatalog.get(dataset_name)
self._file_name_to_dataset_dict = {
dic["file_name"]: dic for dic in self._dataset_dict
}
def reset(self):
self._iter = 0
self._log_remaining = self.log_limit
def process(self, inputs, outputs):
if (
self.log_frequency == 0
or self._iter % self.log_frequency != 0
or self._log_remaining <= 0
):
self._iter += 1
return
for input, output in zip(inputs, outputs):
file_name = input["file_name"]
dataset_dict = self._file_name_to_dataset_dict[file_name]
gt_img = self._visualizer.visualize_dataset_dict(
self.dataset_name, self.dataset_mapper, dataset_dict
)
pred_img = self._visualizer.visualize_test_output(
self.dataset_name, self.dataset_mapper, input, output
)
tag_base = f"{self.dataset_name}{self.tag_postfix}/eval_iter_{self._iter}/{file_name}"
self.tbx_writer._writer.add_image(
f"{tag_base}/GT",
gt_img,
self.train_iter,
dataformats="HWC",
)
if not isinstance(pred_img, dict):
pred_img = {"Pred": pred_img}
for img_type in pred_img.keys():
self.tbx_writer._writer.add_image(
f"{tag_base}/{img_type}",
pred_img[img_type],
self.train_iter,
dataformats="HWC",
)
self._log_remaining -= 1
self._iter += 1
def has_finished_process(self):
return True
|
d2go-main
|
d2go/utils/visualization.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
import os
from functools import wraps
from typing import Any, Callable, List, TypeVar
import detectron2.utils.comm as comm
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultTrainer
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
)
from detectron2.utils.events import TensorboardXWriter
from mobile_cv.common.misc.oss_utils import fb_overwritable
T = TypeVar("T")
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
__all__ = [
"run_once",
"retryable",
"get_dir_path",
"TensorboardXWriter", # TODO: move to D2Go's vis utils if needed
"D2Trainer", # TODO: move to trainer folder
]
class MultipleFunctionCallError(Exception):
pass
@fb_overwritable()
def run_once(
raise_on_multiple: bool = False,
# pyre-fixme[34]: `Variable[T]` isn't present in the function's parameters.
) -> Callable[[Callable[..., T]], Callable[..., T]]:
"""
A decorator to wrap a function such that it only ever runs once
Useful, for example, with exit handlers that could be run via atexit or
via a signal handler. The decorator will cache the result of the first call
and return it on subsequent calls. If `raise_on_multiple` is set, any call
to the function after the first one will raise a
`MultipleFunctionCallError`.
"""
    def decorator(func: Callable[..., T]) -> Callable[..., T]:
signal: List[T] = []
@wraps(func)
def wrapper(*args, **kwargs) -> T:
if signal:
if raise_on_multiple:
raise MultipleFunctionCallError(
"Function %s was called multiple times" % func.__name__
)
return signal[0]
signal.append(func(*args, **kwargs))
return signal[0]
return wrapper
return decorator
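# --- Usage sketch (editor's addition): the body runs on the first call and the
# result is cached; every later call returns the cached value (or raises, when
# raise_on_multiple=True).
@run_once()
def _example_expensive_init() -> int:
    return 42  # computed once; subsequent calls return the cached 42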
@fb_overwritable()
class retryable(object):
"""Fake retryable function"""
def __init__(self, num_tries=1, sleep_time=0.1):
pass
def __call__(self, func: F) -> F:
return func
@fb_overwritable()
def get_dir_path(relative_path):
"""Return a path for a directory in this package, extracting if necessary
For an entire directory within the par file (zip, fastzip) or lpar
structure, this function will check to see if the contents are extracted;
extracting each file that has not been extracted. It returns the path of
a directory containing the expected contents, making sure permissions are
correct.
    Returns a string path; throws an exception on error
"""
return os.path.dirname(importlib.import_module(relative_path).__file__)
class D2Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "lvis":
return LVISEvaluator(dataset_name, output_dir=output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
|
d2go-main
|
d2go/utils/helper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
import re
import time
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.utils.file_io import PathManager
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
logger = logging.getLogger(__name__)
def fetch_checkpoints_till_final(checkpoint_dir):
"""
    A generator that yields all checkpoint paths under the given directory;
    it'll keep refreshing until model_final is found.
"""
MIN_SLEEP_INTERVAL = 1.0 # in seconds
MAX_SLEEP_INTERVAL = 60.0 # in seconds
sleep_interval = MIN_SLEEP_INTERVAL
finished_checkpoints = set()
def _add_and_log(path):
finished_checkpoints.add(path)
logger.info("Found checkpoint: {}".format(path))
return path
def _log_and_sleep(sleep_interval):
logger.info(
"Sleep {} seconds while waiting for model_final.pth".format(sleep_interval)
)
time.sleep(sleep_interval)
return min(sleep_interval * 2, MAX_SLEEP_INTERVAL)
def _get_lightning_checkpoints(path: str):
return [
os.path.join(path, x)
for x in PathManager.ls(path)
if x.endswith(ModelCheckpoint.FILE_EXTENSION)
and not x.startswith(ModelCheckpoint.CHECKPOINT_NAME_LAST)
]
while True:
if not PathManager.exists(checkpoint_dir):
sleep_interval = _log_and_sleep(sleep_interval)
continue
checkpoint_paths = DetectionCheckpointer(
None, save_dir=checkpoint_dir
).get_all_checkpoint_files()
checkpoint_paths = [
cpt_path
for cpt_path in checkpoint_paths
if os.path.basename(cpt_path).startswith("model")
]
checkpoint_paths.extend(_get_lightning_checkpoints(checkpoint_dir))
final_model_path = None
periodic_checkpoints = []
for path in sorted(checkpoint_paths):
if path.endswith("model_final.pth") or path.endswith("model_final.ckpt"):
final_model_path = path
continue
if path.endswith(ModelCheckpoint.FILE_EXTENSION):
# Lightning checkpoint
model_iter = int(
re.findall(
r"(?<=step=)\d+(?={})".format(ModelCheckpoint.FILE_EXTENSION),
path,
)[0]
)
else:
model_iter = int(re.findall(r"(?<=model_)\d+(?=\.pth)", path)[0])
periodic_checkpoints.append((path, model_iter))
periodic_checkpoints = [
pc for pc in periodic_checkpoints if pc[0] not in finished_checkpoints
]
periodic_checkpoints = sorted(periodic_checkpoints, key=lambda x: x[1])
for pc in periodic_checkpoints:
yield _add_and_log(pc[0])
sleep_interval = MIN_SLEEP_INTERVAL
if final_model_path is None:
sleep_interval = _log_and_sleep(sleep_interval)
else:
yield _add_and_log(final_model_path)
break
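# --- Usage sketch (editor's addition): evaluate every checkpoint as it appears.
# The generator blocks (with exponential backoff, capped at 60s) while waiting,
# and stops after yielding model_final.pth / model_final.ckpt.
def _example_eval_all_checkpoints(checkpoint_dir, evaluate_fn):
    for checkpoint_path in fetch_checkpoints_till_final(checkpoint_dir):
        evaluate_fn(checkpoint_path)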
|
d2go-main
|
d2go/utils/validation_monitor.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import Callable, TypeVar
from torch.distributed.elastic.multiprocessing.errors import (
_NOT_AVAILABLE,
ChildFailedError,
get_error_handler,
)
logger = logging.getLogger(__name__)
_RT = TypeVar("_RT")
def mast_error_handler(func: Callable[..., _RT]) -> Callable[..., _RT]:
def wrapper(*args, **kwargs) -> _RT:
logger.info("Starting main")
error_handler = get_error_handler()
logger.debug(f"Error handler is: {type(error_handler)=}, {error_handler=}")
error_handler.initialize()
logger.debug("Error handler has been initialized")
try:
logger.debug("Entered main for d2go")
return func(*args, **kwargs)
except ChildFailedError as e:
logger.info(f"Got a ChildFailedError: {e=}")
rank, failure = e.get_first_failure()
if failure.error_file != _NOT_AVAILABLE:
error_handler.dump_error_file(failure.error_file, failure.exitcode)
else:
logger.info(
(
f"local_rank {rank} FAILED with no error file."
f" Decorate your entrypoint fn with @record for traceback info."
f" See: https://pytorch.org/docs/stable/elastic/errors.html"
)
)
raise
except Exception as e:
logger.info(f"Caught a generic exception: {e=}")
error_handler.record_exception(e)
raise
return wrapper
def gather_mast_errors(func: Callable[..., _RT]) -> Callable[..., _RT]:
def wrapper(*args, **kwargs) -> _RT:
logger.info("Starting CLI application")
try:
return func(*args, **kwargs)
finally:
logging.info("Entering final reply file generation step")
import glob
import os
import shutil
torchx_reply_files = glob.glob("/tmp/torchx_*/**/*.json", recursive=True)
logger.info(
f"Found the following reply files on this host: {torchx_reply_files}"
)
first_reply_file = None
first_reply_file_st = float("Inf")
for f in torchx_reply_files:
if (mtime := os.stat(f).st_mtime) < first_reply_file_st:
first_reply_file = f
first_reply_file_st = mtime
if first_reply_file and os.environ.get("MAST_HPC_TASK_FAILURE_REPLY_FILE"):
logger.info(
f'Copying {first_reply_file=} to {os.environ["MAST_HPC_TASK_FAILURE_REPLY_FILE"]}'
)
shutil.copyfile(
first_reply_file, os.environ["MAST_HPC_TASK_FAILURE_REPLY_FILE"]
)
return wrapper
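# --- Usage sketch (editor's addition): stack both decorators on a CLI entry
# point. mast_error_handler records exceptions through torchelastic's error
# handler, and gather_mast_errors copies the earliest torchx reply file on exit.
@gather_mast_errors
@mast_error_handler
def _example_main() -> int:
    return 0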
|
d2go-main
|
d2go/utils/mast.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
def generate_test_input(height, width, is_train, num_classes, super_classes=None):
random_image = torch.rand(3, height, width).to(torch.float32)
ret = {"image": random_image}
if is_train:
mask_size = (
(height, width)
if super_classes is None
else (len(super_classes), height, width)
)
random_mask = torch.randint(low=0, high=num_classes, size=mask_size).to(
torch.int64
)
ret["sem_seg"] = random_mask
return ret
def validate_test_output(output, height, width, num_classes, super_classes=None):
sem_seg_per_image = output["sem_seg"]
    if super_classes is None:  # non-MCS case
detect_c_out, detect_h_out, detect_w_out = sem_seg_per_image.size()
assert detect_c_out == num_classes, detect_c_out
assert detect_h_out == height, (detect_h_out, height)
assert detect_w_out == width, (detect_w_out, width)
else: # MCS case
assert isinstance(sem_seg_per_image, dict)
assert all(k in super_classes for k in sem_seg_per_image), (
sem_seg_per_image.keys(),
super_classes,
)
for class_name, mask in sem_seg_per_image.items():
assert isinstance(class_name, str)
detect_c_out, detect_h_out, detect_w_out = mask.size()
assert detect_c_out == num_classes, detect_c_out
assert detect_h_out == height, (detect_h_out, height)
assert detect_w_out == width, (detect_w_out, width)
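# --- Usage sketch (editor's addition): round trip for the non-MCS case. A train
# input carries a random (H, W) semantic segmentation mask, and a fake model
# output of shape (num_classes, H, W) passes validation.
def _example_sem_seg_round_trip():
    height, width, num_classes = 8, 6, 3
    inp = generate_test_input(height, width, is_train=True, num_classes=num_classes)
    assert inp["sem_seg"].shape == (height, width)
    fake_output = {"sem_seg": torch.rand(num_classes, height, width)}
    validate_test_output(fake_output, height, width, num_classes)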
|
d2go-main
|
d2go/utils/testing/sem_seg_helper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import copy
import shutil
import tempfile
import unittest
from typing import Optional
import d2go.data.transforms.box_utils as bu
import torch
from d2go.export.exporter import convert_and_export_predictor
from d2go.runner.default_runner import GeneralizedRCNNRunner
from d2go.utils.testing.data_loader_helper import (
create_detection_data_loader_on_toy_dataset,
)
from detectron2.structures import Boxes, Instances
from mobile_cv.predictor.api import create_predictor
from parameterized import parameterized
def _get_image_with_box(image_size, boxes: Optional[Boxes] = None):
"""Draw boxes on the image, one box per channel, use values 10, 20, ..."""
ret = torch.zeros((3, image_size[0], image_size[1]))
if boxes is None:
return ret
assert len(boxes) <= ret.shape[0]
for idx, box in enumerate(boxes):
x0, y0, x1, y1 = box.int().tolist()
ret[idx, y0:y1, x0:x1] = (idx + 1) * 10
return ret
def _get_boxes_from_image(image, scale_xy=None):
"""Extract boxes from image created by `_get_image_with_box()`"""
cur_img_int = ((image / 10.0 + 0.5).int().float() * 10.0).int()
values = torch.unique(cur_img_int)
gt_values = [x * 10 for x in range(len(values))]
assert set(values.tolist()) == set(gt_values)
boxes = []
for idx in range(cur_img_int.shape[0]):
val = torch.unique(cur_img_int[idx]).tolist()
val = max(val)
if val == 0:
continue
# mask = (cur_img_int[idx, :, :] == val).int()
mask = (cur_img_int[idx, :, :] > 0).int()
box_xywh = bu.get_box_from_mask(mask.numpy())
boxes.append(bu.to_boxes_from_xywh(box_xywh))
ret = Boxes.cat(boxes)
if scale_xy is not None:
ret.scale(*scale_xy)
return ret
def get_batched_inputs(
num_images,
image_size=(1920, 1080),
resize_size=(398, 224),
boxes: Optional[Boxes] = None,
):
"""Get batched inputs in the format from d2/d2go data mapper
Draw the boxes on the images if `boxes` is not None
"""
ret = []
for idx in range(num_images):
cur = {
"file_name": f"img_{idx}.jpg",
"image_id": idx,
"dataset_name": "test_dataset",
"height": image_size[0],
"width": image_size[1],
"image": _get_image_with_box(resize_size, boxes),
}
ret.append(cur)
return ret
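# --- Usage sketch (editor's addition): boxes drawn by `_get_image_with_box` can
# be recovered by `_get_boxes_from_image`, which is what makes MockRCNNInference
# below deterministic.
def _example_box_round_trip():
    boxes = Boxes(torch.Tensor([[10.0, 20.0, 60.0, 100.0]]))
    batched = get_batched_inputs(1, resize_size=(224, 224), boxes=boxes)
    recovered = _get_boxes_from_image(batched[0]["image"])
    assert torch.allclose(recovered.tensor, boxes.tensor)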
def _get_keypoints_from_boxes(boxes: Boxes, num_keypoints: int):
"""Use box center as keypoints"""
centers = boxes.get_centers()
kpts = torch.cat((centers, torch.ones(centers.shape[0], 1)), dim=1)
kpts = kpts.repeat(1, num_keypoints).reshape(len(boxes), num_keypoints, 3)
return kpts
def _get_scale_xy(output_size_hw, instance_size_hw):
return (
output_size_hw[1] / instance_size_hw[1],
output_size_hw[0] / instance_size_hw[0],
)
def get_detected_instances_from_image(batched_inputs, scale_xy=None):
"""Get detected instances from batched_inputs, the results are in the same
format as GeneralizedRCNN.inference()
The images in the batched_inputs are created by `get_batched_inputs()` with
`boxes` provided.
"""
ret = []
for item in batched_inputs:
cur_img = item["image"]
img_hw = cur_img.shape[1:]
boxes = _get_boxes_from_image(cur_img, scale_xy=scale_xy)
num_boxes = len(boxes)
fields = {
"pred_boxes": boxes,
"scores": torch.Tensor([1.0] * num_boxes),
"pred_classes": torch.Tensor([0] * num_boxes).int(),
"pred_keypoints": _get_keypoints_from_boxes(boxes, 21),
"pred_keypoint_heatmaps": torch.ones([num_boxes, 21, 24, 24]),
}
ins = Instances(img_hw, **fields)
ret.append(ins)
return ret
def get_detected_instances(num_images, num_instances, resize_size=(392, 224)):
"""Create an detected instances for unit test"""
assert num_instances in [1, 2]
ret = []
for _idx in range(num_images):
fields = {
"pred_boxes": Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]])),
"scores": torch.Tensor([1.0, 1.0]),
"pred_classes": torch.Tensor([0, 0]).int(),
"pred_keypoints": torch.Tensor(
[70, 60, 1.5] * 21 + [180, 100, 2.0] * 21
).reshape(2, 21, 3),
"pred_keypoint_heatmaps": torch.ones([2, 21, 24, 24]),
}
ins = Instances(resize_size, **fields)[:num_instances]
ret.append(ins)
return ret
class MockRCNNInference(object):
"""Use to mock the GeneralizedRCNN.inference()"""
def __init__(self, image_size, resize_size):
self.image_size = image_size
self.resize_size = resize_size
@property
def device(self):
return torch.device("cpu")
def __call__(
self,
batched_inputs,
detected_instances=None,
do_postprocess: bool = True,
):
return self.inference(
batched_inputs,
detected_instances,
do_postprocess,
)
def inference(
self,
batched_inputs,
detected_instances=None,
do_postprocess: bool = True,
):
scale_xy = (
_get_scale_xy(self.image_size, self.resize_size) if do_postprocess else None
)
results = get_detected_instances_from_image(batched_inputs, scale_xy=scale_xy)
        # when do_postprocess is True, the result instances are stored inside a dict
if do_postprocess:
results = [{"instances": r} for r in results]
return results
def _validate_outputs(inputs, outputs):
assert len(inputs) == len(outputs)
# TODO: figure out how to validate outputs
def get_quick_test_config_opts(
fixed_single_proposals=True,
small_pooler_resolution=True,
small_resize_resolution=True,
):
ret = []
if fixed_single_proposals:
epsilon = 1e-4
ret.extend(
[
"MODEL.RPN.POST_NMS_TOPK_TEST",
1,
"TEST.DETECTIONS_PER_IMAGE",
1,
"MODEL.PROPOSAL_GENERATOR.MIN_SIZE",
0,
"MODEL.RPN.NMS_THRESH",
1.0 + epsilon,
"MODEL.ROI_HEADS.NMS_THRESH_TEST",
1.0 + epsilon,
"MODEL.ROI_HEADS.SCORE_THRESH_TEST",
0.0 - epsilon,
]
)
if small_pooler_resolution:
ret.extend(
[
"MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION",
1,
"MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION",
1,
"MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION",
1,
]
)
if small_resize_resolution:
ret.extend(
[
"INPUT.MIN_SIZE_TRAIN",
(8,),
"INPUT.MAX_SIZE_TRAIN",
9,
"INPUT.MIN_SIZE_TEST",
10,
"INPUT.MAX_SIZE_TEST",
11,
]
)
return [str(x) for x in ret]
def get_export_test_name(testcase_func, param_num, param):
predictor_type, compare_match = param.args
assert isinstance(predictor_type, str)
assert isinstance(compare_match, bool)
return "{}_{}".format(
testcase_func.__name__, parameterized.to_safe_name(predictor_type)
)
class RCNNBaseTestCases:
@staticmethod
def expand_parameterized_test_export(*args, **kwargs):
if "name_func" not in kwargs:
kwargs["name_func"] = get_export_test_name
return parameterized.expand(*args, **kwargs)
class TemplateTestCase(unittest.TestCase): # TODO: maybe subclass from TestMetaArch
def setUp(self):
self.setup_test_dir()
assert hasattr(self, "test_dir")
self.setup_custom_test()
assert hasattr(self, "runner")
assert hasattr(self, "cfg")
self.force_apply_overwrite_opts()
self.test_model = self.runner.build_model(self.cfg, eval_only=True)
def setup_test_dir(self):
self.test_dir = tempfile.mkdtemp(prefix="test_export_")
self.addCleanup(shutil.rmtree, self.test_dir)
def _get_test_image_sizes_default(self, is_train):
            # The model should work for any size, so don't always use a power of 2
            # or a multiple of size_divisibility for testing.
side_length = max(self.test_model.backbone.size_divisibility, 10)
# make it non-square to cover error caused by messing up width & height
h, w = side_length, side_length * 2
return h, w
def _get_test_image_size_no_resize(self, is_train):
# use cfg.INPUT to make sure data loader doesn't resize the image
if is_train:
                assert len(self.cfg.INPUT.MIN_SIZE_TRAIN) == 1
h = self.cfg.INPUT.MIN_SIZE_TRAIN[0]
w = self.cfg.INPUT.MAX_SIZE_TRAIN
else:
h = self.cfg.INPUT.MIN_SIZE_TEST
w = self.cfg.INPUT.MAX_SIZE_TEST
return h, w
def _get_test_image_sizes(self, is_train):
"""override this method to use other image size strategy"""
return self._get_test_image_sizes_default(is_train)
def setup_custom_test(self):
"""
Override this when using different runner, using different base config file,
or setting specific config for certain test.
"""
self.runner = GeneralizedRCNNRunner()
self.cfg = self.runner.get_default_cfg()
# subclass can call: self.cfg.merge_from_file(...)
def force_apply_overwrite_opts(self):
"""
            Recommended to be overridden only for a group of tests; an individual
            test should have its own `setup_custom_test`.
"""
# update config to make the model run fast
self.cfg.merge_from_list(get_quick_test_config_opts())
# forcing test on CPU
self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
@contextlib.contextmanager
def _create_data_loader(self, is_train):
"""
            Create the data loader used for the test case. Note that it's better
            to use "fake" data for quick tests and to isolate I/O.
"""
image_height, image_width = self._get_test_image_sizes(is_train=False)
with create_detection_data_loader_on_toy_dataset(
self.cfg,
image_height,
image_width,
is_train=is_train,
runner=self.runner,
) as data_loader:
yield data_loader
def _test_export(self, predictor_type, compare_match=True):
with self._create_data_loader(is_train=False) as data_loader:
inputs = next(iter(data_loader))
                # TODO: the export may change the model itself, need to fix this
model_to_export = copy.deepcopy(self.test_model)
predictor_path = convert_and_export_predictor(
self.cfg,
model_to_export,
predictor_type,
self.test_dir,
data_loader,
)
predictor = create_predictor(predictor_path)
predictor_outputs = predictor(inputs)
_validate_outputs(inputs, predictor_outputs)
if compare_match:
with torch.no_grad():
pytorch_outputs = self.test_model(inputs)
from detectron2.utils.testing import assert_instances_allclose
assert_instances_allclose(
predictor_outputs[0]["instances"],
pytorch_outputs[0]["instances"],
size_as_tensor=True,
)
return predictor_path
# TODO: add test_train
def _test_inference(self):
with self._create_data_loader(is_train=False) as data_loader:
inputs = next(iter(data_loader))
with torch.no_grad():
outputs = self.test_model(inputs)
_validate_outputs(inputs, outputs)
|
d2go-main
|
d2go/utils/testing/rcnn_helper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from d2go.quantization.qconfig import set_backend_and_create_qconfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.utils.testing.data_loader_helper import create_local_dataset
from detectron2.structures import Boxes, ImageList, Instances
from torch.ao.quantization.quantize_fx import convert_fx, prepare_qat_fx
@META_ARCH_REGISTRY.register()
class DetMetaArchForTest(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
        # weights that will be updated in forward() during training, used to
        # simulate weight updates in the optimization step
self.register_buffer("scale_weight", torch.Tensor([0.0]))
@property
def device(self):
return self.conv.weight.device
def forward(self, inputs):
if not self.training:
return self.inference(inputs)
images = [x["image"].to(self.device) for x in inputs]
images = ImageList.from_tensors(images, 1)
ret = self.conv(images.tensor)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
# simulate weight updates
self.scale_weight.fill_(1.0)
return {"loss": ret.norm()}
def inference(self, inputs):
instance = Instances((10, 10))
instance.pred_boxes = Boxes(
torch.tensor([[2.5, 2.5, 7.5, 7.5]], device=self.device) * self.scale_weight
)
instance.scores = torch.tensor([0.9], device=self.device)
instance.pred_classes = torch.tensor([1], dtype=torch.int32, device=self.device)
ret = [{"instances": instance}]
return ret
def custom_prepare_fx(self, cfg, is_qat, example_input=None):
example_inputs = (torch.rand(1, 3, 3, 3),)
self.avgpool = prepare_qat_fx(
self.avgpool,
{"": set_backend_and_create_qconfig(cfg, is_train=self.training)},
example_inputs,
)
def convert_fx_callback(model):
model.avgpool = convert_fx(model.avgpool)
return model
return self, convert_fx_callback
def get_det_meta_arch_cfg(cfg, dataset_name, output_dir):
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "DetMetaArchForTest"
cfg.DATASETS.TRAIN = (dataset_name,)
cfg.DATASETS.TEST = (dataset_name,)
cfg.INPUT.MIN_SIZE_TRAIN = (10,)
cfg.INPUT.MIN_SIZE_TEST = (10,)
cfg.SOLVER.MAX_ITER = 5
cfg.SOLVER.STEPS = [2]
cfg.SOLVER.WARMUP_ITERS = 1
cfg.SOLVER.CHECKPOINT_PERIOD = 1
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.REFERENCE_WORLD_SIZE = 0
cfg.OUTPUT_DIR = output_dir
return cfg
def create_detection_cfg(runner, output_dir):
ds_name = create_local_dataset(output_dir, 5, 10, 10)
cfg = runner.get_default_cfg()
return get_det_meta_arch_cfg(cfg, ds_name, output_dir)
|
d2go-main
|
d2go/utils/testing/meta_arch_helper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
d2go-main
|
d2go/utils/testing/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import itertools
import json
import math
import os
import uuid
from d2go.data.datasets import register_dataset_split
from d2go.runner import create_runner
from detectron2.data import DatasetCatalog, MetadataCatalog
from mobile_cv.common.misc.file_utils import make_temp_directory
from PIL import Image
IM_DIR = "image_directory"
ANN_FN = "annotation_file"
def create_toy_dataset(
image_generator, num_images, num_classes=-1, num_keypoints=0, is_rotated=False
):
"""given image_generator, create a dataset with toy annotations and catagories"""
categories = []
images = []
annotations = []
meta_data = {}
if num_classes == -1:
num_classes = num_images
for i in range(num_images):
image_generator.prepare_image(i)
image_dict = image_generator.get_image_dict(i)
width = image_dict["width"]
height = image_dict["height"]
images.append(image_dict)
if i < num_classes:
categories.append({"name": "class_{}".format(i), "id": i})
bbox = (
[width / 4, height / 4, width / 2, height / 2] # XYWH_ABS
if not is_rotated
else [width / 2, height / 2, width / 2, height / 2, 45] # cXcYWHO_ABS
)
keypoints = list(
itertools.chain.from_iterable(
[
math.cos(2 * math.pi * x / num_keypoints) * width / 4 + width / 2,
math.sin(2 * math.pi * x / num_keypoints) * height / 4 + height / 2,
1,
]
for x in range(num_keypoints)
)
)
no_pts = 10
segmentation = list(
itertools.chain.from_iterable(
[
math.cos(2 * math.pi * x / no_pts) * width / 4 + width / 2,
math.sin(2 * math.pi * x / no_pts) * height / 4 + height / 2,
]
for x in range(no_pts)
)
)
annotations.append(
{
"image_id": i,
"category_id": i % num_classes,
"id": i + 1,
"bbox": bbox,
"keypoints": keypoints,
"area": width * height,
"iscrowd": 0,
"ignore": 0,
"segmentation": [segmentation],
}
)
if num_keypoints > 0:
keypoint_names = [f"kp_{idx}" for idx in range(num_keypoints)]
meta_data.update({"keypoint_names": keypoint_names, "keypoint_flip_map": ()})
return (
{"categories": categories, "images": images, "annotations": annotations},
meta_data,
)
@contextlib.contextmanager
def _register_toy_dataset(
dataset_name, image_generator, num_images, num_classes=-1, num_keypoints=0
):
json_dataset, meta_data = create_toy_dataset(
image_generator,
num_images=num_images,
num_classes=num_classes,
num_keypoints=num_keypoints,
)
with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
json_file = os.path.join(tmp_dir, "{}.json".format(dataset_name))
with open(json_file, "w") as f:
json.dump(json_dataset, f)
split_dict = {
IM_DIR: image_generator.get_image_dir(),
ANN_FN: json_file,
"meta_data": meta_data,
}
register_dataset_split(dataset_name, split_dict)
try:
yield
finally:
DatasetCatalog.remove(dataset_name)
MetadataCatalog.remove(dataset_name)
@contextlib.contextmanager
def register_toy_coco_dataset(
dataset_name, num_images=3, image_size=(5, 10), num_classes=-1, num_keypoints=0
):
width, height = image_size
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
image_dir = os.path.join(dataset_dir, "images")
os.makedirs(image_dir)
image_generator = LocalImageGenerator(image_dir, width=width, height=height)
with _register_toy_dataset(
dataset_name,
image_generator,
num_images=num_images,
num_classes=num_classes,
num_keypoints=num_keypoints,
):
yield
def create_local_dataset(
out_dir,
num_images,
image_width,
image_height,
num_classes=-1,
num_keypoints=0,
is_rotated=False,
):
dataset_name = "_test_ds_" + str(uuid.uuid4())
img_gen = LocalImageGenerator(out_dir, image_width, image_height)
json_dataset, meta_data = create_toy_dataset(
img_gen,
num_images=num_images,
num_classes=num_classes,
num_keypoints=num_keypoints,
)
json_file = os.path.join(out_dir, "{}.json".format(dataset_name))
with open(json_file, "w") as f:
json.dump(json_dataset, f)
split_dict = {
IM_DIR: img_gen.get_image_dir(),
ANN_FN: json_file,
"meta_data": meta_data,
}
if is_rotated:
split_dict["evaluator_type"] = "rotated_coco"
register_dataset_split(dataset_name, split_dict)
return dataset_name
class LocalImageGenerator:
def __init__(self, image_dir, width, height):
self._width = width
self._height = height
self._image_dir = image_dir
def get_image_dir(self):
return self._image_dir
def get_image_dict(self, i):
return {
"file_name": "{}.jpg".format(i),
"width": self._width,
"height": self._height,
"id": i,
}
def prepare_image(self, i):
image = Image.new("RGB", (self._width, self._height))
image.save(os.path.join(self._image_dir, self.get_image_dict(i)["file_name"]))
@contextlib.contextmanager
def create_detection_data_loader_on_toy_dataset(
cfg, height, width, is_train, runner=None
):
"""
Args:
cfg (CfgNode): the config used to create data loader, it can control things like
resizing, augmentation.
height, width (int): the height/width of the image files (not the resized image size)
is_train (bool): training or testing
"""
if runner is None:
runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
# change dataset name to toy dataset
cfg.DATASETS.TRAIN = ["_toy_dataset_train_"]
cfg.DATASETS.TEST = ["_toy_dataset_test_"]
if is_train:
with register_toy_coco_dataset(
"_toy_dataset_train_", num_images=3, image_size=(width, height)
):
train_loader = runner.build_detection_train_loader(cfg)
yield train_loader
else:
with register_toy_coco_dataset(
"_toy_dataset_test_", num_images=3, image_size=(width, height)
):
test_loader = runner.build_detection_test_loader(
cfg, dataset_name="_toy_dataset_test_"
)
yield test_loader
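# --- Usage sketch (editor's addition): register a throwaway COCO-style dataset
# and read a single test batch; the dataset is removed from the catalogs when the
# context manager exits. `cfg` is assumed to be an unfrozen runner default config.
def _example_read_one_test_batch(cfg):
    with create_detection_data_loader_on_toy_dataset(
        cfg, height=10, width=10, is_train=False
    ) as data_loader:
        return next(iter(data_loader))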
|
d2go-main
|
d2go/utils/testing/data_loader_helper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pytorch_lightning as pl # type: ignore
from detectron2.utils.events import EventStorage
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
def get_lt_trainer(output_dir: str, cfg):
checkpoint_callback = ModelCheckpoint(dirpath=output_dir, save_last=True)
return pl.Trainer(
max_epochs=10**8,
max_steps=cfg.SOLVER.MAX_ITER,
val_check_interval=cfg.TEST.EVAL_PERIOD
if cfg.TEST.EVAL_PERIOD > 0
else cfg.SOLVER.MAX_ITER,
callbacks=[checkpoint_callback],
logger=False,
)
def lt_train(task, trainer):
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
def lt_test(task, trainer):
with EventStorage() as storage:
task.storage = storage
trainer.test(task)
return task.eval_res
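# --- Usage sketch (editor's addition): train then evaluate a d2go Lightning task
# with a shared trainer; `cfg` is assumed to define SOLVER.MAX_ITER and
# TEST.EVAL_PERIOD as used by get_lt_trainer above.
def _example_train_then_test(task, cfg, output_dir):
    trainer = get_lt_trainer(output_dir, cfg)
    lt_train(task, trainer)
    return lt_test(task, trainer)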
|
d2go-main
|
d2go/utils/testing/lightning_train_helper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
import os
import socket
import uuid
from functools import wraps
from tempfile import TemporaryDirectory
from typing import Optional
import torch
from d2go.distributed import distributed_worker, DistributedParams
def get_resource_path(file: Optional[str] = None):
path_list = [
os.path.dirname(importlib.import_module("d2go.tests").__file__),
"resources",
]
if file is not None:
path_list.append(file)
return os.path.join(*path_list)
def skip_if_no_gpu(func):
"""Decorator that can be used to skip GPU tests on non-GPU machines."""
func.skip_if_no_gpu = True
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.cuda.is_available():
return
if torch.cuda.device_count() <= 0:
return
return func(*args, **kwargs)
return wrapper
def enable_ddp_env(backend="gloo"):
def _enable_ddp_env(func):
@wraps(func)
def wrapper(*args, **kwargs):
def find_free_port() -> str:
s = socket.socket()
s.bind(("localhost", 0)) # Bind to a free port provided by the host.
return str(s.getsockname()[1])
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = find_free_port()
return distributed_worker(
main_func=func,
args=args,
kwargs=kwargs,
backend=backend,
init_method="file:///tmp/detectron2go_test_ddp_init_{}".format(
uuid.uuid4().hex
),
dist_params=DistributedParams(
local_rank=0,
machine_rank=0,
global_rank=0,
num_processes_per_machine=1,
world_size=1,
),
return_save_file=None, # don't save file
)
return wrapper
return _enable_ddp_env
def tempdir(func):
"""A decorator for creating a tempory directory that is cleaned up after function execution."""
@wraps(func)
def wrapper(self, *args, **kwargs):
with TemporaryDirectory() as temp:
return func(self, temp, *args, **kwargs)
return wrapper
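# --- Usage sketch (editor's addition): `tempdir` injects a fresh directory as the
# first argument after `self`, so a test method can write scratch files without
# any cleanup code. The holder class here is purely illustrative.
def _example_tempdir_usage():
    class _Holder:
        @tempdir
        def write_scratch(self, tmp_dir):
            path = os.path.join(tmp_dir, "out.txt")
            open(path, "w").close()
            return path  # note: the directory is already removed on return
    return _Holder().write_scratch()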
|
d2go-main
|
d2go/utils/testing/helper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# pyre-unsafe
from typing import Optional
import torch
from pytorch_lightning import LightningModule
from torch.utils.data.dataset import Dataset
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class TestModule(LightningModule):
def __init__(self, epoch_min_loss_override: Optional[int] = None):
"""LightningModule for testing purposes
Args:
epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum
validation loss for testing purposes (zero based). If None this is ignored. Defaults to None.
"""
super().__init__()
self.layer = torch.nn.Linear(in_features=32, out_features=2)
self.another_layer = torch.nn.Linear(in_features=2, out_features=2)
self.epoch_min_loss_override = epoch_min_loss_override
def forward(self, x):
x = self.layer(x)
return self.another_layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def training_step(self, batch, batch_idx):
output = self.forward(batch)
loss = self.loss(batch, output)
return {"output": output.detach(), "loss": loss, "checkpoint_on": loss.detach()}
def validation_step(self, batch, batch_idx):
output = self.forward(batch)
loss = self.loss(batch, output)
return {"output": output.detach(), "loss": loss, "checkpoint_on": loss.detach()}
def test_step(self, batch, batch_idx):
output = self.forward(batch)
loss = self.loss(batch, output)
return {"output": output.detach(), "loss": loss}
def training_epoch_end(self, outputs) -> None:
avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
self.log("avg_loss", avg_loss)
def validation_epoch_end(self, outputs) -> None:
avg_val_loss = torch.stack(
[torch.randn(1, requires_grad=True) for _ in outputs]
).mean()
# For testing purposes allow a nominated epoch to have a low loss
if self.current_epoch == self.epoch_min_loss_override:
avg_val_loss -= 1e10
self.log("val_loss", avg_val_loss)
self.log("checkpoint_on", avg_val_loss)
def test_epoch_end(self, outputs) -> None:
avg_loss = torch.stack(
[torch.randn(1, requires_grad=True) for _ in outputs]
).mean()
self.log("val_loss", avg_loss)
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(), lr=0.001)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
|
d2go-main
|
d2go/utils/testing/lightning_test_module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from typing import Optional
import pkg_resources
import torch
from d2go.runner import create_runner
from d2go.utils.launch_environment import MODEL_ZOO_STORAGE_PREFIX
from detectron2.checkpoint import DetectionCheckpointer
class _ModelZooUrls(object):
"""
Mapping from names to officially released D2Go pre-trained models.
"""
CONFIG_PATH_TO_URL_SUFFIX = {
"faster_rcnn_fbnetv3a_C4.yaml": "268421013/model_final.pth",
"faster_rcnn_fbnetv3a_dsmask_C4.yaml": "268412271/model_0499999.pth",
"faster_rcnn_fbnetv3g_fpn.yaml": "250356938/model_0374999.pth",
"mask_rcnn_fbnetv3a_C4.yaml": "268421013/model_final.pth",
"mask_rcnn_fbnetv3a_dsmask_C4.yaml": "268412271/model_0499999.pth",
"mask_rcnn_fbnetv3g_fpn.yaml": "287445123/model_0409999.pth",
"keypoint_rcnn_fbnetv3a_dsmask_C4.yaml": "250430934/model_0389999.pth",
}
def get_checkpoint_url(config_path):
"""
Returns the URL to the model trained using the given config
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
Returns:
str: a URL to the model
"""
name = config_path.replace(".yaml", "")
if config_path in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX:
suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[config_path]
return MODEL_ZOO_STORAGE_PREFIX + suffix
raise RuntimeError("{} not available in Model Zoo!".format(name))
def get_config_file(config_path):
"""
Returns path to a builtin config file.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
Returns:
str: the real path to the config file.
"""
cfg_file = pkg_resources.resource_filename(
"d2go", os.path.join("configs", config_path)
)
if not os.path.exists(cfg_file):
raise RuntimeError("{} not available in Model Zoo!".format(config_path))
return cfg_file
def get_config(
config_path, trained: bool = False, runner="d2go.runner.GeneralizedRCNNRunner"
):
"""
Returns a config object for a model in model zoo.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights.
If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
instead; this will typically (though not always) initialize a subset of weights using
an ImageNet pre-trained model, while randomly initializing the other weights.
Returns:
CfgNode: a config object
"""
cfg_file = get_config_file(config_path)
runner = create_runner(runner)
cfg = runner.get_default_cfg()
cfg.merge_from_file(cfg_file)
if trained:
cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
return cfg
def get(
config_path,
trained: bool = False,
device: Optional[str] = None,
runner="d2go.runner.GeneralizedRCNNRunner",
):
"""
    Get a model specified by a relative path under d2go's official ``configs/`` directory.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): see :func:`get_config`.
device (str or None): overwrite the device in config, if given.
Returns:
nn.Module: a d2go model. Will be in training mode.
Example:
::
from d2go import model_zoo
model = model_zoo.get("faster_rcnn_fbnetv3a_C4.yaml", trained=True)
"""
    cfg = get_config(config_path, trained, runner=runner)
if device is not None:
cfg.MODEL.DEVICE = device
elif not torch.cuda.is_available():
cfg.MODEL.DEVICE = "cpu"
runner = create_runner(runner)
model = runner.build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
return model
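# Example (a minimal, hedged sketch; not part of the official API surface):
# build the config for a zoo entry and load the model on CPU. Assumes the
# pre-trained weights are reachable under MODEL_ZOO_STORAGE_PREFIX.
#
#   from d2go import model_zoo
#   cfg = model_zoo.get_config("faster_rcnn_fbnetv3a_C4.yaml", trained=True)
#   model = model_zoo.get("faster_rcnn_fbnetv3a_C4.yaml", trained=True, device="cpu")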
|
d2go-main
|
d2go/model_zoo/model_zoo.py
|
d2go-main
|
d2go/model_zoo/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import List
import detectron2.utils.comm as comm
import numpy as np
import torch
from d2go.config import CfgNode as CN, temp_defrost
from detectron2.engine import hooks
from detectron2.layers import ShapeSpec
from detectron2.modeling import GeneralizedRCNN
from detectron2.modeling.anchor_generator import (
ANCHOR_GENERATOR_REGISTRY,
BufferList,
DefaultAnchorGenerator,
)
from detectron2.modeling.proposal_generator.rpn import RPN
from detectron2.structures.boxes import Boxes
logger = logging.getLogger(__name__)
def add_kmeans_anchors_cfg(_C):
_C.MODEL.KMEANS_ANCHORS = CN()
_C.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = False
_C.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 0
_C.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 0
_C.MODEL.KMEANS_ANCHORS.DATASETS = ()
_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
_C.MODEL.KMEANS_ANCHORS.RNG_SEED = 3
return _C
def compute_kmeans_anchors_hook(runner, cfg):
"""
    This function creates a before_train hook, which will:
    1. create a train loader using the provided KMEANS_ANCHORS.DATASETS;
    2. collect statistics of boxes from the train loader outputs, using up
       to KMEANS_ANCHORS.NUM_TRAINING_IMG images;
    3. compute K-means with KMEANS_ANCHORS.NUM_CLUSTERS clusters;
    4. update the buffers in anchor_generator.
"""
def before_train_callback(trainer):
if not cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON:
return
new_cfg = cfg.clone()
with temp_defrost(new_cfg):
new_cfg.DATASETS.TRAIN = cfg.MODEL.KMEANS_ANCHORS.DATASETS
data_loader = runner.build_detection_train_loader(new_cfg)
anchors = compute_kmeans_anchors(cfg, data_loader)
anchors = anchors.tolist()
assert isinstance(trainer.model, GeneralizedRCNN)
assert isinstance(trainer.model.proposal_generator, RPN)
anchor_generator = trainer.model.proposal_generator.anchor_generator
assert isinstance(anchor_generator, KMeansAnchorGenerator)
anchor_generator.update_cell_anchors(anchors)
return hooks.CallbackHook(before_train=before_train_callback)
@ANCHOR_GENERATOR_REGISTRY.register()
class KMeansAnchorGenerator(DefaultAnchorGenerator):
"""Generate anchors using pre-computed KMEANS_ANCHORS.COMPUTED_ANCHORS"""
def __init__(self, cfg, input_shape: List[ShapeSpec]):
torch.nn.Module.__init__(self)
self.strides = [x.stride for x in input_shape]
self.offset = cfg.MODEL.ANCHOR_GENERATOR.OFFSET
assert 0.0 <= self.offset < 1.0, self.offset
# kmeans anchors
num_features = len(cfg.MODEL.RPN.IN_FEATURES)
        assert num_features == 1, "Doesn't support multiple feature maps"
        # NOTE: KMeans anchors are only computed at training time; at initialization,
        # set anchors to the correct shape but with invalid values as placeholders.
computed_anchors = [[float("Inf")] * 4] * cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS
cell_anchors = [torch.Tensor(computed_anchors)]
self.cell_anchors = BufferList(cell_anchors)
def update_cell_anchors(self, computed_anchors):
assert len(self.cell_anchors) == 1
for buf in self.cell_anchors.buffers():
assert len(buf) == len(computed_anchors)
buf.data = torch.Tensor(computed_anchors).to(buf.device)
logger.info("Updated cell anchors")
def forward(self, *args, **kwargs):
for base_anchors in self.cell_anchors:
            assert torch.isfinite(base_anchors).all(), (
                "The anchors are not initialized yet; please provide COMPUTED_ANCHORS"
                " when creating the model and/or load valid weights."
            )
return super().forward(*args, **kwargs)
def collect_boxes_size_stats(data_loader, max_num_imgs, _legacy_plus_one=False):
logger.info(
"Collecting size of boxes, loading up to {} images from data loader ...".format(
max_num_imgs
)
)
    # data_loader might have infinite length, so we can't loop over all images;
    # max_num_imgs == 0 means 0 images, not the whole dataset
assert max_num_imgs >= 0
box_sizes = []
remaining_num_imgs = max_num_imgs
    total_imgs_seen = 0
for i, batched_inputs in enumerate(data_loader):
        total_imgs_seen += len(batched_inputs)
batch_size = min(remaining_num_imgs, len(batched_inputs))
batched_inputs = batched_inputs[:batch_size]
for x in batched_inputs:
boxes = x["instances"].gt_boxes # xyxy
assert isinstance(boxes, Boxes)
for t in boxes.tensor:
box_sizes += [[t[2] - t[0], t[3] - t[1]]]
                # NOTE: the previous implementation didn't apply +1, so to match
                # the previous (incorrect) results we have to subtract the im_scale
if _legacy_plus_one: # only for matching old tests
im_scale = x["image"].shape[1] / x["height"] # image is chw
box_sizes[-1][0] -= im_scale
box_sizes[-1][1] -= im_scale
        estimated_iters = max_num_imgs / total_imgs_seen * (i + 1)
remaining_num_imgs -= batch_size
if i % max(1, int(estimated_iters / 20)) == 0:
# log 20 times at most
percentage = 100.0 * i / estimated_iters
logger.info(
"Processed batch {} ({:.2f}%) from data_loader, got {} boxes,"
" remaining number of images: {}/{}".format(
i, percentage, len(box_sizes), remaining_num_imgs, max_num_imgs
)
)
if remaining_num_imgs <= 0:
assert remaining_num_imgs == 0
break
box_sizes = np.array(box_sizes)
logger.info(
"Collected {} boxes from {} images".format(len(box_sizes), max_num_imgs)
)
return box_sizes
def compute_kmeans_anchors(
cfg, data_loader, sort_by_area=True, _stride=0, _legacy_plus_one=False
):
assert (
cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG > 0
), "Please provide positive MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG"
num_training_img = cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG
div_i, mod_i = divmod(num_training_img, comm.get_world_size())
num_training_img_i = div_i + (comm.get_rank() < mod_i)
box_sizes_i = collect_boxes_size_stats(
data_loader,
num_training_img_i,
_legacy_plus_one=_legacy_plus_one,
)
all_box_sizes = comm.all_gather(box_sizes_i)
box_sizes = np.concatenate(all_box_sizes)
logger.info("Collected {} boxes from all gpus".format(len(box_sizes)))
assert (
cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS > 0
), "Please provide positive MODEL.KMEANS_ANCHORS.NUM_CLUSTERS"
from sklearn.cluster import KMeans # delayed import
default_anchors = (
KMeans(
n_clusters=cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS,
random_state=cfg.MODEL.KMEANS_ANCHORS.RNG_SEED,
)
.fit(box_sizes)
.cluster_centers_
)
anchors = []
for anchor in default_anchors:
w, h = anchor
# center anchor boxes at (stride/2,stride/2)
new_anchors = np.hstack(
(
_stride / 2 - 0.5 * w,
_stride / 2 - 0.5 * h,
_stride / 2 + 0.5 * w,
_stride / 2 + 0.5 * h,
)
)
anchors.append(new_anchors)
anchors = np.array(anchors)
# sort anchors by area
areas = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1])
sqrt_areas = np.sqrt(areas)
if sort_by_area:
indices = np.argsort(sqrt_areas)
anchors = anchors[indices]
sqrt_areas = sqrt_areas[indices].tolist()
display_str = "\n".join(
[
s + "\t sqrt area: {:.2f}".format(a)
for s, a in zip(str(anchors).split("\n"), sqrt_areas)
]
)
logger.info(
"Compuated kmeans anchors (sorted by area: {}):\n{}".format(
sort_by_area, display_str
)
)
return anchors
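# Example (hedged sketch): typical wiring for KMeans anchors. `runner` and
# `trainer` are assumed to come from the surrounding d2go training setup.
#
#   cfg = add_kmeans_anchors_cfg(cfg)
#   cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"
#   cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
#   cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 5
#   cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 1000
#   cfg.MODEL.KMEANS_ANCHORS.DATASETS = ("my_train_dataset",)  # hypothetical name
#   trainer.register_hooks([compute_kmeans_anchors_hook(runner, cfg)])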
|
d2go-main
|
d2go/modeling/kmeans_anchors.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from abc import abstractmethod
from typing import List, Tuple
import torch
from d2go.registry.builtin import MODELING_HOOK_REGISTRY
class ModelingHook(object):
"""Modeling hooks provide a way to modify the model during the model building
process. It is simple but allows users to modify the model by creating wrapper,
override member functions, adding additional components, and loss etc.. It
could be used to implement features such as QAT, model transformation for training,
distillation/semi-supervised learning, and customization for loading pre-trained
weights.
"""
def __init__(self, cfg):
self.cfg = cfg
@abstractmethod
def apply(self, model: torch.nn.Module) -> torch.nn.Module:
"""This function will called during the model building process to modify
the behavior of the input model.
The created model will be
model == create meta arch -> model_hook_1.apply(model) ->
model_hook_2.apply(model) -> ...
"""
pass
@abstractmethod
def unapply(self, model: torch.nn.Module) -> torch.nn.Module:
"""This function will be called when the users called model.unapply_modeling_hooks()
after training. The main use case of the function is to remove the changes
applied to the model in `apply`. The hooks will be called in reverse order
as follow:
model.unapply_modeling_hooks() == model_hook_N.unapply(model) ->
model_hook_N-1.unapply(model) -> ... -> model_hook_1.unapply(model)
"""
pass
def _build_modeling_hooks(cfg, hook_names: List[str]) -> List[ModelingHook]:
"""Build the hooks from cfg"""
ret = [MODELING_HOOK_REGISTRY.get(name)(cfg) for name in hook_names]
return ret
def _unapply_modeling_hook(
model: torch.nn.Module, hooks: List[ModelingHook]
) -> torch.nn.Module:
"""Call unapply on the hooks in reversed order"""
for hook in reversed(hooks):
model = hook.unapply(model)
return model
def _apply_modeling_hooks(
model: torch.nn.Module, hooks: List[ModelingHook]
) -> torch.nn.Module:
"""Apply hooks on the model, users could call model.unapply_modeling_hooks()
to return the model that removes all the hooks
"""
if len(hooks) == 0:
return model
for hook in hooks:
model = hook.apply(model)
assert not hasattr(model, "_modeling_hooks")
model._modeling_hooks = hooks
def _unapply_modeling_hooks(self):
assert hasattr(self, "_modeling_hooks")
model = _unapply_modeling_hook(self, self._modeling_hooks)
return model
# add a function that could be used to unapply the modeling hooks
assert not hasattr(model, "unapply_modeling_hooks")
model.unapply_modeling_hooks = _unapply_modeling_hooks.__get__(model)
return model
def build_and_apply_modeling_hooks(
model: torch.nn.Module, cfg, hook_names: List[str]
) -> Tuple[torch.nn.Module, List[ModelingHook]]:
"""Build modeling hooks from cfg and apply hooks on the model. Users could
call model.unapply_modeling_hooks() to return the model that removes all
the hooks.
"""
hooks = _build_modeling_hooks(cfg, hook_names)
model = _apply_modeling_hooks(model, hooks)
return model, hooks
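# Example (hedged sketch): a minimal custom hook. The hook name and the
# attribute it attaches are hypothetical; `apply` runs during
# build_and_apply_modeling_hooks and `unapply` runs (in reverse hook order)
# via model.unapply_modeling_hooks().
#
#   @MODELING_HOOK_REGISTRY.register()
#   class MyNoopHook(ModelingHook):
#       def apply(self, model: torch.nn.Module) -> torch.nn.Module:
#           model._my_flag = True
#           return model
#
#       def unapply(self, model: torch.nn.Module) -> torch.nn.Module:
#           del model._my_flag
#           return model
#
#   model, hooks = build_and_apply_modeling_hooks(model, cfg, ["MyNoopHook"])
#   model = model.unapply_modeling_hooks()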
|
d2go-main
|
d2go/modeling/modeling_hook.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# misc.py
# modules that are used in different places but are not a specific type (e.g., backbone)
from typing import Any, Callable, Optional
import torch
import torch.nn as nn
class SplitAndConcat(nn.Module):
"""Split the data from split_dim and concatenate in concat_dim.
@param split_dim from which axis the data will be chunk
@param concat_dim to which axis the data will be concatenated
@param chunk size of the data to be chunk/concatenated
"""
def __init__(self, split_dim: int = 1, concat_dim: int = 0, chunk: int = 2):
super(SplitAndConcat, self).__init__()
self.split_dim = split_dim
self.concat_dim = concat_dim
self.chunk = chunk
def forward(self, x):
x = torch.chunk(x, self.chunk, dim=self.split_dim)
x = torch.cat(x, dim=self.concat_dim)
return x
def extra_repr(self):
return (
f"split_dim={self.split_dim}, concat_dim={self.concat_dim}, "
f"chunk={self.chunk}"
)
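# Example (hedged sketch): with the defaults (split_dim=1, concat_dim=0,
# chunk=2), a (N, 2C, H, W) input becomes (2N, C, H, W):
#
#   m = SplitAndConcat()
#   y = m(torch.zeros(4, 6, 8, 8))  # y.shape == torch.Size([8, 3, 8, 8])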
class AddCoordChannels(nn.Module):
"""Appends coordinate location values to the channel dimension.
@param with_r include radial distance from centroid as additional channel (default: False)
"""
def __init__(self, with_r: bool = False) -> None:
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
batch_size_shape, channel_in_shape, dim_y, dim_x = input_tensor.shape
device = input_tensor.device
xx_ones = torch.ones([1, 1, 1, dim_x], dtype=torch.int32)
yy_ones = torch.ones([1, 1, 1, dim_y], dtype=torch.int32)
xx_range = torch.arange(dim_y, dtype=torch.int32)
yy_range = torch.arange(dim_x, dtype=torch.int32)
xx_range = xx_range[None, None, :, None]
yy_range = yy_range[None, None, :, None]
xx_channel = torch.matmul(xx_range, xx_ones)
yy_channel = torch.matmul(yy_range, yy_ones)
# transpose y
yy_channel = yy_channel.permute(0, 1, 3, 2)
xx_channel = xx_channel.float() / (dim_y - 1)
yy_channel = yy_channel.float() / (dim_x - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size_shape, 1, 1, 1)
yy_channel = yy_channel.repeat(batch_size_shape, 1, 1, 1)
out = torch.cat(
[input_tensor, xx_channel.to(device), yy_channel.to(device)], dim=1
)
if self.with_r:
rr = torch.sqrt(
torch.pow(xx_channel - 0.5, 2) + torch.pow(yy_channel - 0.5, 2)
)
out = torch.cat([out, rr], dim=1)
return out
def inplace_delegate(
self,
api_name: str,
sub_module_name: str,
setter_fn: Optional[Callable],
*args,
**kwargs,
) -> Any:
"""Helper function to delegate API calls to its submodule"""
sub_module = getattr(self, sub_module_name)
api_name = f"delegate_{api_name}"
if hasattr(sub_module, api_name):
func = getattr(sub_module, api_name)
orig_ret = func(*args, **kwargs)
if setter_fn is None:
# Assume the return of `func` will replace the submodule
setattr(self, sub_module_name, orig_ret)
return self
else:
return setter_fn(self, sub_module_name, orig_ret)
else:
raise RuntimeError(
f"It seems the {sub_module_name} doesn't implement {api_name},"
" quantization might fail."
)
|
d2go-main
|
d2go/modeling/misc.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List
import numpy as np
import torch
from d2go.config import CfgNode as CN
from d2go.data.dataset_mappers.build import D2GO_DATA_MAPPER_REGISTRY
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from detectron2.layers import cat
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.utils.registry import Registry
from mobile_cv.torch.utils_toffee.alias import alias
from torch import nn
from torch.nn import functional as F
logger = logging.getLogger(__name__)
SUBCLASS_FETCHER_REGISTRY = Registry("SUBCLASS_FETCHER")
def add_subclass_configs(cfg):
_C = cfg
_C.MODEL.SUBCLASS = CN()
_C.MODEL.SUBCLASS.SUBCLASS_ON = False
_C.MODEL.SUBCLASS.NUM_SUBCLASSES = 0 # must be set
_C.MODEL.SUBCLASS.NUM_LAYERS = 1
_C.MODEL.SUBCLASS.SUBCLASS_ID_FETCHER = "SubclassFetcher" # ABC, must be set
_C.MODEL.SUBCLASS.SUBCLASS_MAPPING = (
[]
) # subclass mapping from model output to annotation
class SubclassFetcher(ABC):
"""Fetcher class to read subclass id annotations from dataset and prepare for train/eval.
Subclass this and register with `@SUBCLASS_FETCHER_REGISTRY.register()` decorator
to use with custom projects.
"""
def __init__(self, cfg):
raise NotImplementedError()
@property
@abstractmethod
def subclass_names(self) -> List[str]:
"""Overwrite this member with any new mappings' subclass names, which
may be useful for specific evaluation purposes.
len(self.subclass_names) should be equal to the expected number
of subclass head outputs (cfg.MODEL.SUBCLASS.NUM_SUBCLASSES + 1).
"""
pass
def remap(self, subclass_id: int) -> int:
"""Map subclass ids read from dataset to new label id"""
return subclass_id
def fetch_subclass_ids(self, dataset_dict: Dict[str, Any]) -> List[int]:
"""Get all the subclass_ids in a dataset dict"""
extras_list = [anno.get("extras") for anno in dataset_dict["annotations"]]
subclass_ids = [extras["subclass_id"] for extras in extras_list]
return subclass_ids
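# Example (hedged sketch): a project-specific fetcher; the registry name and
# subclass labels below are hypothetical.
#
#   @SUBCLASS_FETCHER_REGISTRY.register()
#   class MySubclassFetcher(SubclassFetcher):
#       def __init__(self, cfg):
#           self.num_subclasses = cfg.MODEL.SUBCLASS.NUM_SUBCLASSES
#
#       @property
#       def subclass_names(self):
#           # length == NUM_SUBCLASSES + 1 (background at index 0)
#           return ["background", "door_open", "door_closed"]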
@D2GO_DATA_MAPPER_REGISTRY.register()
class SubclassDatasetMapper(D2GoDatasetMapper):
"""
Wrap any dataset mapper, encode gt_subclasses to the instances.
"""
def __init__(self, cfg, is_train, tfm_gens=None, subclass_fetcher=None):
super().__init__(cfg, is_train=is_train, tfm_gens=tfm_gens)
if subclass_fetcher is None:
fetcher_name = cfg.MODEL.SUBCLASS.SUBCLASS_ID_FETCHER
self.subclass_fetcher = SUBCLASS_FETCHER_REGISTRY.get(fetcher_name)(cfg)
logger.info(
f"Initialized {self.__class__.__name__} with "
f"subclass fetcher '{self.subclass_fetcher.__class__.__name__}'"
)
else:
assert isinstance(subclass_fetcher, SubclassFetcher), subclass_fetcher
self.subclass_fetcher = subclass_fetcher
logger.info(f"Set subclass fetcher to {self.subclass_fetcher}")
# NOTE: field doesn't exist when loading a (old) caffe2 model.
# self.subclass_on = cfg.MODEL.SUBCLASS.SUBCLASS_ON
self.subclass_on = True
def _original_call(self, dataset_dict):
"""
Map the dataset dict with D2GoDatasetMapper, then augment with subclass gt tensors.
"""
# Transform removes key 'annotations' from the dataset dict
mapped_dataset_dict = super()._original_call(dataset_dict)
if self.is_train and self.subclass_on:
subclass_ids = self.subclass_fetcher.fetch_subclass_ids(dataset_dict)
subclasses = torch.tensor(subclass_ids, dtype=torch.int64)
mapped_dataset_dict["instances"].gt_subclasses = subclasses
return mapped_dataset_dict
def build_subclass_head(cfg, in_chann, out_chann):
# fully connected layers: n-1 in_chann x in_chann layers, and 1 in_chann x out_chann layer
layers = [
nn.Linear(in_chann, in_chann) for _ in range(cfg.MODEL.SUBCLASS.NUM_LAYERS - 1)
]
layers.append(nn.Linear(in_chann, out_chann))
return nn.Sequential(*layers)
@ROI_HEADS_REGISTRY.register()
class StandardROIHeadsWithSubClass(StandardROIHeads):
"""
    A StandardROIHeads with an additional subclass head.
"""
def __init__(self, cfg, input_shape):
super().__init__(cfg, input_shape)
self.subclass_on = cfg.MODEL.SUBCLASS.SUBCLASS_ON
if not self.subclass_on:
return
self.num_subclasses = cfg.MODEL.SUBCLASS.NUM_SUBCLASSES
self.subclass_head = build_subclass_head(
cfg, self.box_head.output_shape.channels, self.num_subclasses + 1
)
for layer in self.subclass_head:
nn.init.normal_(layer.weight, std=0.01)
nn.init.constant_(layer.bias, 0.0)
def forward(self, images, features, proposals, targets=None):
"""
Same as StandardROIHeads.forward but add logic for subclass.
"""
if not self.subclass_on:
return super().forward(images, features, proposals, targets)
# --- start copy -------------------------------------------------------
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
# NOTE: `has_gt` = False for negatives and we must manually register `gt_subclasses`,
# because custom gt_* fields will not be automatically registered in sampled proposals.
for pp_per_im in proposals:
if not pp_per_im.has("gt_subclasses"):
background_subcls_idx = 0
                    # use torch.full so this also works on CPU
                    # (the old torch.cuda.LongTensor required CUDA)
                    pp_per_im.gt_subclasses = torch.full(
                        (len(pp_per_im),),
                        background_subcls_idx,
                        dtype=torch.int64,
                        device=pp_per_im.proposal_boxes.device,
                    )
del targets
features_list = [features[f] for f in self.in_features]
box_features = self.box_pooler(
features_list, [x.proposal_boxes for x in proposals]
)
box_features = self.box_head(box_features)
predictions = self.box_predictor(box_features)
# --- end copy ---------------------------------------------------------
# NOTE: don't delete box_features, keep it temporarily
# del box_features
box_features = box_features.view(
box_features.shape[0], np.prod(box_features.shape[1:])
)
pred_subclass_logits = self.subclass_head(box_features)
if self.training:
losses = self.box_predictor.losses(predictions, proposals)
# During training the proposals used by the box head are
# used by the mask, keypoint (and densepose) heads.
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
# subclass head
gt_subclasses = cat([p.gt_subclasses for p in proposals], dim=0)
loss_subclass = F.cross_entropy(
pred_subclass_logits, gt_subclasses, reduction="mean"
)
losses.update({"loss_subclass": loss_subclass})
return proposals, losses
else:
pred_instances, kept_indices = self.box_predictor.inference(
predictions, proposals
)
# During inference cascaded prediction is used: the mask and keypoints
# heads are only applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(features, pred_instances)
# subclass head
probs = F.softmax(pred_subclass_logits, dim=-1)
for pred_instances_i, kept_indices_i in zip(pred_instances, kept_indices):
pred_instances_i.pred_subclass_prob = torch.index_select(
probs,
dim=0,
index=kept_indices_i.to(torch.int64),
)
if torch.onnx.is_in_onnx_export():
assert len(pred_instances) == 1
pred_instances[0].pred_subclass_prob = alias(
pred_instances[0].pred_subclass_prob, "subclass_prob_nms"
)
return pred_instances, {}
|
d2go-main
|
d2go/modeling/subclass.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import itertools
import logging
from contextlib import contextmanager
from typing import Iterator, List
import torch
from detectron2.engine.train_loop import HookBase
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
_CHECKPOINT_PREFIX,
)
logger = logging.getLogger(__name__)
class EMAState(object):
def __init__(self, include_frozen=True, include_buffer=True):
self.include_frozen = include_frozen
self.include_buffer = include_buffer
self.state = {}
# HACK: This hack is needed to strip checkpoint wrapper prefix from fqns so it doesn't affect loading.
# TODO: Remove this hack by rewriting EMAState to use model.state_dict()
self.prefix_to_remove = [_CHECKPOINT_PREFIX]
@classmethod
def FromModel(cls, model: torch.nn.Module, device: str = "", **kwargs):
ret = cls(**kwargs)
ret.save_from(model, device)
return ret
def save_from(self, model: torch.nn.Module, device: str = ""):
"""Save model state from `model` to this object"""
for name, val in self.get_model_state_iterator(model):
val = val.detach().clone()
self.state[name] = val.to(device) if device else val
def apply_to(self, model: torch.nn.Module):
"""Apply state to `model` from this object"""
with torch.no_grad():
for name, val in self.get_model_state_iterator(model):
assert (
name in self.state
), f"Name {name} not existed, available names {self.state.keys()}"
val.copy_(self.state[name])
@contextmanager
def apply_and_restore(self, model):
old_state = EMAState.FromModel(model, self.device)
self.apply_to(model)
yield old_state
old_state.apply_to(model)
def get_ema_model(self, model):
ret = copy.deepcopy(model)
self.apply_to(ret)
return ret
@property
def device(self):
if not self.has_inited():
return None
return next(iter(self.state.values())).device
def to(self, device):
for name in self.state:
self.state[name] = self.state[name].to(device)
return self
def has_inited(self):
return self.state
def clear(self):
self.state.clear()
return self
def _get_model_parameter_iterator(self, model):
"""
Return iterator for model parameters. Remove frozen parameters if needed.
"""
for name, params in model.named_parameters():
if params.requires_grad or self.include_frozen:
yield name, params
def get_model_state_iterator(self, model):
param_iter = self._get_model_parameter_iterator(model)
if self.include_buffer:
param_iter = itertools.chain(param_iter, model.named_buffers())
return _remove_prefix(param_iter, self.prefix_to_remove)
def state_dict(self):
return self.state
def load_state_dict(self, state_dict, strict: bool = True):
self.clear()
for x, y in state_dict.items():
self.state[x] = y
return torch.nn.modules.module._IncompatibleKeys(
missing_keys=[], unexpected_keys=[]
)
def __repr__(self):
ret = f"EMAState(state=[{','.join(self.state.keys())}])"
return ret
class EMAUpdater(object):
"""Model Exponential Moving Average
Keep a moving average of everything in the model state_dict (parameters and
buffers). This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
Note: It's very important to set EMA for ALL network parameters (instead of
parameters that require gradient), including batch-norm moving average mean
and variance. This leads to significant improvement in accuracy.
For example, for EfficientNetB3, with default setting (no mixup, lr exponential
    decay) without bn_sync, the EMA accuracy with EMA on params that require
gradient is 79.87%, while the corresponding accuracy with EMA on all params
is 80.61%.
Also, bn sync should be switched on for EMA.
"""
def __init__(
self,
state: EMAState,
decay: float = 0.999,
device: str = "",
use_lerp: bool = False,
decay_warm_up_factor: int = -1,
):
self.decay = decay
self.device = device
self.state = state
self.use_lerp = use_lerp
self.debug_lerp = False
self._num_updates: int = -1
self.decay_warm_up_factor = decay_warm_up_factor
if self.decay_warm_up_factor >= 0:
self._num_updates = 0
def init_state(self, model):
self.state.clear()
self.state.save_from(model, self.device)
def update(self, model):
# compute decay
decay = self.decay
if self._num_updates >= 0:
self._num_updates += 1
decay = min(
self.decay,
(1 + self._num_updates)
/ (self.decay_warm_up_factor + self._num_updates),
)
# update moving average
with torch.no_grad():
ema_param_list = []
param_list = []
for name, val in self.state.get_model_state_iterator(model):
ema_val = self.state.state[name]
if self.device:
val = val.to(self.device)
if val.dtype in [torch.float32, torch.float16]:
ema_param_list.append(ema_val)
param_list.append(val)
else:
ema_val.copy_(ema_val * decay + val * (1.0 - decay))
self._ema_avg(ema_param_list, param_list, decay)
def _ema_avg(
self,
averaged_model_parameters: List[torch.Tensor],
model_parameters: List[torch.Tensor],
decay: float,
) -> None:
"""
Function to perform exponential moving average:
        x_avg = alpha * x_avg + (1 - alpha) * x_t
"""
if self.use_lerp:
if self.debug_lerp:
orig_averaged_model_parameters = torch._foreach_mul(
averaged_model_parameters, decay
)
torch._foreach_add_(
orig_averaged_model_parameters, model_parameters, alpha=1 - decay
)
torch._foreach_lerp_(
averaged_model_parameters, model_parameters, 1.0 - decay
)
if self.debug_lerp:
for (orig_val, lerp_val) in zip(
orig_averaged_model_parameters, averaged_model_parameters
):
assert torch.allclose(orig_val, lerp_val, rtol=1e-4, atol=1e-3)
else:
torch._foreach_mul_(averaged_model_parameters, decay)
torch._foreach_add_(
averaged_model_parameters, model_parameters, alpha=1 - decay
)
def add_model_ema_configs(_C):
_C.MODEL_EMA = type(_C)()
_C.MODEL_EMA.ENABLED = False
_C.MODEL_EMA.DECAY = 0.999
# Whether to include frozen parameters in EMA
_C.MODEL_EMA.INCLUDE_FROZEN = True
# Whether to include model buffers in EMA
_C.MODEL_EMA.INCLUDE_BUFFER = True
# use the same as MODEL.DEVICE when empty
_C.MODEL_EMA.DEVICE = ""
# When True, loading the ema weight to the model when eval_only=True in build_model()
_C.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = False
# Whether to use LERP to compute EMA
_C.MODEL_EMA.USE_LERP = False
# Whether to put EMA to the backward pass
_C.MODEL_EMA.AFTER_BACKWARD = False
# Whether to warmup the EMA update process
_C.MODEL_EMA.DECAY_WARM_UP_FACTOR = -1
def _remove_ddp(model):
from torch.nn.parallel import DistributedDataParallel
if isinstance(model, DistributedDataParallel):
return model.module
return model
def _remove_prefix(named_iterator: Iterator, prefix_to_remove: List[str]) -> Iterator:
"""
Remove a list of prefix from a named_module iterator
"""
for name, params in named_iterator:
for prefix in prefix_to_remove:
name = name.replace(prefix, "")
yield name, params
def may_build_model_ema(cfg, model):
if not cfg.MODEL_EMA.ENABLED:
return
model = _remove_ddp(model)
assert not hasattr(
model, "ema_state"
), "Name `ema_state` is reserved for model ema."
model.ema_state = EMAState(
include_frozen=cfg.MODEL_EMA.INCLUDE_FROZEN,
include_buffer=cfg.MODEL_EMA.INCLUDE_BUFFER,
)
logger.info("Using Model EMA.")
def may_get_ema_checkpointer(cfg, model):
if not cfg.MODEL_EMA.ENABLED:
return {}
model = _remove_ddp(model)
return {"ema_state": model.ema_state}
def get_model_ema_state(model):
"""Return the ema state stored in `model`"""
model = _remove_ddp(model)
assert hasattr(model, "ema_state")
ema = model.ema_state
return ema
def apply_model_ema(model, state=None, save_current=False):
"""Apply ema stored in `model` to model and returns a function to restore
the weights are applied
"""
model = _remove_ddp(model)
if state is None:
state = get_model_ema_state(model)
if save_current:
# save current model state
old_state = EMAState.FromModel(model, state.device)
state.apply_to(model)
if save_current:
return old_state
return None
@contextmanager
def apply_model_ema_and_restore(model, state=None):
"""Apply ema stored in `model` to model and returns a function to restore
the weights are applied
"""
model = _remove_ddp(model)
if state is None:
state = get_model_ema_state(model)
old_state = EMAState.FromModel(model, state.device)
state.apply_to(model)
yield old_state
old_state.apply_to(model)
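# Example (hedged sketch): evaluate with EMA weights, restoring the original
# weights on exit. Assumes the EMA state was already populated (e.g., by
# EMAHook during training); `evaluate` is a hypothetical callable.
#
#   with apply_model_ema_and_restore(model):
#       results = evaluate(model)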
class EMAHook(HookBase):
def __init__(self, cfg, model):
model = _remove_ddp(model)
assert cfg.MODEL_EMA.ENABLED
assert hasattr(
model, "ema_state"
), "Call `may_build_model_ema` first to initilaize the model ema"
self.model = model
self.ema = self.model.ema_state
self.device = cfg.MODEL_EMA.DEVICE or cfg.MODEL.DEVICE
self.is_after_backward = cfg.MODEL_EMA.AFTER_BACKWARD
self.ema_updater = EMAUpdater(
self.model.ema_state,
decay=cfg.MODEL_EMA.DECAY,
device=self.device,
use_lerp=cfg.MODEL_EMA.USE_LERP,
decay_warm_up_factor=cfg.MODEL_EMA.DECAY_WARM_UP_FACTOR,
)
def before_train(self):
if self.ema.has_inited():
self.ema.to(self.device)
else:
self.ema_updater.init_state(self.model)
def after_train(self):
pass
def before_step(self):
pass
def after_backward(self):
if not self.is_after_backward:
return
self._update()
def after_step(self):
if self.is_after_backward:
return
self._update()
def _update(self):
        # NOTE: `train` is a method (always truthy); check the `training` flag instead
        if not self.model.training:
return
self.ema_updater.update(self.model)
|
d2go-main
|
d2go/modeling/ema.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.modeling import ( # noqa
backbone as _backbone,
meta_arch as _meta_arch,
modeldef as _modeldef,
)
|
d2go-main
|
d2go/modeling/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass
from typing import List
import torch
import torch.nn as nn
from d2go.config import CfgNode
from d2go.modeling import modeling_hook as mh
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.utils.misc import _log_api_usage
from detectron2.modeling import META_ARCH_REGISTRY as D2_META_ARCH_REGISTRY
@dataclass
class D2GoModelBuildResult:
"""Class to store the output of build_d2go_model.
It stores the model, a key-value mapping of modeling hooks and can be further
extended with other fields, e.g. state_dict.
"""
# Stores model with applied modeling hooks.
    # If modeling hooks (e.g., EMA) are not enabled in the config,
    # the modeling hook will be a no-op (i.e., return the original model).
model: nn.Module
modeling_hooks: List[mh.ModelingHook]
def build_meta_arch(cfg):
"""
Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
Note that it does not load any weights from ``cfg``.
"""
# initialize the meta-arch and cast to the device
meta_arch = cfg.MODEL.META_ARCHITECTURE
# NOTE: during transition we also check if meta_arch is registered as D2 MetaArch
# TODO: remove this check after Sep 2022.
if meta_arch not in META_ARCH_REGISTRY and meta_arch in D2_META_ARCH_REGISTRY:
raise KeyError(
f"Can't find '{meta_arch}' in D2Go's META_ARCH_REGISTRY, although it is in"
f" D2's META_ARCH_REGISTRY, now D2Go uses its own registry, please register"
f" it in D2Go's META_ARCH_REGISTRY."
)
model = META_ARCH_REGISTRY.get(meta_arch)(cfg)
model.to(torch.device(cfg.MODEL.DEVICE))
_log_api_usage("modeling.meta_arch." + meta_arch)
return model
def build_d2go_model(
cfg: CfgNode,
) -> D2GoModelBuildResult:
model = build_meta_arch(cfg)
modeling_hooks: List[mh.ModelingHook] = []
# apply modeling hooks
# some custom projects bypass d2go's default config so may not have the
# MODELING_HOOKS key
if hasattr(cfg.MODEL, "MODELING_HOOKS"):
hook_names = cfg.MODEL.MODELING_HOOKS
model, modeling_hooks = mh.build_and_apply_modeling_hooks(
model, cfg, hook_names
)
return D2GoModelBuildResult(model=model, modeling_hooks=modeling_hooks)
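# Example (hedged sketch): build the model plus its hooks, then strip the
# hooks (e.g., before export) if any were applied.
#
#   result = build_d2go_model(cfg)
#   model = result.model
#   if result.modeling_hooks:
#       model = model.unapply_modeling_hooks()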
|
d2go-main
|
d2go/modeling/api.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import re
import torch.nn as nn
from detectron2.layers import FrozenBatchNorm2d
logger = logging.getLogger(__name__)
def add_model_freezing_configs(_C):
_C.MODEL.FROZEN_LAYER_REG_EXP = []
def set_requires_grad(model, reg_exps, value):
total_num_parameters = 0
unmatched_parameters = []
unmatched_parameter_names = []
matched_parameters = []
matched_parameter_names = []
for name, parameter in model.named_parameters():
total_num_parameters += 1
matched = False
for frozen_layers_regex in reg_exps:
if re.match(frozen_layers_regex, name):
matched = True
parameter.requires_grad = value
matched_parameter_names.append(name)
matched_parameters.append(parameter)
break
if not matched:
unmatched_parameter_names.append(name)
unmatched_parameters.append(parameter)
logger.info(
"Matched layers (require_grad={}): {}".format(value, matched_parameter_names)
)
logger.info("Unmatched layers: {}".format(unmatched_parameter_names))
return matched_parameter_names, unmatched_parameter_names
def _freeze_matched_bn(module, name, reg_exps, matched_names, unmatched_names):
"""
Recursive function to freeze bn layers that match specified regular expressions.
"""
res = module
# Base case: current module is a leaf node
if len(list(module.children())) == 0:
if isinstance(module, nn.modules.batchnorm._BatchNorm):
matched = False
for frozen_layers_regex in reg_exps:
if re.match(frozen_layers_regex, name):
matched = True
matched_names.append(name)
# Convert to frozen batch norm
res = FrozenBatchNorm2d.convert_frozen_batchnorm(module)
if not matched:
unmatched_names.append(name)
return res
# Recursion: current module has children
for child_name, child in module.named_children():
_name = name + "." + child_name if name != "" else child_name
new_child = _freeze_matched_bn(
child, _name, reg_exps, matched_names, unmatched_names
)
if new_child is not child:
res.add_module(child_name, new_child)
return res
def freeze_matched_bn(module, reg_exps):
"""
Convert matching batchnorm layers in module into FrozenBatchNorm2d.
Args:
module: nn.Module
reg_exps: list of regular expressions to match
Returns:
If module is an instance of batchnorm and it matches the reg exps,
returns a new FrozenBatchNorm2d module.
Otherwise, in-place converts the matching batchnorm child modules to FrozenBatchNorm2d
and returns the main module.
"""
matched_names = []
unmatched_names = []
res = _freeze_matched_bn(module, "", reg_exps, matched_names, unmatched_names)
logger.info("Matched BN layers are frozen: {}".format(matched_names))
logger.info("Unmatched BN layers: {}".format(unmatched_names))
return res
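# Example (hedged sketch): freeze everything under a hypothetical
# "backbone.stem" prefix, including its BN statistics.
#
#   reg_exps = [r"backbone\.stem\..*"]
#   set_requires_grad(model, reg_exps, False)
#   model = freeze_matched_bn(model, reg_exps)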
|
d2go-main
|
d2go/modeling/model_freezing_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import d2go.data.transforms.crop as tfm_crop
import d2go.data.transforms.tensor as tfm_tensor
import detectron2.data.transforms as transforms
import torch
from detectron2.data.transforms.augmentation import AugmentationList
from torch import nn
class ImagePooler(nn.Module):
"""Get a subset of image
Returns the transforms that could be used to inverse the image/boxes/keypoints
as well.
Only available for inference. The code is not tracable/scriptable.
"""
def __init__(
self,
resize_type="resize_shortest",
resize_short=None,
resize_max=None,
box_scale_factor=1.0,
):
super().__init__()
assert resize_type in ["resize_shortest", "resize", "None", None]
resizer = None
if resize_type == "resize_shortest":
resizer = transforms.ResizeShortestEdge(resize_short, resize_max)
elif resize_type == "resize":
resizer = transforms.Resize(resize_short)
self.aug = [
tfm_tensor.Tensor2Array(),
tfm_crop.CropBoxAug(box_scale_factor=box_scale_factor),
*([resizer] if resizer else []),
tfm_tensor.Array2Tensor(),
]
def forward(self, x: torch.Tensor, box: torch.Tensor):
"""box: 1 x 4 tensor in XYXY format"""
assert not self.training
assert isinstance(x, torch.Tensor)
assert isinstance(box, torch.Tensor)
# box: 1 x 4 in xyxy format
inputs = tfm_tensor.AugInput(image=x.cpu(), boxes=box.cpu())
        # avoid shadowing the imported `transforms` module
        tfms = AugmentationList(self.aug)(inputs)
        return (
            inputs.image.to(x.device),
            torch.Tensor(inputs.boxes).to(box.device),
            tfms,
        )
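# Example (hedged sketch): crop-and-resize around a single box at inference
# time; `image_chw` is a hypothetical CHW image tensor and the box is a
# 1 x 4 XYXY tensor.
#
#   pooler = ImagePooler(resize_type="resize_shortest", resize_short=320, resize_max=640)
#   pooler.eval()
#   patch, new_box, tfms = pooler(image_chw, torch.tensor([[10.0, 20.0, 110.0, 220.0]]))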
|
d2go-main
|
d2go/modeling/image_pooler.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# This is the main definition of distillation components in D2Go. This
# includes:
# DistillationModelingHook => how we update the student model to obtain
# distillation methods and properties (e.g., override model.forward)
# DistillationAlgorithm => how we define what occurs during distillation
# (e.g., specific forward func, teacher weights updates)
# DistillationHelper => main class users should use to customize their
# distillation (e.g., define how to pseudo label inputs)
#
# We use two additional registries so that users can select their
# distillation algorithms in configs: DISTILLATION_ALGORITHM, DISTILLATION_HELPER
import logging
from abc import abstractmethod
from dataclasses import dataclass
from typing import Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
import torch
import torch.nn as nn
from d2go.config import CfgNode as CN
from d2go.modeling import modeling_hook as mh
from d2go.registry.builtin import (
DISTILLATION_ALGORITHM_REGISTRY,
DISTILLATION_HELPER_REGISTRY,
MODELING_HOOK_REGISTRY,
)
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.mixin import dynamic_mixin, remove_dynamic_mixin
logger = logging.getLogger(__name__)
ModelOutput = Union[None, torch.Tensor, Iterable["ModelOutput"]]
@dataclass
class LayerLossMetadata:
loss: nn.Module
name: str
layer0: str
layer1: str
class PseudoLabeler:
@abstractmethod
def label(self, x):
"""
        We expect all pseudo labelers to implement a func called `label`, which
        is run on the input before passing the func output to the model.
        This is typically something like running a teacher model on the input
        to generate new ground truth, which we can use to override the input
        gt.
"""
pass
class NoopPseudoLabeler(PseudoLabeler):
def label(self, x):
return x
class RelabelTargetInBatch(PseudoLabeler):
"""Run the teacher model on the batched inputs, replace targets.
We expect the batched_inputs to be a list of dicts:
batched_inputs = [
{"input": ..., "target": ...},
{"input": ..., "target": ...},
...
]
where there is a single label "target" that needs to be replaced
The teacher can take this batch of inputs directly and return a tensor
    of size nchw where n corresponds to the index of the input
We return updated batched_inputs with the new target
new_batched_inputs = [
{"input": ..., "target": teacher_output[0, :]},
{"input": ..., "target": teacher_output[1, :]},
...
]
Note that the output of the teacher is a tensor of NCHW while we assume
the target is CHW. Create a new pseudo_labeler if a different input
output is needed.
"""
def __init__(self, teacher: nn.Module):
"""Assume that a teacher is passed to the psuedolabeler
As an example in distillation, the distillaiton helper should create
or pass along a teacher to the psuedo labeler
"""
self.teacher = teacher
def label(self, batched_inputs: List) -> List:
batched_inputs = [
{"input": d["input"].to(self.teacher.device), "target": d["target"]}
for d in batched_inputs
]
with torch.no_grad():
batched_outputs = self.teacher(batched_inputs)
for i, input in enumerate(batched_inputs):
input["target"] = batched_outputs[i, :]
return batched_inputs
@DISTILLATION_HELPER_REGISTRY.register()
class BaseDistillationHelper:
"""Example of what distillation helper can provide
Users should inherit this class and replace any functions with whatever they
    need in order to customize their distillation given a specific distillation
algorithm (e.g., user wants to change the name of the label in the inputs).
The distillation helper is an object passed to the distillation algorithm so
any functionality in the helper can be accessed in the algorithm
"""
def __init__(self, cfg: CN, teacher: nn.Module):
self.cfg = cfg
self.teacher = teacher
def get_pseudo_labeler(self) -> PseudoLabeler:
"""
pseudo_labeler should update the labels in batched_inputs with teacher model
results
        This dummy pseudo_labeler returns the batched_inputs without modification
"""
return NoopPseudoLabeler()
def get_teacher(self) -> nn.Module:
"""Return a teacher that can be run by the algorithm"""
return self.teacher
def get_layer_losses(
self, model: Optional[nn.Module] = None
) -> List[LayerLossMetadata]:
"""Return losses that are run on layers
Layer parameters may be dependent on model parameters so option to pass
in a model
"""
return []
def get_preprocess_student_input(self) -> Callable:
"""Return a function that allows user to modify the dataloader output
before passing to the student
The output of this function will be directly passed to the student model.
Example use cases include:
* dataloader returns a large image used by the teacher model but the
student model needs a lower resolution version
* dataloader returns both labeled and unlabeled data and the student
requires labeled data
"""
return lambda x: x
def get_preprocess_teacher_input(self) -> Callable:
"""Return a function that allows user to modify dataloader output before
passing to teacher
The output of this function will be directly passed to the teacher model.
"""
return lambda x: x
def get_combine_losses(self) -> Callable:
"""Return a function that takes as input a dictionary of losses and
modifies the loss as required
The default trainer sums the losses at the end so typically this
function is used to change the relative contribution of losses
Example:
def combine_losses(losses)
alpha = 0.1
losses["nll"] *= alpha
losses["kd_loss"] *= (1 - alpha)
return losses
student_losses = {"nll": ...}
student_losses.update({"kl_loss": ...})
losses = combine_losses(student_losses)
"""
return lambda x: x
def get_preprocess_domain0_input(self) -> Callable:
"""Return a function that allows user to modify the dataloader output
before passing to the model
The output of this function will be directly passed to the model.
Example use cases include:
* dataloader returns a dictionary of real and synthetic images. use
this function to return only the real data (domain0) to the model
"""
return lambda x: x
def get_preprocess_domain1_input(self) -> Callable:
"""Same as get_preprocess_domain0_input but returns domain1 inputs
Example:
* dataloader returns a dictionary of real and synthetic images. use
this function to return only synthetic data (domain1) to the model
"""
return lambda x: x
@DISTILLATION_HELPER_REGISTRY.register()
class ExampleDistillationHelper(BaseDistillationHelper):
"""
This is an example of a user customizing distillation.
We return a pseudo labeler that can be used with a specific project
where the training input is a list of dicts with a label called target
"""
def get_pseudo_labeler(self) -> PseudoLabeler:
return RelabelTargetInBatch(self.teacher)
class BaseDistillationAlgorithm(nn.Module):
"""
Base distillation algorithm
All distillation algorithms will be initialized with the same inputs including the
teacher model, distillation helper and student class. Require user to define forward
which overrides student model forward.
Note that the init is unused when we use mixin. We manually set these attributes in
the modeling hook. However we keep the init to make it clear what attributes the
class will contain.
"""
def dynamic_mixin_init(
self,
distillation_helper: BaseDistillationHelper,
):
# check if we might override user attrs with same name
# add any new distillation method attrs to this list
assert not hasattr(
self, "distillation_helper"
), "Distillation attempting to override attribute that already exists: distillation_helper"
self.distillation_helper = distillation_helper
def remove_dynamic_mixin(self):
del self.distillation_helper
@abstractmethod
def forward(self, *args, **kwargs):
"""User required to override forward to implement distillation"""
# must call super to ensure student forward is used when calling the
# super in the algorithm (i.e., DistillationAlgorithm.super().forward())
# this is because distillation algorithms inherit this base class so
# the MRO of the mixin class is something like:
# [DistillationAlgorithm, BaseDistillationAlgorithm, StudentModel]
# DistillationAlgorithm forward uses super().forward to call the
# student model but the BaseDistillationAlgorithm is the next class
# in the MRO so we make sure to call super on BaseDistillationAlgorithm
# so we can access the StudentModel forward.
return super().forward(*args, **kwargs)
@DISTILLATION_ALGORITHM_REGISTRY.register()
class LabelDistillation(BaseDistillationAlgorithm):
"""Basic distillation uses a teacher model to generate new labels used
by the student
We modify the forward to replace the input labels with teacher outputs when
the model is training and run the student at inference
"""
def dynamic_mixin_init(self, distillation_helper: BaseDistillationHelper):
"""Init pseudo labeler"""
super().dynamic_mixin_init(distillation_helper)
self.pseudo_labeler = self.distillation_helper.get_pseudo_labeler()
def remove_dynamic_mixin(self):
super().remove_dynamic_mixin()
del self.pseudo_labeler
def forward(self, batched_inputs: List):
"""If training, overrides input labels with teacher outputs
During inference, runs the student.
Note: The "student" model can be accessed by calling super(). In order
to run the student forward method, we call super().forward(input) as opposed
to super()(input) as super objects are not callable. We avoid calling
super().__call__(input) as this leads to infinite recursion. We can call
super().forward(input) without worrying about ignoring hooks as we should
be calling this model as model(input) which will then activate the hooks.
"""
if not self.training:
return super().forward(batched_inputs)
new_batched_inputs = self.pseudo_labeler.label(batched_inputs)
return super().forward(new_batched_inputs)
@DISTILLATION_ALGORITHM_REGISTRY.register()
class KnowledgeDistillation(BaseDistillationAlgorithm):
"""Knowledge distillation applies loss over the outputs of the student
and teacher models
"""
def dynamic_mixin_init(self, distillation_helper: BaseDistillationHelper):
"""Note all variables use _ to avoid name conflicts with existing
variable names in the model
Consider adding a check to avoid variable name reuse
"""
super().dynamic_mixin_init(distillation_helper)
self._teacher = WrappedTeacher(self.distillation_helper.get_teacher())
self._student_preprocess_input = (
self.distillation_helper.get_preprocess_student_input()
)
self._teacher_preprocess_input = (
self.distillation_helper.get_preprocess_teacher_input()
)
ll = self.distillation_helper.get_layer_losses(self)
self._layer_losses = register_layer_losses_and_to_device(ll, self)
self._student_cache = record_layers(
self, [ll.layer0 for ll in self._layer_losses]
)
self._teacher_cache = record_layers(
self._teacher.model, [ll.layer1 for ll in self._layer_losses]
)
self._combine_losses = self.distillation_helper.get_combine_losses()
def remove_dynamic_mixin(self):
super().remove_dynamic_mixin()
unrecord_layers(self, [ll.layer0 for ll in self._layer_losses])
unrecord_layers(self._teacher.model, [ll.layer1 for ll in self._layer_losses])
del self._teacher
del self._layer_losses
del self._student_cache
del self._teacher_cache
del self._student_preprocess_input
del self._teacher_preprocess_input
del self._combine_losses
def forward(self, batched_inputs: List):
"""Run teacher, then student and compute losses"""
student_input = self._student_preprocess_input(batched_inputs)
if not self.training:
return super().forward(student_input)
teacher_input = self._teacher_preprocess_input(batched_inputs)
with torch.no_grad():
self._teacher(teacher_input)
student_losses = super().forward(student_input)
distillation_losses = compute_layer_losses(
self._layer_losses, self._student_cache, self._teacher_cache
)
student_losses.update(distillation_losses)
losses = self._combine_losses(student_losses)
return losses
@DISTILLATION_ALGORITHM_REGISTRY.register()
class DomainAdaptation(BaseDistillationAlgorithm):
"""Domain adaptation applies loss over the inputs of domain0 and domain1"""
def dynamic_mixin_init(self, distillation_helper: BaseDistillationHelper):
super().dynamic_mixin_init(distillation_helper)
self._domain0_preprocess_input = (
self.distillation_helper.get_preprocess_domain0_input()
)
self._domain1_preprocess_input = (
self.distillation_helper.get_preprocess_domain1_input()
)
ll = self.distillation_helper.get_layer_losses(self)
self._layer_losses = register_layer_losses_and_to_device(ll, self)
# we ignore the cache dict returned by record_layers as we need to
# manually set the dict at every iteration in the forward
self._domain0_cache = {}
self._domain1_cache = {}
# since domain adaptation uses the same model in both domains, we
# only need to add CachedLayers once
record_layers(self, [ll.layer0 for ll in self._layer_losses])
self._combine_losses = self.distillation_helper.get_combine_losses()
def remove_dynamic_mixin(self):
super().remove_dynamic_mixin()
unrecord_layers(self, [ll.layer0 for ll in self._layer_losses])
del self._layer_losses
del self._domain0_cache
del self._domain1_cache
del self._domain0_preprocess_input
del self._domain1_preprocess_input
del self._combine_losses
def forward(self, batched_inputs: List):
"""Run domain0 input, domain1 input and compute losses"""
domain0_input = self._domain0_preprocess_input(batched_inputs)
if not self.training:
return super().forward(domain0_input)
# run domain0
set_cache_dict(self, self._domain0_cache)
domain0_losses = super().forward(domain0_input)
# run domain1
domain1_input = self._domain1_preprocess_input(batched_inputs)
set_cache_dict(self, self._domain1_cache)
domain1_losses = super().forward(domain1_input)
# calculate losses
domain_adaptation_losses = compute_layer_losses(
self._layer_losses, self._domain0_cache, self._domain1_cache
)
# combine losses
# note we currently assume that the loss combiner uses training iteration
losses = self._combine_losses(
domain0_losses,
domain1_losses,
domain_adaptation_losses,
getattr(self, "_training_iteration", -1),
)
return losses
@MODELING_HOOK_REGISTRY.register()
class DistillationModelingHook(mh.ModelingHook):
"""Wrapper hook that allows us to apply different distillation algorithms
based on config
This is meant to be used after creating a model:
def build_model(cfg):
model = d2_build_model(cfg)
distillation_modeling_hook = DistillationModelingHook(cfg)
d2go.modeling_hook.apply_modeling_hooks(model, distillation_modeling_hook)
    The updated model will then be equipped with a forward func that corresponds
to the distillation method in the cfg as well as any new methods
"""
def __init__(self, cfg):
"""
Set the three major components
distillation_algorithm_class => the distillation algorithm to be used, we
only get the class as the apply() will mixin the class
distillation_helper => user customization of the algorithm
teacher => all distillation algorithms utilize an additional model to
modify inputs
"""
super().__init__(cfg)
self.teacher = _build_teacher(cfg)
self.distillation_algorithm_class = DISTILLATION_ALGORITHM_REGISTRY.get(
cfg.DISTILLATION.ALGORITHM
)
self.distillation_helper = DISTILLATION_HELPER_REGISTRY.get(
cfg.DISTILLATION.HELPER
)(cfg, self.teacher)
def apply(self, model: nn.Module) -> nn.Module:
"""Use dynamic mixin to apply the distillation class
As opposed to wrapping the model, dynamic mixin allows us to override the
model methods so that the model retains all existing attributes the user expects
        (e.g., if the user expects an attr called model.my_attr, then dynamic mixin
retains that property). This has the advantage over directly overriding the model
forward as we can still call the original model forward using super:
old_model: MyModel
new_model: MyDistillationClass = DistillationModelingHook(...).apply(old_model)
class MyDistillationClass:
def forward(self, ...):
# do some processing
...
super().forward(...) # call MyModel.forward
...
"""
logger.info("Applying distillation")
dynamic_mixin(
model,
self.distillation_algorithm_class,
init_dict={
"distillation_helper": self.distillation_helper,
},
)
return model
def unapply(self, model: nn.Module) -> nn.Module:
"""Remove distillation class using dynamic mixin with saved original class"""
remove_dynamic_mixin(model)
return model
def _build_teacher(cfg) -> nn.Module:
"""Create teacher using config settings
Supports torchscript or creating pytorch model using config.
"""
_validate_teacher_config(cfg)
if cfg.DISTILLATION.TEACHER.TYPE == "torchscript":
with PathManager.open(cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME, "rb") as f:
model = torch.jit.load(f)
elif cfg.DISTILLATION.TEACHER.TYPE == "config":
from d2go.runner import import_runner
from d2go.setup import create_cfg_from_cli
# teacher config may be set to cuda
# if user wants to run teacher on cpu only machine by specifying teacher.device,
# need to override device to cpu before building model
if cfg.DISTILLATION.TEACHER.DEVICE:
cfg.DISTILLATION.TEACHER.OVERWRITE_OPTS.extend(
["MODEL.DEVICE", cfg.DISTILLATION.TEACHER.DEVICE]
)
teacher_cfg = create_cfg_from_cli(
cfg.DISTILLATION.TEACHER.CONFIG_FNAME,
cfg.DISTILLATION.TEACHER.OVERWRITE_OPTS,
cfg.DISTILLATION.TEACHER.RUNNER_NAME,
)
runner = import_runner(cfg.DISTILLATION.TEACHER.RUNNER_NAME)()
model = runner.build_model(teacher_cfg, eval_only=True)
elif cfg.DISTILLATION.TEACHER.TYPE == "no_teacher":
model = nn.Identity()
else:
raise ValueError(f"Unexpected teacher type: {cfg.DISTILLATION.TEACHER.TYPE}")
# move teacher to same device as student unless specified
device = torch.device(cfg.DISTILLATION.TEACHER.DEVICE or cfg.MODEL.DEVICE)
model = _set_device(model, device)
model.eval()
return model
def _set_device(model: nn.Module, device: torch.device) -> nn.Module:
"""Set the device of the model
Some D2Go models have device as a property of the model (e.g., GeneralizedRCNN)
whereas others are missing this attribute which is assumed by distillation
to exist (e.g., we may call teacher.device to move inputs)
This helper function guarantees that the model.device attribute exists
and runs model.to(device)
"""
model = model.to(device)
if not hasattr(model, "device"):
model.device = device
return model
def _validate_teacher_config(cfg: CN) -> None:
"""We support torchscript or PyTorch checkpoint as teacher models
    If torchscript, needs:
        * DISTILLATION.TEACHER.TORCHSCRIPT_FNAME
    If config, needs:
        * DISTILLATION.TEACHER.CONFIG_FNAME
Bypass allowed if setting teacher.type = "no_teacher". This can be
useful in cases where we only have the student model
(e.g., domain adaptation)
"""
if cfg.DISTILLATION.TEACHER.TYPE == "torchscript":
assert (
cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME
), "Trying to load torchscript model without fname"
elif cfg.DISTILLATION.TEACHER.TYPE == "config":
assert (
cfg.DISTILLATION.TEACHER.CONFIG_FNAME
), "Trying to load D2Go teacher model without config"
elif cfg.DISTILLATION.TEACHER.TYPE == "no_teacher":
pass
else:
raise ValueError(
f"Unrecognized DISTILLATION.TEACHER.TYPE: {cfg.DISTILLATION.TEACHER.TYPE}"
)
class CachedLayer(nn.Module):
"""Cached layer records the output of a layer
This is meant to be used with dynamic mixin. The layer overrides the forward
    of the original layer such that the input and the output are the same, but
    the output of the layer is also saved to a dict that can be retrieved later
"""
def dynamic_mixin_init(
self,
label: str,
cache: Dict[str, ModelOutput],
):
self.label = label
self.cache = cache
def remove_dynamic_mixin(self):
del self.label
del self.cache
def forward(self, *args, **kwargs):
"""Run the original layer and save the output
We clone the output to avoid the case where a subsequent module
runs an inplace operation. However, this limits what the cache
can support as we can only run clone on a tensor so we need to
check the type of the output.
        Supported output types are limited to None and arbitrarily nested
        collections (List, Tuple, and Dict) of tensors.
"""
output = super().forward(*args, **kwargs)
self.cache[self.label] = CachedLayer._clone(output)
return output
@staticmethod
def _clone(output: ModelOutput) -> ModelOutput:
if output is None:
return None
elif isinstance(output, torch.Tensor):
return output.clone()
elif isinstance(output, List) or isinstance(output, Tuple):
cloned_output = []
for x in output:
cloned_output.append(CachedLayer._clone(x))
if isinstance(output, Tuple):
return tuple(cloned_output)
return cloned_output
elif isinstance(output, Dict):
cloned_output = {}
for k, v in output.items():
cloned_output[k] = CachedLayer._clone(v)
return cloned_output
else:
raise ValueError(f"Unexpected type to save: {type(output)}")
def set_cache_dict(model: nn.Module, cache: ModelOutput) -> None:
"""Sets the cache in all CachedLayers to input cache"""
for module in model.modules():
if isinstance(module, CachedLayer):
module.cache = cache
def record_layers(model: nn.Module, layer_names: Set[str]) -> ModelOutput:
"""Save the outputs of layer_names in model
Iterates over all named layers in model and applies CachedLayer to layers in
layer_names. Returns the dict that the cached layers will write into.
"""
cache = {}
for name, module in model.named_modules():
if name in layer_names:
dynamic_mixin(
module,
CachedLayer,
init_dict={"label": name, "cache": cache},
)
return cache
def unrecord_layers(model: nn.Module, layer_names: Set[str]) -> None:
"""Remove cached layers based on the layer_names"""
for name, module in model.named_modules():
if name in layer_names:
remove_dynamic_mixin(module)
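# Illustrative sketch (not part of the original file): record the output of a single
# named submodule ("0" in an nn.Sequential), run a forward pass, then restore the
# layer. Assumes `dynamic_mixin` supports mixing CachedLayer into plain torch layers.
def _example_record_layers() -> None:
    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    cache = record_layers(model, {"0"})
    model(torch.randn(1, 4))
    assert "0" in cache  # the first linear layer's output was saved
    unrecord_layers(model, {"0"})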
def compute_layer_losses(
layer_losses: List[LayerLossMetadata],
layer0_cache: ModelOutput,
layer1_cache: ModelOutput,
) -> Dict[str, torch.Tensor]:
"""Compute loss over layers specified in layer_loss
layer0_cache and layer1_cache should contain the data required to compute
the losses specified in layer_loss
"""
losses = {}
for ll in layer_losses:
if ll.layer0 not in layer0_cache:
raise ValueError(f"Missing saved layer {ll.layer0} in layer0_cache")
if ll.layer1 not in layer1_cache:
raise ValueError(f"Missing saved layer {ll.layer1} in layer1_cache")
losses[ll.name] = ll.loss(layer0_cache[ll.layer0], layer1_cache[ll.layer1])
return losses
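# Illustrative sketch (not part of the original file): a single MSE layer loss
# computed from two caches keyed by (hypothetical) layer names "l0" and "l1".
def _example_compute_layer_losses() -> None:
    ll = LayerLossMetadata(nn.MSELoss(), "mse", "l0", "l1")
    losses = compute_layer_losses([ll], {"l0": torch.ones(2)}, {"l1": torch.zeros(2)})
    assert losses["mse"].item() == 1.0  # mean((1 - 0) ** 2) == 1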
class WrappedTeacher:
"""Used to remove the teacher model from the student module list
See: DistillationMiscTests.test_teacher_outside_updated_parameters to get
more details on avoiding adding the teacher as a module
"""
def __init__(self, model: nn.Module):
self.model = model
def __call__(self, *args, **kwargs):
return self.model(*args, **kwargs)
def get_default_kd_image_classification_layer_losses() -> List[LayerLossMetadata]:
"""Return some typical values used in knowledge distillation
Assumes the student model is ImageClassificationMetaArch and the teacher model is
the same, or a wrapped torchscript model with the same output layer name.
"""
return [
LayerLossMetadata(
loss=nn.CrossEntropyLoss(),
name="kd",
layer0="classifier",
layer1="", # use empty layer name to indicate last layer
)
]
class DefaultLossCombiner:
"""Returns a weighted sum of the losses based on the name_weight
name_weight is a dictionary indicating the name of the loss and the
weight associated with that loss
Example:
name_weight = {"nll": 0.1, "kd": 0.9}
"""
def __init__(self, name_weight: Dict[str, float]):
self.name_weight = name_weight
def __call__(self, losses: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
output = {}
for k, v in losses.items():
if k not in self.name_weight:
raise ValueError(f"Unexpected weight in loss dict: {k}")
output[k] = v * self.name_weight[k]
return output
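# Illustrative sketch (not part of the original file): each named loss is scaled by
# its configured weight; the trainer is expected to sum the returned dict.
def _example_loss_combiner() -> None:
    combiner = DefaultLossCombiner({"nll": 0.1, "kd": 0.9})
    out = combiner({"nll": torch.tensor(1.0), "kd": torch.tensor(2.0)})
    assert abs(out["nll"].item() - 0.1) < 1e-6
    assert abs(out["kd"].item() - 1.8) < 1e-6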
def register_layer_losses_and_to_device(
layer_losses: List[LayerLossMetadata], model: nn.Module
) -> List[LayerLossMetadata]:
"""Register loss modules in layerlossemtadata to model and move to device"""
registered_losses = []
for ll in layer_losses:
loss_on_device = ll.loss.to(model.device)
model.add_module(ll.name, loss_on_device)
registered_losses.append(
LayerLossMetadata(
loss_on_device,
ll.name,
ll.layer0,
ll.layer1,
)
)
return registered_losses
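# Illustrative sketch (not part of the original file): registering a loss makes it a
# named submodule of the model, so it follows the model's device and checkpointing.
def _example_register_layer_losses() -> None:
    model = nn.Linear(2, 2)
    model.device = torch.device("cpu")  # the helper reads `model.device`
    (registered,) = register_layer_losses_and_to_device(
        [LayerLossMetadata(nn.MSELoss(), "kd_mse", "l0", "l1")], model
    )
    assert registered.loss is getattr(model, "kd_mse")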
|
d2go-main
|
d2go/modeling/distillation.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import inspect
import json
import logging
import math
from typing import Any, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from d2go.config import CfgNode
from d2go.config.utils import flatten_config_dict
from d2go.export.api import PredictorExportConfig
from d2go.quantization.qconfig import set_backend_and_create_qconfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import (
GeneralizedRCNN as _GeneralizedRCNN,
ProposalNetwork as _ProposalNetwork,
)
from detectron2.modeling.backbone.fpn import FPN
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.projects.point_rend import PointRendMaskHead
from detectron2.structures import Boxes, Instances, Keypoints, PolygonMasks
from detectron2.utils.events import EventStorage
from detectron2.utils.registry import Registry
from mobile_cv.arch.utils import fuse_utils
from mobile_cv.arch.utils.quantize_utils import (
QuantWrapper,
wrap_non_quant_group_norm,
wrap_quant_subclass,
)
from mobile_cv.predictor.api import FuncInfo
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
from torch.ao.quantization.utils import get_fqn_to_example_inputs
logger = logging.getLogger(__name__)
# NOTE: Customized heads are often used in the GeneralizedRCNN; this leads to the need
# to also customize the export/quant APIs, so registries are provided for easy
# override without creating new meta-archs. For other, less general meta-archs, this
# type of registry might be overkill.
RCNN_PREPARE_FOR_EXPORT_REGISTRY = Registry("RCNN_PREPARE_FOR_EXPORT")
RCNN_PREPARE_FOR_QUANT_REGISTRY = Registry("RCNN_PREPARE_FOR_QUANT")
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(_GeneralizedRCNN):
def prepare_for_export(self, cfg, *args, **kwargs):
func = RCNN_PREPARE_FOR_EXPORT_REGISTRY.get(cfg.RCNN_PREPARE_FOR_EXPORT)
return func(self, cfg, *args, **kwargs)
def prepare_for_quant(self, cfg, *args, **kwargs):
func = RCNN_PREPARE_FOR_QUANT_REGISTRY.get(cfg.RCNN_PREPARE_FOR_QUANT)
return func(self, cfg, *args, **kwargs)
def custom_prepare_fx(self, cfg, is_qat, example_input=None):
return default_rcnn_custom_prepare_fx(self, cfg, is_qat, example_input)
def _cast_model_to_device(self, device):
return _cast_detection_model(self, device)
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class ProposalNetwork(_ProposalNetwork):
pass
@RCNN_PREPARE_FOR_EXPORT_REGISTRY.register()
def default_rcnn_prepare_for_export(self, cfg, inputs, predictor_type):
pytorch_model = self
if (
"@c2_ops" in predictor_type
or "caffe2" in predictor_type
or "onnx" in predictor_type
):
from detectron2.export.caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP
C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
c2_compatible_model = C2MetaArch(cfg, pytorch_model)
preprocess_info = FuncInfo.gen_func_info(
D2Caffe2MetaArchPreprocessFunc,
params=D2Caffe2MetaArchPreprocessFunc.get_params(cfg, c2_compatible_model),
)
postprocess_info = FuncInfo.gen_func_info(
D2Caffe2MetaArchPostprocessFunc,
params=D2Caffe2MetaArchPostprocessFunc.get_params(cfg, c2_compatible_model),
)
preprocess_func = preprocess_info.instantiate()
model_export_kwargs = {}
if "torchscript" in predictor_type:
model_export_kwargs["force_disable_tracing_adapter"] = True
return PredictorExportConfig(
model=c2_compatible_model,
# Caffe2MetaArch takes a single tuple as input (which is the return of
# preprocess_func); data_generator requires all positional args as a tuple.
data_generator=lambda x: (preprocess_func(x),),
model_export_method=predictor_type.replace("@c2_ops", "", 1),
model_export_kwargs=model_export_kwargs,
preprocess_info=preprocess_info,
postprocess_info=postprocess_info,
)
else:
do_postprocess = cfg.RCNN_EXPORT.INCLUDE_POSTPROCESS
preprocess_info = FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Preprocess, params={}
)
preprocess_func = preprocess_info.instantiate()
return PredictorExportConfig(
model=D2RCNNInferenceWrapper(
pytorch_model,
do_postprocess=do_postprocess,
),
data_generator=lambda x: (preprocess_func(x),),
model_export_method=predictor_type,
preprocess_info=preprocess_info,
postprocess_info=FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Postprocess,
params={"detector_postprocess_done_in_model": do_postprocess},
),
)
def _apply_eager_mode_quant(cfg, model):
if isinstance(model, GeneralizedRCNN):
"""Wrap each quantized part of the model to insert Quant and DeQuant in-place"""
# Wrap backbone and proposal_generator
if isinstance(model.backbone, FPN):
# HACK: currently the quantization won't pick up D2's Conv2d, which is
# used by D2's default FPN (same as FBNetV2FPN); this causes problems if we
# wrap the entire backbone as a whole. The current solution is to quantize
# only bottom_up and leave the other parts un-quantized. TODO (T109761730):
# However, we need to revisit this if using another (fbnet-based) FPN module,
# since the new FPN module might be picked up by quantization.
model.backbone.bottom_up = wrap_quant_subclass(
model.backbone.bottom_up,
n_inputs=1,
n_outputs=len(model.backbone.bottom_up._out_features),
)
else:
model.backbone = wrap_quant_subclass(
model.backbone, n_inputs=1, n_outputs=len(model.backbone._out_features)
)
model.proposal_generator.rpn_head = wrap_quant_subclass(
model.proposal_generator.rpn_head,
n_inputs=len(cfg.MODEL.RPN.IN_FEATURES),
n_outputs=len(cfg.MODEL.RPN.IN_FEATURES) * 2,
)
# Wrap the roi_heads; box_pooler is not quantized
if hasattr(model.roi_heads, "box_head"):
model.roi_heads.box_head = wrap_quant_subclass(
model.roi_heads.box_head,
n_inputs=1,
n_outputs=1,
)
# for faster_rcnn_R_50_C4
if hasattr(model.roi_heads, "res5"):
model.roi_heads.res5 = wrap_quant_subclass(
model.roi_heads.res5,
n_inputs=1,
n_outputs=1,
)
model.roi_heads.box_predictor = wrap_quant_subclass(
model.roi_heads.box_predictor, n_inputs=1, n_outputs=2
)
# Optionally wrap the keypoint and mask heads; the poolers are not quantized
if hasattr(model.roi_heads, "keypoint_head"):
model.roi_heads.keypoint_head = wrap_quant_subclass(
model.roi_heads.keypoint_head,
n_inputs=1,
n_outputs=1,
wrapped_method_name="layers",
)
if hasattr(model.roi_heads, "mask_head"):
model.roi_heads.mask_head = wrap_quant_subclass(
model.roi_heads.mask_head,
n_inputs=1,
n_outputs=1,
wrapped_method_name="layers",
)
# StandardROIHeadsWithSubClass uses a subclass head
if hasattr(model.roi_heads, "subclass_head"):
q_subclass_head = QuantWrapper(model.roi_heads.subclass_head)
model.roi_heads.subclass_head = q_subclass_head
else:
raise NotImplementedError(
"Eager mode for {} is not supported".format(type(model))
)
# TODO: wrap the normalizer and make it quantizable
# NOTE: GN is not quantizable, assuming all GN follows a quantized conv,
# wrap them with dequant-quant
model = wrap_non_quant_group_norm(model)
return model
def _lcm(x: Optional[int], y: Optional[int]) -> int:
if x is None or x == 0:
return y
if y is None or y == 0:
return x
return x * y // math.gcd(x, y)
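# Illustrative sketch (not part of the original file): `_lcm` treats None/0 as "no
# constraint", so reducing over a stride list yields the smallest side length
# divisible by every stride.
def _example_lcm() -> None:
    assert functools.reduce(_lcm, [None, 8, 16, 32]) == 32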
def _get_example_rcnn_input(image_tensor_size: int):
def _get_batch():
# example input image
# TODO: do not hard-code channel size 3
image = torch.randn(3, image_tensor_size, image_tensor_size)
# example GT instances
num_instances = 2
gt_boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0]] * num_instances)
gt_boxes = Boxes(gt_boxes)
gt_classes = torch.tensor([0] * num_instances)
polygon = np.array([0.0, 0.0, 10.0, 0.0, 10.0, 10.0]) # x1,y1,x2,y2,x3,y3
gt_masks = PolygonMasks([[polygon]] * num_instances)
# TODO: make keypoints inside the box and set visibility
# TODO: do not hard-code num_keypoints 17
keypoints = torch.randn(num_instances, 17, 3)
gt_keypoints = Keypoints(keypoints)
# NOTE: currently supports faster/mask/keypoint RCNN
instances = Instances(
image_size=(10, 10),
gt_boxes=gt_boxes,
gt_classes=gt_classes,
gt_masks=gt_masks,
gt_keypoints=gt_keypoints,
)
return {
# `file_name` and `image_id` are not used, can be any value.
"file_name": "fake_example_image.jpg",
"image_id": 42,
# `height` and `width` are used in post-processing to scale predictions back
# to original size, not used during training.
"height": 10,
"width": 10,
"image": image,
"instances": instances,
# NOTE: proposals are not supported
}
return [_get_batch(), _get_batch()]
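# Illustrative sketch (not part of the original file): the helper returns a batch of
# two structurally identical inputs whose image tensors match the requested size.
def _example_rcnn_input() -> None:
    batch = _get_example_rcnn_input(32)
    assert len(batch) == 2
    assert batch[0]["image"].shape == (3, 32, 32)  # channel size hard-coded to 3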
def _set_qconfig(model, cfg, is_qat):
model.qconfig = set_backend_and_create_qconfig(cfg, is_train=is_qat)
# skip quantization for point rend head
if (
hasattr(model, "roi_heads")
and hasattr(model.roi_heads, "mask_head")
and isinstance(model.roi_heads.mask_head, PointRendMaskHead)
):
model.roi_heads.mask_head.qconfig = None
logger.info("Setup the model with qconfig:\n{}".format(model.qconfig))
@RCNN_PREPARE_FOR_QUANT_REGISTRY.register()
def default_rcnn_prepare_for_quant(self, cfg):
model = self
_set_qconfig(model, cfg, model.training)
# Modify the model for eager mode
model = _apply_eager_mode_quant(cfg, model)
model = fuse_utils.fuse_model(
model,
is_qat=cfg.QUANTIZATION.QAT.ENABLED,
inplace=True,
)
return model
def default_rcnn_custom_prepare_fx(self, cfg, is_qat, example_input=None):
model = self
_set_qconfig(model, cfg, is_qat)
# construct example input for FX when not provided
if example_input is None:
assert (
is_qat
), "Currently only (FX mode) QAT requires user-provided `example_input`"
# make sure the image size can be divided by all strides and size_divisibility
required_strides = [model.backbone.size_divisibility] + [
shape_spec.stride for shape_spec in model.backbone.output_shape().values()
]
image_tensor_size = functools.reduce(_lcm, required_strides)
example_input = _get_example_rcnn_input(image_tensor_size)
_fx_quant_prepare(model, cfg, is_qat, example_input)
def convert_fx_callback(model):
return default_rcnn_custom_convert_fx(model, cfg)
return model, convert_fx_callback
def _fx_quant_prepare(self, cfg, is_qat, example_input):
prep_fn = prepare_qat_fx if is_qat else prepare_fx
qconfig = {"": self.qconfig}
assert not isinstance(self.backbone, FPN), "FPN is not supported in FX mode"
with EventStorage() as _:  # D2's rcnn requires EventStorage when computing losses
with torch.no_grad():
fqn_to_example_inputs = get_fqn_to_example_inputs(self, (example_input,))
self.backbone = prep_fn(
self.backbone,
qconfig,
fqn_to_example_inputs["backbone"],
prepare_custom_config={
"preserved_attributes": ["size_divisibility", "padding_constraints"],
# keep the output of backbone quantized, to avoid
# redundant dequant
# TODO: the output of the backbone is a dict, and currently this keeps all
# outputs quantized; when we fix the implementation of "output_quantized_idxs"
# we'll need to change this
"output_quantized_idxs": [0],
},
)
self.proposal_generator.rpn_head.rpn_feature = prep_fn(
self.proposal_generator.rpn_head.rpn_feature,
qconfig,
fqn_to_example_inputs["proposal_generator.rpn_head.rpn_feature"],
prepare_custom_config={
# rpn_feature expects quantized input; this is used to avoid a redundant
# quant
"input_quantized_idxs": [0]
},
)
self.proposal_generator.rpn_head.rpn_regressor.cls_logits = prep_fn(
self.proposal_generator.rpn_head.rpn_regressor.cls_logits,
qconfig,
fqn_to_example_inputs["proposal_generator.rpn_head.rpn_regressor.cls_logits"],
)
self.proposal_generator.rpn_head.rpn_regressor.bbox_pred = prep_fn(
self.proposal_generator.rpn_head.rpn_regressor.bbox_pred,
qconfig,
fqn_to_example_inputs["proposal_generator.rpn_head.rpn_regressor.bbox_pred"],
)
self.roi_heads.box_head.roi_box_conv = prep_fn(
self.roi_heads.box_head.roi_box_conv,
qconfig,
fqn_to_example_inputs["roi_heads.box_head.roi_box_conv"],
prepare_custom_config={
"output_quantized_idxs": [0],
},
)
self.roi_heads.box_head.avgpool = prep_fn(
self.roi_heads.box_head.avgpool,
qconfig,
(torch.randn(1, 3, 224, 224),),
prepare_custom_config={
"input_quantized_idxs": [0],
"output_quantized_idxs": [0],
},
)
self.roi_heads.box_predictor.cls_score = prep_fn(
self.roi_heads.box_predictor.cls_score,
qconfig,
fqn_to_example_inputs["roi_heads.box_predictor.cls_score"],
prepare_custom_config={"input_quantized_idxs": [0]},
)
self.roi_heads.box_predictor.bbox_pred = prep_fn(
self.roi_heads.box_predictor.bbox_pred,
qconfig,
fqn_to_example_inputs["roi_heads.box_predictor.bbox_pred"],
prepare_custom_config={"input_quantized_idxs": [0]},
)
def default_rcnn_custom_convert_fx(self, cfg):
assert not isinstance(self.backbone, FPN), "FPN is not supported in FX mode"
self.backbone = convert_fx(
self.backbone,
convert_custom_config={
"preserved_attributes": ["size_divisibility", "padding_constraints"]
},
)
self.proposal_generator.rpn_head.rpn_feature = convert_fx(
self.proposal_generator.rpn_head.rpn_feature
)
self.proposal_generator.rpn_head.rpn_regressor.cls_logits = convert_fx(
self.proposal_generator.rpn_head.rpn_regressor.cls_logits
)
self.proposal_generator.rpn_head.rpn_regressor.bbox_pred = convert_fx(
self.proposal_generator.rpn_head.rpn_regressor.bbox_pred
)
self.roi_heads.box_head.roi_box_conv = convert_fx(
self.roi_heads.box_head.roi_box_conv
)
self.roi_heads.box_head.avgpool = convert_fx(self.roi_heads.box_head.avgpool)
self.roi_heads.box_predictor.cls_score = convert_fx(
self.roi_heads.box_predictor.cls_score
)
self.roi_heads.box_predictor.bbox_pred = convert_fx(
self.roi_heads.box_predictor.bbox_pred
)
return self
class D2Caffe2MetaArchPreprocessFunc(object):
def __init__(self, size_divisibility, device):
self.size_divisibility = size_divisibility
self.device = device
def __call__(self, inputs):
from detectron2.export.caffe2_modeling import (
convert_batched_inputs_to_c2_format,
)
data, im_info = convert_batched_inputs_to_c2_format(
inputs, self.size_divisibility, self.device
)
return (data, im_info)
@staticmethod
def get_params(cfg, model):
from caffe2.proto import caffe2_pb2
from detectron2.export.shared import get_pb_arg_vali, get_pb_arg_vals
fake_predict_net = caffe2_pb2.NetDef()
model.encode_additional_info(fake_predict_net, None)
size_divisibility = get_pb_arg_vali(fake_predict_net, "size_divisibility", 0)
device = get_pb_arg_vals(fake_predict_net, "device", b"cpu").decode("ascii")
return {
"size_divisibility": size_divisibility,
"device": device,
}
class D2Caffe2MetaArchPostprocessFunc(object):
def __init__(self, external_input, external_output, encoded_info):
self.external_input = external_input
self.external_output = external_output
self.encoded_info = encoded_info
def __call__(self, inputs, tensor_inputs, tensor_outputs):
from caffe2.proto import caffe2_pb2
from detectron2.export.caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP
from detectron2.export.shared import get_pb_arg_vals
encoded_info = self.encoded_info.encode("ascii")
fake_predict_net = caffe2_pb2.NetDef().FromString(encoded_info)
meta_architecture = get_pb_arg_vals(fake_predict_net, "meta_architecture", None)
meta_architecture = meta_architecture.decode("ascii")
model_class = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_architecture]
convert_outputs = model_class.get_outputs_converter(fake_predict_net, None)
c2_inputs = tensor_inputs
c2_results = dict(zip(self.external_output, tensor_outputs))
return convert_outputs(inputs, c2_inputs, c2_results)
@staticmethod
def get_params(cfg, model):
from caffe2.proto import caffe2_pb2
# NOTE: the post processing has different values for different meta
# architectures; here we simply rely on the Caffe2 meta architecture to encode
# the info into a NetDef and store it as a whole.
fake_predict_net = caffe2_pb2.NetDef()
model.encode_additional_info(fake_predict_net, None)
encoded_info = fake_predict_net.SerializeToString().decode("ascii")
# HACK: Caffe2MetaArch's post processing requires the blob names of the model
# output; this information is missing for torchscript. There's no easy way to
# know this unless using NamedTuple for tracing.
external_input = ["data", "im_info"]
if cfg.MODEL.META_ARCHITECTURE == "GeneralizedRCNN":
external_output = ["bbox_nms", "score_nms", "class_nms"]
if cfg.MODEL.MASK_ON:
external_output.extend(["mask_fcn_probs"])
if cfg.MODEL.KEYPOINT_ON:
if cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT:
external_output.extend(["keypoints_out"])
else:
external_output.extend(["kps_score"])
else:
raise NotImplementedError("")
return {
"external_input": external_input,
"external_output": external_output,
"encoded_info": encoded_info,
}
class D2RCNNInferenceWrapper(nn.Module):
def __init__(
self,
model,
do_postprocess=False,
):
super().__init__()
self.model = model
self.do_postprocess = do_postprocess
def forward(self, image):
"""
This function describes what happens during the tracing. Note that the output
contains non-tensors; therefore the TracingAdaptedTorchscriptExport must be used
in order to convert the output back from flattened tensors.
"""
if self.do_postprocess:
inputs = [
{
"image": image,
# NOTE: the width/height is not available since the model takes a
# single image tensor as input. Therefore even though post-process
# is specified, the wrapped model doesn't resize the output to its
# original width/height.
# TODO: If this is needed, we might make the model take extra
# width/height info like the C2-style inputs.
}
]
return self.model.forward(inputs)[0]["instances"]
else:
inputs = [{"image": image}]
return self.model.inference(inputs, do_postprocess=False)[0]
@staticmethod
class Preprocess(object):
"""
This function describes how to convert the original input (from the data loader)
to the inputs used during the tracing (i.e., the inputs of the forward function).
"""
def __call__(self, batch):
assert len(batch) == 1, "only support single batch"
return batch[0]["image"]
class Postprocess(object):
def __init__(self, detector_postprocess_done_in_model=False):
"""
Args:
detector_postprocess_done_in_model (bool): whether `detector_postprocess`
has already been applied in the D2RCNNInferenceWrapper
"""
self.detector_postprocess_done_in_model = detector_postprocess_done_in_model
def __call__(self, batch, inputs, outputs):
"""
This function describes how to run the predictor using the exported model. Note
that `tracing_adapter_wrapper` runs the traced model under the hood and
behaves exactly the same as the forward function.
"""
assert len(batch) == 1, "only support single batch"
width, height = batch[0]["width"], batch[0]["height"]
if self.detector_postprocess_done_in_model:
image_shape = batch[0]["image"].shape # chw
if image_shape[1] != height or image_shape[2] != width:
raise NotImplementedError(
f"Image tensor (shape: {image_shape}) doesn't match the"
f" input width ({width}) height ({height}). Since post-process"
f" has been done inside the torchscript without width/height"
f" information, can't recover the post-processed output to "
f"orignail resolution."
)
return [{"instances": outputs}]
else:
r = detector_postprocess(outputs, height, width)
return [{"instances": r}]
# TODO: model.to(device) might not work for detection meta-archs; this function is
# the workaround. In general, we might need a meta-arch API for this.
def _cast_detection_model(model, device):
# check model is an instance of one of the meta arch
from detectron2.export.caffe2_modeling import Caffe2MetaArch
if isinstance(model, Caffe2MetaArch):
model._wrapped_model = _cast_detection_model(model._wrapped_model, device)
return model
assert isinstance(model, tuple(META_ARCH_REGISTRY._obj_map.values()))
model.to(device)
# cast normalizer separately
if hasattr(model, "normalizer") and not (
hasattr(model, "pixel_mean") and hasattr(model, "pixel_std")
):
pixel_mean = inspect.getclosurevars(model.normalizer).nonlocals["pixel_mean"]
pixel_std = inspect.getclosurevars(model.normalizer).nonlocals["pixel_std"]
pixel_mean = pixel_mean.to(device)
pixel_std = pixel_std.to(device)
model.normalizer = lambda x: (x - pixel_mean) / pixel_std
return model
def _update_export_config_with_extra_files(export_config, extra_files):
export_config_dict = export_config._asdict()
if export_config_dict["model_export_kwargs"] is None:
export_config_dict["model_export_kwargs"] = {}
export_config_dict["model_export_kwargs"]["_extra_files"] = extra_files
return PredictorExportConfig(**export_config_dict)
@RCNN_PREPARE_FOR_EXPORT_REGISTRY.register()
def prepare_for_export_with_inference_config(
self, cfg: CfgNode, inputs: Optional[Tuple[Any]], predictor_type: str
) -> PredictorExportConfig:
"""
For certain tasks, the exported model needs to encode config as part of the extra
files.
"""
export_config = default_rcnn_prepare_for_export(self, cfg, inputs, predictor_type)
# Add "inference_config.json" for the _extra_files as part of model_export_kwargs
extra_files = {"inference_config.json": json.dumps(flatten_config_dict(cfg))}
return _update_export_config_with_extra_files(export_config, extra_files)
|
d2go-main
|
d2go/modeling/meta_arch/rcnn.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import PanopticFPN as _PanopticFPN
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class PanopticFPN(_PanopticFPN):
def prepare_for_export(self, cfg, inputs, predictor_type):
raise NotImplementedError
|
d2go-main
|
d2go/modeling/meta_arch/panoptic_fpn.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.modeling.meta_arch import ( # noqa
fcos as _fcos,
panoptic_fpn as _panoptic_fpn,
rcnn as _rcnn,
retinanet as _retinanet,
semantic_seg as _semantic_seg,
)
# @fb-only: from d2go.modeling.meta_arch import fb as _fb # isort:skip # noqa
|
d2go-main
|
d2go/modeling/meta_arch/__init__.py
|