python_code (string, 0 to 4.04M chars) | repo_name (string, 8 to 58 chars) | file_path (string, 5 to 147 chars)
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import torch.nn as nn
from d2go.config import CfgNode as CN
from d2go.export.api import PredictorExportConfig
from d2go.modeling.meta_arch.rcnn import D2RCNNInferenceWrapper
from d2go.quantization.qconfig import set_backend_and_create_qconfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.config import configurable
from detectron2.layers.batch_norm import CycleBatchNormList
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.backbone.fpn import FPN
from detectron2.modeling.meta_arch.fcos import FCOS as d2_FCOS, FCOSHead
from detectron2.utils.registry import Registry
from mobile_cv.arch.utils import fuse_utils
from mobile_cv.arch.utils.quantize_utils import (
wrap_non_quant_group_norm,
wrap_quant_subclass,
)
from mobile_cv.predictor.api import FuncInfo
logger = logging.getLogger(__name__)
# Registry to store custom export logic
FCOS_PREPARE_FOR_EXPORT_REGISTRY = Registry("FCOS_PREPARE_FOR_EXPORT")
class FCOSInferenceWrapper(nn.Module):
def __init__(
self,
model,
):
super().__init__()
self.model = model
def forward(self, image):
inputs = [{"image": image}]
return self.model.forward(inputs)[0]["instances"]
def add_fcos_configs(cfg):
cfg.MODEL.FCOS = CN()
# the number of foreground classes.
cfg.MODEL.FCOS.NUM_CLASSES = 80
cfg.MODEL.FCOS.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
cfg.MODEL.FCOS.NUM_CONVS = 4
cfg.MODEL.FCOS.HEAD_NORM = "GN"
# inference parameters
cfg.MODEL.FCOS.SCORE_THRESH_TEST = 0.04
cfg.MODEL.FCOS.TOPK_CANDIDATES_TEST = 1000
cfg.MODEL.FCOS.NMS_THRESH_TEST = 0.6
# Focal loss parameters
cfg.MODEL.FCOS.FOCAL_LOSS_ALPHA = 0.25
cfg.MODEL.FCOS.FOCAL_LOSS_GAMMA = 2.0
# Export method
cfg.FCOS_PREPARE_FOR_EXPORT = "default_fcos_prepare_for_export"
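# A minimal usage sketch (not part of the original file): how `add_fcos_configs`
# might be applied on top of a base detectron2 config before selecting the FCOS
# meta-arch. The `get_cfg` base config and the exact runner wiring are assumptions
# about a typical D2Go/detectron2 setup, shown for illustration only.
def _example_fcos_cfg():
    from detectron2.config import get_cfg

    cfg = get_cfg()
    add_fcos_configs(cfg)
    cfg.MODEL.META_ARCHITECTURE = "FCOS"
    # Any FCOS node added above can now be overridden, e.g. from a YAML file.
    cfg.MODEL.FCOS.SCORE_THRESH_TEST = 0.05
    return cfg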
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class FCOS(d2_FCOS):
"""
Implement config->argument translation for FCOS model.
"""
@configurable
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
backbone_shape = backbone.output_shape()
try:
feature_shapes = [backbone_shape[f] for f in cfg.MODEL.FCOS.IN_FEATURES]
except KeyError:
raise KeyError(
f"Available keys: {backbone_shape.keys()}. Requested keys: {cfg.MODEL.FCOS.IN_FEATURES}"
)
head = FCOSHead(
input_shape=feature_shapes,
num_classes=cfg.MODEL.FCOS.NUM_CLASSES,
conv_dims=[feature_shapes[0].channels] * cfg.MODEL.FCOS.NUM_CONVS,
norm=cfg.MODEL.FCOS.HEAD_NORM,
)
return {
"backbone": backbone,
"head": head,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"num_classes": cfg.MODEL.FCOS.NUM_CLASSES,
"head_in_features": cfg.MODEL.FCOS.IN_FEATURES,
# Loss parameters:
"focal_loss_alpha": cfg.MODEL.FCOS.FOCAL_LOSS_ALPHA,
"focal_loss_gamma": cfg.MODEL.FCOS.FOCAL_LOSS_GAMMA,
# Inference parameters:
"test_score_thresh": cfg.MODEL.FCOS.SCORE_THRESH_TEST,
"test_topk_candidates": cfg.MODEL.FCOS.TOPK_CANDIDATES_TEST,
"test_nms_thresh": cfg.MODEL.FCOS.NMS_THRESH_TEST,
"max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
}
# HACK: default FCOS export shares the same prepare functions w/ RCNN under certain constraints
def prepare_for_export(self, cfg, *args, **kwargs):
func = FCOS_PREPARE_FOR_EXPORT_REGISTRY.get(cfg.FCOS_PREPARE_FOR_EXPORT)
return func(self, cfg, *args, **kwargs)
def prepare_for_quant(self, cfg, *args, **kwargs):
"""Wrap each quantized part of the model to insert Quant and DeQuant in-place"""
model = self
qconfig = set_backend_and_create_qconfig(
cfg, is_train=cfg.QUANTIZATION.QAT.ENABLED
)
logger.info("Setup the model with qconfig:\n{}".format(qconfig))
model.backbone.qconfig = qconfig
model.head.qconfig = qconfig
# Wrap the backbone based on the architecture type
if isinstance(model.backbone, FPN):
# Same trick in RCNN's _apply_eager_mode_quant
model.backbone.bottom_up = wrap_quant_subclass(
model.backbone.bottom_up,
n_inputs=1,
n_outputs=len(model.backbone.bottom_up._out_features),
)
else:
model.backbone = wrap_quant_subclass(
model.backbone, n_inputs=1, n_outputs=len(model.backbone._out_features)
)
def unpack_cyclebatchnormlist(module):
# HACK: This function flattens CycleBatchNormList for quantization purposes
if isinstance(module, CycleBatchNormList):
if len(module) > 1:
# TODO: add quantization support of CycleBatchNormList
raise NotImplementedError(
"CycleBatchNormList w/ more than one element cannot be quantized"
)
else:
num_channel = module.weight.size(0)
new_module = nn.BatchNorm2d(num_channel, affine=True)
new_module.weight = module.weight
new_module.bias = module.bias
new_module.running_mean = module[0].running_mean
new_module.running_var = module[0].running_var
module = new_module
else:
for name, child in module.named_children():
new_child = unpack_cyclebatchnormlist(child)
if new_child is not child:
module.add_module(name, new_child)
return module
model.head = unpack_cyclebatchnormlist(model.head)
# Wrap the FCOS head
model.head = wrap_quant_subclass(
model.head,
n_inputs=len(cfg.MODEL.FCOS.IN_FEATURES),
n_outputs=len(cfg.MODEL.FCOS.IN_FEATURES) * 3,
)
model = fuse_utils.fuse_model(
model,
is_qat=cfg.QUANTIZATION.QAT.ENABLED,
inplace=True,
)
model = wrap_non_quant_group_norm(model)
return model
@FCOS_PREPARE_FOR_EXPORT_REGISTRY.register()
def default_fcos_prepare_for_export(self, cfg, inputs, predictor_type):
pytorch_model = self
preprocess_info = FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Preprocess, params={}
)
preprocess_func = preprocess_info.instantiate()
return PredictorExportConfig(
model=FCOSInferenceWrapper(pytorch_model),
data_generator=lambda x: (preprocess_func(x),),
model_export_method=predictor_type,
preprocess_info=preprocess_info,
postprocess_info=FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Postprocess,
params={"detector_postprocess_done_in_model": True},
),
)
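# A hypothetical sketch (not part of the original file): a project can register its
# own export logic in FCOS_PREPARE_FOR_EXPORT_REGISTRY and point
# cfg.FCOS_PREPARE_FOR_EXPORT at the registered name. The function below is an
# illustrative placeholder that simply reuses the default behavior.
def _example_register_custom_fcos_export():
    @FCOS_PREPARE_FOR_EXPORT_REGISTRY.register()
    def custom_fcos_prepare_for_export(self, cfg, inputs, predictor_type):
        # Delegate to the default path; a real implementation could adjust the
        # returned PredictorExportConfig here.
        return default_fcos_prepare_for_export(self, cfg, inputs, predictor_type)

    # Selected via config: cfg.FCOS_PREPARE_FOR_EXPORT = "custom_fcos_prepare_for_export"
    return custom_fcos_prepare_for_export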
|
d2go-main
|
d2go/modeling/meta_arch/fcos.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import RetinaNet as _RetinaNet
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class RetinaNet(_RetinaNet):
def prepare_for_export(self, cfg, inputs, predictor_type):
raise NotImplementedError
|
d2go-main
|
d2go/modeling/meta_arch/retinanet.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List
import torch
import torch.nn as nn
from d2go.export.api import PredictorExportConfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import SemanticSegmentor as _SemanticSegmentor
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import ImageList
from mobile_cv.predictor.api import FuncInfo
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class SemanticSegmentor(_SemanticSegmentor):
def prepare_for_export(self, cfg, inputs, predictor_type):
preprocess_info = FuncInfo.gen_func_info(
PreprocessFunc,
params={
"size_divisibility": self.backbone.size_divisibility,
"device": str(self.device),
},
)
postprocess_info = FuncInfo.gen_func_info(
PostprocessFunc,
params={},
)
preprocess_func = preprocess_info.instantiate()
return PredictorExportConfig(
model=ModelWrapper(self),
data_generator=lambda x: (preprocess_func(x),),
preprocess_info=preprocess_info,
postprocess_info=postprocess_info,
)
class ModelWrapper(nn.Module):
def __init__(self, segmentor):
super().__init__()
self.segmentor = segmentor
def forward(self, x):
x = (x - self.segmentor.pixel_mean) / self.segmentor.pixel_std
features = self.segmentor.backbone(x)
results, losses = self.segmentor.sem_seg_head(features, targets=None)
return results
class PreprocessFunc(object):
"""
A common preprocessing module for semantic segmentation models.
"""
def __init__(self, size_divisibility, device):
self.size_divisibility = size_divisibility
self.device = device
def __call__(self, batched_inputs: List[Dict[str, Any]]) -> torch.Tensor:
"""
Retrieve image tensors from dataloader batches.
Args:
batched_inputs: (List[Dict[str, Tensor]]): output from a
D2Go train or test data loader.
Returns:
input images (torch.Tensor): ImageList-wrapped NCHW tensor
(i.e. with padding and divisibility alignment) of batches' images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = ImageList.from_tensors(images, self.size_divisibility)
return images.tensor
class PostprocessFunc(object):
"""
A common postprocessing module for semantic segmentation models.
"""
def __call__(
self,
batched_inputs: List[Dict[str, Any]],
tensor_inputs: torch.Tensor,
tensor_outputs: torch.Tensor,
) -> List[Dict[str, Any]]:
"""
Rescales sem_seg logits to original image input resolution,
and packages the logits into D2Go's expected output format.
Args:
batched_inputs (List[Dict[str, Tensor]]): batched inputs from the dataloader.
tensor_inputs (Tensor): tensorized inputs, e.g. from `PreprocessFunc`.
tensor_outputs (Tensor): sem seg logits tensor from the model to process.
Returns:
processed_results (List[Dict]): List of D2Go output dicts ready to be used
downstream in an Evaluator, for export, etc.
"""
results = tensor_outputs # nchw
processed_results = []
for result, input_per_image in zip(results, batched_inputs):
height = input_per_image.get("height")
width = input_per_image.get("width")
image_tensor_shape = input_per_image["image"].shape
image_size = (image_tensor_shape[1], image_tensor_shape[2])
# D2's sem_seg_postprocess rescales sem seg masks to the
# provided original input resolution.
r = sem_seg_postprocess(result, image_size, height, width)
processed_results.append({"sem_seg": r})
return processed_results
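# Illustrative sketch (not part of the original file): how PreprocessFunc and
# PostprocessFunc compose around the wrapped model. The tensor sizes and the
# 5-class logits below are arbitrary placeholders.
def _example_semseg_pre_post_roundtrip():
    batched_inputs = [{"image": torch.rand(3, 64, 48), "height": 128, "width": 96}]
    preprocess = PreprocessFunc(size_divisibility=32, device="cpu")
    nchw = preprocess(batched_inputs)  # padded to multiples of 32 -> (1, 3, 64, 64)
    fake_logits = torch.rand(1, 5, nchw.shape[2], nchw.shape[3])
    results = PostprocessFunc()(batched_inputs, nchw, fake_logits)
    return results[0]["sem_seg"].shape  # cropped and rescaled to (5, 128, 96)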
|
d2go-main
|
d2go/modeling/meta_arch/semantic_seg.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
from d2go.modeling.modeldef.fbnet_modeldef_registry import FBNetV2ModelArch
from d2go.registry.bootstrap import lazy_on_bootstrap
from mobile_cv.arch.fbnet_v2.modeldef_utils import _ex, e1, e1p, e2, e3, e4, e6
def _mutated_tuple(tp, pos, value):
tp_list = list(tp)
tp_list[pos] = value
return tuple(tp_list)
@lazy_on_bootstrap
def _repeat_last(stage, n=None):
"""
Repeat the last "layer" of a given stage, i.e. a (op_type, c, s, n_repeat, ...)
tuple, resetting n_repeat if specified, otherwise keeping the original value.
"""
assert isinstance(stage, list)
assert all(isinstance(x, tuple) for x in stage)
last_layer = copy.deepcopy(stage[-1])
if n is not None:
last_layer = _mutated_tuple(last_layer, 3, n)
return last_layer
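# Tiny illustration (not part of the original file), assuming the bootstrap wrapper
# behaves as a plain call here: for a stage like [("ir_k3", 64, 2, 4, e6)],
# _repeat_last(stage) copies the last layer tuple as-is, while _repeat_last(stage, n=1)
# resets its n_repeat field, yielding ("ir_k3", 64, 2, 1, e6).
def _example_repeat_last():
    stage = [("ir_k3", 64, 2, 4, e6)]
    return _repeat_last(stage), _repeat_last(stage, n=1)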
_BASIC_ARGS = {
# skip norm and activation for the depthwise conv in the IRF module; this makes
# the model easier to quantize.
"dw_skip_bnrelu": True,
# uncomment below (always_pw and bias) to match model definition of the
# FBNetV1 builder.
# "always_pw": True,
# "bias": False,
# temporarily disable zero_last_bn_gamma
"zero_last_bn_gamma": False,
}
DEFAULT_STAGES = [
# NOTE: each stage is a list of (op_type, out_channels, stride, n_repeat, ...)
# resolution stage 0, equivalent to 224->112
[("conv_k3", 32, 2, 1), ("ir_k3", 16, 1, 1, e1)],
# resolution stage 1, equivalent to 112->56
[("ir_k3", 24, 2, 2, e6)],
# resolution stage 2, equivalent to 56->28
[("ir_k3", 32, 2, 3, e6)],
# resolution stage 3, equivalent to 28->14
[("ir_k3", 64, 2, 4, e6), ("ir_k3", 96, 1, 3, e6)],
# resolution stage 4, equivalent to 14->7
[("ir_k3", 160, 2, 3, e6), ("ir_k3", 320, 1, 1, e6)],
# final stage, equivalent to 7->1, ignored
]
IRF_CFG = {"less_se_channels": False}
FBNetV3_A_dsmask = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 1, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 32, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 32, 1, 1, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 40, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 40, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 72, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 72, 1, 3, {"expansion": 3}, IRF_CFG),
("ir_k5", 112, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 112, 1, 3, {"expansion": 4}, IRF_CFG),
],
[
("ir_k5", 184, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 184, 1, 4, {"expansion": 4}, IRF_CFG),
("ir_k5", 200, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_A_dsmask_tiny = [
[("conv_k3", 8, 2, 1), ("ir_k3", 8, 1, 1, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 16, 2, 1, {"expansion": 3}, IRF_CFG),
("ir_k5", 16, 1, 1, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 24, 1, 2, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 40, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 40, 1, 2, {"expansion": 3}, IRF_CFG),
("ir_k5", 64, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 64, 1, 2, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 92, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 92, 1, 2, {"expansion": 4}, IRF_CFG),
("ir_k5", 92, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_A = [
# FBNetV3 arch without hs
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5_se", 32, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3_se", 32, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 64, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 64, 1, 3, {"expansion": 3}, IRF_CFG),
("ir_k5_se", 112, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 112, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5_se", 184, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3_se", 184, 1, 4, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 200, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_B = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 3, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 40, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 40, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 72, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 72, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 120, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 120, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 184, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 184, 1, 5, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 224, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_C = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 24, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5_se", 48, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 48, 1, 4, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 88, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 88, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 120, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 120, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5_se", 216, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 216, 1, 5, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 216, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_D = [
[("conv_k3", 24, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k3", 24, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 24, 1, 5, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 40, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3_se", 40, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3", 72, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 72, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 128, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 128, 1, 6, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 208, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 208, 1, 5, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 240, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_E = [
[("conv_k3", 24, 2, 1), ("ir_k3", 16, 1, 3, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 4, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 48, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 48, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 80, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 80, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 128, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 128, 1, 7, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 216, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 216, 1, 5, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 240, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_F = [
[("conv_k3", 24, 2, 1), ("ir_k3", 24, 1, 3, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 32, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 32, 1, 4, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 56, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 56, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 88, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 88, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 144, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 144, 1, 8, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 248, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 248, 1, 6, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 272, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_G = [
[("conv_k3", 32, 2, 1), ("ir_k3", 24, 1, 3, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 40, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 40, 1, 4, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 56, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 56, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 104, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 104, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 160, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 160, 1, 8, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 264, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 264, 1, 6, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 288, 1, 2, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_H = [
[("conv_k3", 48, 2, 1), ("ir_k3", 32, 1, 4, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 64, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 64, 1, 6, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 80, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 80, 1, 6, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 160, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 160, 1, 6, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 240, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 240, 1, 12, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 400, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 400, 1, 8, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 480, 1, 3, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_A_no_se = [
# FBNetV3 without hs and SE (SE is not quantization friendly)
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 32, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 32, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 64, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 64, 1, 3, {"expansion": 3}, IRF_CFG),
("ir_k5", 112, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 112, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 184, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 184, 1, 4, {"expansion": 4}, IRF_CFG),
("ir_k5", 200, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_B_no_se = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 3, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 40, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5", 40, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 72, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 72, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3", 120, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5", 120, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3", 184, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5", 184, 1, 5, {"expansion": 4}, IRF_CFG),
("ir_k5", 224, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
# FBNetV3_B model, a lighter version for real-time inference
FBNetV3_B_light_no_se = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 2, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 40, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5", 40, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 72, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 72, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3", 120, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5", 120, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3", 184, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5", 184, 1, 5, {"expansion": 4}, IRF_CFG),
("ir_k5", 224, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
LARGE_BOX_HEAD_STAGES = [
[("ir_k3", 160, 2, 1, e4), ("ir_k3", 160, 1, 2, e6), ("ir_k3", 240, 1, 1, e6)],
]
SMALL_BOX_HEAD_STAGES = [
[("ir_k3", 128, 2, 1, e4), ("ir_k3", 128, 1, 2, e6), ("ir_k3", 160, 1, 1, e6)],
]
TINY_BOX_HEAD_STAGES = [
[("ir_k3", 64, 2, 1, e4), ("ir_k3", 64, 1, 2, e4), ("ir_k3", 80, 1, 1, e4)],
]
LARGE_UPSAMPLE_HEAD_STAGES = [
[("ir_k3", 160, 1, 1, e4), ("ir_k3", 160, 1, 3, e6), ("ir_k3", 80, -2, 1, e3)],
]
LARGE_UPSAMPLE_HEAD_D21_STAGES = [
[("ir_k3", 192, 1, 1, e4), ("ir_k3", 192, 1, 5, e3), ("ir_k3", 96, -2, 1, e3)],
]
SMALL_UPSAMPLE_HEAD_STAGES = [
[("ir_k3", 128, 1, 1, e4), ("ir_k3", 128, 1, 3, e6), ("ir_k3", 64, -2, 1, e3)],
]
# NOTE: Compared with SMALL_UPSAMPLE_HEAD_STAGES, this does one more down-sample
# in the first "layer" and then up-sample twice
SMALL_DS_UPSAMPLE_HEAD_STAGES = [
[
("ir_k3", 128, 2, 1, e4),
("ir_k3", 128, 1, 2, e6),
("ir_k3", 128, -2, 1, e6),
("ir_k3", 64, -2, 1, e3),
], # noqa
]
TINY_DS_UPSAMPLE_HEAD_STAGES = [
[
("ir_k3", 64, 2, 1, e4),
("ir_k3", 64, 1, 2, e4),
("ir_k3", 64, -2, 1, e4),
("ir_k3", 40, -2, 1, e3),
], # noqa
]
FPN_UPSAMPLE_HEAD_STAGES = [
[("ir_k3", 96, 1, 1, e6), ("ir_k3", 160, 1, 3, e6), ("ir_k3", 80, -2, 1, e3)],
]
MODEL_ARCH_BUILTIN = {
"default": {
"trunk": DEFAULT_STAGES[0:4],
"rpn": [[_repeat_last(DEFAULT_STAGES[3])]],
"bbox": LARGE_BOX_HEAD_STAGES,
"mask": LARGE_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"default_dsmask": {
"trunk": DEFAULT_STAGES[0:4],
"rpn": [[_repeat_last(DEFAULT_STAGES[3])]],
"bbox": SMALL_BOX_HEAD_STAGES,
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"kpts": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A": {
"trunk": FBNetV3_A[0:4],
"rpn": [[_repeat_last(FBNetV3_A[3])]],
"bbox": [FBNetV3_A[4]],
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A_C5": {
"trunk": FBNetV3_A[0:5],
"rpn": [[_repeat_last(FBNetV3_A[3])]],
"bbox": [FBNetV3_A[4]],
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_B": {
"trunk": FBNetV3_B[0:4],
"rpn": [[_repeat_last(FBNetV3_B[3])]],
"bbox": [FBNetV3_B[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_B_C5": {
"trunk": FBNetV3_B[0:5],
"rpn": [[_repeat_last(FBNetV3_B[3])]],
"bbox": [FBNetV3_B[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_C": {
"trunk": FBNetV3_C[0:4],
"rpn": [[_repeat_last(FBNetV3_C[3])]],
"bbox": [FBNetV3_C[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_C_C5": {
"trunk": FBNetV3_C[0:5],
"rpn": [[_repeat_last(FBNetV3_C[3])]],
"bbox": [FBNetV3_C[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_D": {
"trunk": FBNetV3_D[0:4],
"rpn": [[_repeat_last(FBNetV3_D[3])]],
"bbox": [FBNetV3_D[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_E": {
"trunk": FBNetV3_E[0:4],
"rpn": [[_repeat_last(FBNetV3_E[3])]],
"bbox": [FBNetV3_E[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_F": {
"trunk": FBNetV3_F[0:4],
"rpn": [[_repeat_last(FBNetV3_F[3])]],
"bbox": [FBNetV3_F[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_F_C5": {
"trunk": FBNetV3_F[0:5],
"rpn": [[_repeat_last(FBNetV3_F[3])]],
"bbox": [FBNetV3_F[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_G": {
"trunk": FBNetV3_G[0:4],
"rpn": [[_repeat_last(FBNetV3_G[3])]],
"bbox": [FBNetV3_G[4]],
"mask": LARGE_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_D21_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_G_C5": {
"trunk": FBNetV3_G[0:5],
"rpn": [[_repeat_last(FBNetV3_G[3])]],
"bbox": [FBNetV3_G[4]],
"mask": LARGE_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_D21_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_H": {
"trunk": FBNetV3_H[0:4],
"rpn": [[_repeat_last(FBNetV3_H[3])]],
"bbox": [FBNetV3_H[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A_dsmask_C5": {
"trunk": FBNetV3_A_dsmask,
"rpn": [[_repeat_last(FBNetV3_A_dsmask[3])]],
"bbox": SMALL_BOX_HEAD_STAGES,
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"kpts": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A_dsmask": {
"trunk": FBNetV3_A_dsmask[0:4],
"rpn": [[_repeat_last(FBNetV3_A_dsmask[3])]],
"bbox": SMALL_BOX_HEAD_STAGES,
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"kpts": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A_dsmask_tiny": {
"trunk": FBNetV3_A_dsmask_tiny[0:4],
"rpn": [[_repeat_last(FBNetV3_A_dsmask_tiny[3])]],
"bbox": TINY_BOX_HEAD_STAGES,
"mask": TINY_DS_UPSAMPLE_HEAD_STAGES,
"kpts": TINY_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_B_light_large": {
"trunk": FBNetV3_B_light_no_se[0:4],
"rpn": [[_repeat_last(FBNetV3_B_light_no_se[3])]],
"bbox": SMALL_BOX_HEAD_STAGES,
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_D21_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_B_light_no_se_C5": {
"trunk": FBNetV3_B_light_no_se[0:5],
"rpn": [[_repeat_last(FBNetV3_B_light_no_se[3])]],
"bbox": [FBNetV3_B_light_no_se[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_G_fpn": {
"trunk": FBNetV3_G[0:5], # FPN uses all 5 stages
"rpn": [[_repeat_last(FBNetV3_G[3], n=1)]],
"bbox": [FBNetV3_G[4]],
"mask": FPN_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_D21_STAGES,
"basic_args": _BASIC_ARGS,
},
}
FBNetV2ModelArch.add_archs(MODEL_ARCH_BUILTIN)
|
d2go-main
|
d2go/modeling/modeldef/modeldef.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This is the centralized place to define modeldef for all projects under D2Go.
"""
# Populating registries
from d2go.modeling.modeldef import modeldef as _modeldef # noqa
# @fb-only: from d2go.modeling.modeldef import fb as _fb # isort:skip # noqa
|
d2go-main
|
d2go/modeling/modeldef/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
class FBNetV2ModelArch(object):
_MODEL_ARCH = {}
@staticmethod
def add(name, arch):
assert (
name not in FBNetV2ModelArch._MODEL_ARCH
), "Arch name '{}' is already existed".format(name)
FBNetV2ModelArch._MODEL_ARCH[name] = arch
@staticmethod
def add_archs(archs):
for name, arch in archs.items():
FBNetV2ModelArch.add(name, arch)
@staticmethod
def get(name):
return copy.deepcopy(FBNetV2ModelArch._MODEL_ARCH[name])
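# Usage sketch (not part of the original file): arch definitions are added once at
# import time and retrieved as deep copies, so callers can mutate the result without
# affecting the registry. The arch name and stage below are illustrative only.
def _example_model_arch_registry():
    FBNetV2ModelArch.add("example_tiny_arch", {"trunk": [[("conv_k3", 8, 2, 1)]]})
    arch = FBNetV2ModelArch.get("example_tiny_arch")
    arch["trunk"][0].append(("ir_k3", 8, 1, 1))  # local edit; the registered copy is unchanged
    return arch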
|
d2go-main
|
d2go/modeling/modeldef/fbnet_modeldef_registry.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.modeling.backbone import fbnet_v2 as _fbnet_v2 # noqa
# @fb-only: from d2go.modeling.backbone import fb as _fb # isort:skip # noqa
|
d2go-main
|
d2go/modeling/backbone/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import itertools
import logging
from typing import List
import torch
import torch.nn as nn
from d2go.modeling.backbone.modules import (
KeypointRCNNConvUpsamplePredictorNoUpscale,
KeypointRCNNIRFPredictorNoUpscale,
KeypointRCNNPredictor,
KeypointRCNNPredictorNoUpscale,
MaskRCNNConv1x1Predictor,
RPNHeadConvRegressor,
)
from d2go.modeling.modeldef.fbnet_modeldef_registry import FBNetV2ModelArch
from detectron2.layers import ShapeSpec
from detectron2.modeling import (
Backbone,
BACKBONE_REGISTRY,
build_anchor_generator,
RPN_HEAD_REGISTRY,
)
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
from detectron2.modeling.roi_heads import box_head, keypoint_head, mask_head
from detectron2.utils.logger import log_first_n
from mobile_cv.arch.fbnet_v2 import fbnet_builder as mbuilder
from mobile_cv.arch.utils.helper import format_dict_expanding_list_values
logger = logging.getLogger(__name__)
FBNET_BUILDER_IDENTIFIER = "fbnetv2"
def _get_builder_norm_args(cfg):
norm_name = cfg.MODEL.FBNET_V2.NORM
norm_args = {"name": norm_name}
assert all(isinstance(x, dict) for x in cfg.MODEL.FBNET_V2.NORM_ARGS)
for dic in cfg.MODEL.FBNET_V2.NORM_ARGS:
norm_args.update(dic)
return norm_args
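# Illustration (not part of the original file): NORM_ARGS is a list of dicts merged,
# in order, on top of {"name": NORM}; later dicts win on key conflicts. SimpleNamespace
# stands in for a real config node here, purely for the sketch.
def _example_norm_args_merge():
    from types import SimpleNamespace

    fbnet_v2 = SimpleNamespace(NORM="gn", NORM_ARGS=[{"num_groups": 8}, {"eps": 1e-5}])
    cfg = SimpleNamespace(MODEL=SimpleNamespace(FBNET_V2=fbnet_v2))
    return _get_builder_norm_args(cfg)  # {"name": "gn", "num_groups": 8, "eps": 1e-5}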
def _merge_fbnetv2_arch_def(cfg):
arch_def = {}
assert all(
isinstance(x, dict) for x in cfg.MODEL.FBNET_V2.ARCH_DEF
), cfg.MODEL.FBNET_V2.ARCH_DEF
for dic in cfg.MODEL.FBNET_V2.ARCH_DEF:
arch_def.update(dic)
return arch_def
def _parse_arch_def(cfg):
arch = cfg.MODEL.FBNET_V2.ARCH
arch_def = cfg.MODEL.FBNET_V2.ARCH_DEF
assert (arch != "" and not arch_def) ^ (
not arch and arch_def != []
), "Only allow one unset node between MODEL.FBNET_V2.ARCH ({}) and MODEL.FBNET_V2.ARCH_DEF ({})".format(
arch, arch_def
)
arch_def = FBNetV2ModelArch.get(arch) if arch else _merge_fbnetv2_arch_def(cfg)
# NOTE: arch_def is a dictionary describing the CNN architecture for creating
# the detection model. It can describe a wide range of models including the
# original FBNet. Each key-value pair expresses either a sub part of the model
# like trunk or head, or stores other meta information.
message = 'Using un-unified arch_def for ARCH "{}" (without scaling):\n{}'.format(
arch, format_dict_expanding_list_values(arch_def)
)
log_first_n(logging.INFO, message, n=1, key="message")
return arch_def
def _get_fbnet_builder_and_arch_def(cfg):
arch_def = _parse_arch_def(cfg)
# NOTE: one can store extra information in arch_def to configure FBNetBuilder,
# after this point, builder and arch_def will become independent.
basic_args = arch_def.pop("basic_args", {})
builder = mbuilder.FBNetBuilder(
width_ratio=cfg.MODEL.FBNET_V2.SCALE_FACTOR,
width_divisor=cfg.MODEL.FBNET_V2.WIDTH_DIVISOR,
bn_args=_get_builder_norm_args(cfg),
)
builder.add_basic_args(**basic_args)
return builder, arch_def
def _get_stride_per_stage(blocks):
"""
Count the accumulated stride per stage given a list of blocks. The mbuilder
provides an API for counting per-block accumulated stride; this function leverages
it to count per-stage accumulated stride.
Input: a list of blocks from the unified arch_def. Note that the stage_idx
must be contiguous (not necessarily starting from 0), and can be
non-ascending (not tested).
Output: a list of accumulated stride per stage, starting from lowest stage_idx.
"""
stride_per_block = mbuilder.count_stride_each_block(blocks)
assert len(stride_per_block) == len(blocks)
stage_idx_set = {s["stage_idx"] for s in blocks}
# assume stage idx are contiguous, eg. 1, 2, 3, ...
assert max(stage_idx_set) - min(stage_idx_set) + 1 == len(stage_idx_set)
start_stage_id = min(stage_idx_set)
ids_per_stage = [
[i for i, s in enumerate(blocks) if s["stage_idx"] == stage_idx]
for stage_idx in range(start_stage_id, start_stage_id + len(stage_idx_set))
] # eg. [[0], [1, 2], [3, 4, 5, 6], ...]
block_stride_per_stage = [
[stride_per_block[i] for i in ids] for ids in ids_per_stage
] # eg. [[1], [2, 1], [2, 1, 1, 1], ...]
stride_per_stage = [
list(itertools.accumulate(s, lambda x, y: x * y))[-1]
for s in block_stride_per_stage
] # eg. [1, 2, 2, ...]
accum_stride_per_stage = list(
itertools.accumulate(stride_per_stage, lambda x, y: x * y)
) # eg. [first*1, first*2, first*4, ...]
assert accum_stride_per_stage[-1] == mbuilder.count_strides(blocks)
return accum_stride_per_stage
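# Worked illustration (not part of the original file) of the accumulation above, using
# made-up per-block strides grouped into three stages. Per-stage products are
# [1, 2, 2]; accumulating across stages gives [1, 2, 4], i.e. the total downsampling
# reached at the end of each stage.
def _example_stride_accumulation():
    block_stride_per_stage = [[1], [2, 1], [2, 1, 1]]
    stride_per_stage = [
        list(itertools.accumulate(s, lambda x, y: x * y))[-1]
        for s in block_stride_per_stage
    ]  # [1, 2, 2]
    return list(itertools.accumulate(stride_per_stage, lambda x, y: x * y))  # [1, 2, 4]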
def fbnet_identifier_checker(func):
"""Can be used to decorate _load_from_state_dict"""
def wrapper(self, state_dict, prefix, *args, **kwargs):
possible_keys = [k for k in state_dict.keys() if k.startswith(prefix)]
if not all(FBNET_BUILDER_IDENTIFIER in k for k in possible_keys):
logger.warning(
"Couldn't match FBNetV2 pattern given prefix {}, possible keys: \n{}".format(
prefix, "\n".join(possible_keys)
)
)
if any("xif" in k for k in possible_keys):
raise RuntimeError(
"Seems a FBNetV1 trained checkpoint is loaded by FBNetV2 model,"
" which is not supported. Please consider re-train your model"
" using the same setup as before (it will be FBNetV2). If you"
" need to run the old FBNetV1 models, those configs can be"
" still found, see D19477651 as example."
)
return func(self, state_dict, prefix, *args, **kwargs)
return wrapper
# pyre-fixme[11]: Annotation `Sequential` is not defined as a type.
class FBNetModule(nn.Sequential):
@fbnet_identifier_checker
def _load_from_state_dict(self, *args, **kwargs):
return super()._load_from_state_dict(*args, **kwargs)
def build_fbnet(cfg, name, in_channels):
"""
Create a FBNet module using FBNet V2 builder.
Args:
cfg (CfgNode): the config that contains MODEL.FBNET_V2.
name (str): the key in arch_def that represents a subpart of network
in_channels (int): input channel size
Returns:
nn.Sequential: the first return is a nn.Sequential; each element
corresponds to a stage in arch_def.
List[ShapeSpec]: the second return is a list of ShapeSpec containing the
output channels and accumulated strides for that stage.
"""
builder, raw_arch_def = _get_fbnet_builder_and_arch_def(cfg)
# Reset the last_depth for this builder (it might have been cached); this is
# the only mutable member variable.
builder.last_depth = in_channels
# NOTE: Each sub part of the model consists of several stages and each stage
# has several blocks. "Raw" arch_def (Dict[str, List[List[Tuple]]]) uses a
# list of stages to describe the architecture, which is more compact and
# thus written as builtin metadata (inside FBNetV2ModelArch) or config
# (MODEL.FBNET_V2.ARCH_DEF). "Unified" arch_def (Dict[str, List[Dict]])
# uses a list of blocks from all stages instead, which is recognized by the builder.
arch_def = mbuilder.unify_arch_def(raw_arch_def, [name])
arch_def = {name: arch_def[name]}
logger.info(
"Build FBNet using unified arch_def:\n{}".format(
format_dict_expanding_list_values(arch_def)
)
)
arch_def_blocks = arch_def[name]
stages = []
trunk_stride_per_stage = _get_stride_per_stage(arch_def_blocks)
shape_spec_per_stage = []
for i, stride_i in enumerate(trunk_stride_per_stage):
stages.append(
builder.build_blocks(
arch_def_blocks,
stage_indices=[i],
prefix_name=FBNET_BUILDER_IDENTIFIER + "_",
)
)
shape_spec_per_stage.append(
ShapeSpec(
channels=builder.last_depth,
stride=stride_i,
)
)
return FBNetModule(*stages), shape_spec_per_stage
class FBNetV2Backbone(Backbone):
"""
Backbone (bottom-up) for FBNet.
Hierarchy:
trunk0:
xif0_0
xif0_1
...
trunk1:
xif1_0
xif1_1
...
...
Output features:
The outputs from each "stage", i.e. trunkX.
"""
def __init__(self, cfg):
super(FBNetV2Backbone, self).__init__()
stages, shape_specs = build_fbnet(
cfg, name="trunk", in_channels=cfg.MODEL.FBNET_V2.STEM_IN_CHANNELS
)
self._trunk_stage_names = []
self._trunk_stages = []
self._out_feature_channels = {}
self._out_feature_strides = {}
for i, (stage, shape_spec) in enumerate(zip(stages, shape_specs)):
name = "trunk{}".format(i)
self.add_module(name, stage)
self._trunk_stage_names.append(name)
self._trunk_stages.append(stage)
self._out_feature_channels[name] = shape_spec.channels
self._out_feature_strides[name] = shape_spec.stride
# returned features are the final output of each stage
self._out_features = self._trunk_stage_names
self._trunk_stage_names = tuple(self._trunk_stage_names)
def __prepare_scriptable__(self):
ret = copy.deepcopy(self)
ret._trunk_stages = nn.ModuleList(ret._trunk_stages)
for k in self._trunk_stage_names:
delattr(ret, k)
return ret
@fbnet_identifier_checker
def _load_from_state_dict(self, *args, **kwargs):
return super()._load_from_state_dict(*args, **kwargs)
# return features for each stage
def forward(self, x):
features = {}
for name, stage in zip(self._trunk_stage_names, self._trunk_stages):
x = stage(x)
features[name] = x
return features
class FBNetV2FPN(FPN):
"""
FPN module for FBNet.
"""
pass
def build_fbnet_backbone(cfg):
return FBNetV2Backbone(cfg)
@BACKBONE_REGISTRY.register()
class FBNetV2C4Backbone(Backbone):
def __init__(self, cfg, _):
super(FBNetV2C4Backbone, self).__init__()
self.body = build_fbnet_backbone(cfg)
self._out_features = self.body._out_features
self._out_feature_strides = self.body._out_feature_strides
self._out_feature_channels = self.body._out_feature_channels
def forward(self, x):
return self.body(x)
@BACKBONE_REGISTRY.register()
def FBNetV2FpnBackbone(cfg, _):
backbone = FBNetV2FPN(
bottom_up=build_fbnet_backbone(cfg),
in_features=cfg.MODEL.FPN.IN_FEATURES,
out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
)
return backbone
@BACKBONE_REGISTRY.register()
def FBNetV2RetinaNetBackbone(cfg, _):
bottom_up = build_fbnet_backbone(cfg)
in_channels_p6p7 = bottom_up.output_shape()[cfg.MODEL.FPN.IN_FEATURES[-1]].channels
top_block = LastLevelP6P7(in_channels_p6p7, cfg.MODEL.FPN.OUT_CHANNELS)
top_block.in_feature = cfg.MODEL.FPN.IN_FEATURES[-1]
backbone = FBNetV2FPN(
bottom_up=bottom_up,
in_features=cfg.MODEL.FPN.IN_FEATURES,
out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
norm=cfg.MODEL.FPN.NORM,
top_block=top_block,
)
return backbone
@RPN_HEAD_REGISTRY.register()
class FBNetV2RpnHead(nn.Module):
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super(FBNetV2RpnHead, self).__init__()
in_channels = [x.channels for x in input_shape]
assert len(set(in_channels)) == 1
in_channels = in_channels[0]
anchor_generator = build_anchor_generator(cfg, input_shape)
num_cell_anchors = anchor_generator.num_cell_anchors
box_dim = anchor_generator.box_dim
assert len(set(num_cell_anchors)) == 1
num_cell_anchors = num_cell_anchors[0]
self.rpn_feature, shape_specs = build_fbnet(
cfg, name="rpn", in_channels=in_channels
)
self.rpn_regressor = RPNHeadConvRegressor(
in_channels=shape_specs[-1].channels,
num_anchors=num_cell_anchors,
box_dim=box_dim,
)
def forward(self, x: List[torch.Tensor]):
x = [self.rpn_feature(y) for y in x]
return self.rpn_regressor(x)
@box_head.ROI_BOX_HEAD_REGISTRY.register()
class FBNetV2RoIBoxHead(nn.Module):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIBoxHead, self).__init__()
self.roi_box_conv, shape_specs = build_fbnet(
cfg, name="bbox", in_channels=input_shape.channels
)
self._out_channels = shape_specs[-1].channels
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.roi_box_conv(x)
if len(x.shape) == 4 and (x.shape[2] > 1 or x.shape[3] > 1):
x = self.avgpool(x)
return x
@property
@torch.jit.unused
def output_shape(self):
return ShapeSpec(channels=self._out_channels)
@keypoint_head.ROI_KEYPOINT_HEAD_REGISTRY.register()
class FBNetV2RoIKeypointHead(keypoint_head.BaseKeypointRCNNHead):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIKeypointHead, self).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg, name="kpts", in_channels=input_shape.channels
)
self.predictor = KeypointRCNNPredictor(
in_channels=shape_specs[-1].channels,
num_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
@keypoint_head.ROI_KEYPOINT_HEAD_REGISTRY.register()
class FBNetV2RoIKeypointHeadKRCNNPredictorNoUpscale(keypoint_head.BaseKeypointRCNNHead):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIKeypointHeadKRCNNPredictorNoUpscale, self).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg,
name="kpts",
in_channels=input_shape.channels,
)
self.predictor = KeypointRCNNPredictorNoUpscale(
in_channels=shape_specs[-1].channels,
num_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
@keypoint_head.ROI_KEYPOINT_HEAD_REGISTRY.register()
class FBNetV2RoIKeypointHeadKPRCNNIRFPredictorNoUpscale(
keypoint_head.BaseKeypointRCNNHead,
):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIKeypointHeadKPRCNNIRFPredictorNoUpscale, self).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg,
name="kpts",
in_channels=input_shape.channels,
)
self.predictor = KeypointRCNNIRFPredictorNoUpscale(
cfg,
in_channels=shape_specs[-1].channels,
num_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
@keypoint_head.ROI_KEYPOINT_HEAD_REGISTRY.register()
class FBNetV2RoIKeypointHeadKPRCNNConvUpsamplePredictorNoUpscale(
keypoint_head.BaseKeypointRCNNHead,
):
def __init__(self, cfg, input_shape: ShapeSpec):
super(
FBNetV2RoIKeypointHeadKPRCNNConvUpsamplePredictorNoUpscale, self
).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg,
name="kpts",
in_channels=input_shape.channels,
)
self.predictor = KeypointRCNNConvUpsamplePredictorNoUpscale(
cfg,
in_channels=shape_specs[-1].channels,
num_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
@mask_head.ROI_MASK_HEAD_REGISTRY.register()
class FBNetV2RoIMaskHead(mask_head.BaseMaskRCNNHead):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIMaskHead, self).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg,
name="mask",
in_channels=input_shape.channels,
)
num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
self.predictor = MaskRCNNConv1x1Predictor(shape_specs[-1].channels, num_classes)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
|
d2go-main
|
d2go/modeling/backbone/fbnet_v2.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import torch
import torch.nn as nn
from detectron2 import layers
from detectron2.utils.tracing import is_fx_tracing
from mobile_cv.arch.fbnet_v2.irf_block import IRFBlock
class RPNHeadConvRegressor(nn.Module):
"""
A simple RPN Head for classification and bbox regression
"""
def __init__(self, in_channels, num_anchors, box_dim=4):
"""
Arguments:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
box_dim (int): dimension of bbox
"""
super(RPNHeadConvRegressor, self).__init__()
self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
self.bbox_pred = nn.Conv2d(
in_channels, num_anchors * box_dim, kernel_size=1, stride=1
)
for l in [self.cls_logits, self.bbox_pred]:
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
def forward(self, x: List[torch.Tensor]):
if not is_fx_tracing():
torch._assert(isinstance(x, (list, tuple)), "Unexpected data type")
logits = [self.cls_logits(y) for y in x]
bbox_reg = [self.bbox_pred(y) for y in x]
return logits, bbox_reg
class MaskRCNNConv1x1Predictor(nn.Module):
def __init__(self, in_channels, out_channels):
super(MaskRCNNConv1x1Predictor, self).__init__()
num_classes = out_channels
num_inputs = in_channels
self.mask_fcn_logits = nn.Conv2d(num_inputs, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x):
return self.mask_fcn_logits(x)
class KeypointRCNNPredictor(nn.Module):
def __init__(self, in_channels, num_keypoints):
super(KeypointRCNNPredictor, self).__init__()
input_features = in_channels
deconv_kernel = 4
self.kps_score_lowres = nn.ConvTranspose2d(
input_features,
num_keypoints,
deconv_kernel,
stride=2,
padding=deconv_kernel // 2 - 1,
)
nn.init.kaiming_normal_(
self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu"
)
nn.init.constant_(self.kps_score_lowres.bias, 0)
self.up_scale = 2
self.out_channels = num_keypoints
def forward(self, x):
x = self.kps_score_lowres(x)
x = layers.interpolate(
x, scale_factor=self.up_scale, mode="bilinear", align_corners=False
)
return x
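# Shape sketch (not part of the original file): the stride-2 deconv roughly doubles
# the spatial size and the bilinear interpolate doubles it again, so a 14x14 RoI
# feature map produces ~56x56 keypoint heatmaps. The channel count and input size
# below are arbitrary placeholders.
def _example_keypoint_predictor_shapes():
    predictor = KeypointRCNNPredictor(in_channels=64, num_keypoints=17)
    x = torch.rand(2, 64, 14, 14)
    return predictor(x).shape  # torch.Size([2, 17, 56, 56])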
class KeypointRCNNPredictorNoUpscale(nn.Module):
def __init__(self, in_channels, num_keypoints):
super(KeypointRCNNPredictorNoUpscale, self).__init__()
input_features = in_channels
deconv_kernel = 4
self.kps_score_lowres = nn.ConvTranspose2d(
input_features,
num_keypoints,
deconv_kernel,
stride=2,
padding=deconv_kernel // 2 - 1,
)
nn.init.kaiming_normal_(
self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu"
)
nn.init.constant_(self.kps_score_lowres.bias, 0)
self.out_channels = num_keypoints
def forward(self, x):
x = self.kps_score_lowres(x)
return x
class KeypointRCNNIRFPredictorNoUpscale(nn.Module):
def __init__(self, cfg, in_channels, num_keypoints):
super(KeypointRCNNIRFPredictorNoUpscale, self).__init__()
input_features = in_channels
self.kps_score_lowres = IRFBlock(
input_features,
num_keypoints,
stride=-2,
expansion=3,
bn_args="none",
dw_skip_bnrelu=True,
)
self.out_channels = num_keypoints
def forward(self, x):
x = self.kps_score_lowres(x)
return x
class KeypointRCNNConvUpsamplePredictorNoUpscale(nn.Module):
def __init__(self, cfg, in_channels, num_keypoints):
super(KeypointRCNNConvUpsamplePredictorNoUpscale, self).__init__()
input_features = in_channels
self.kps_score_lowres = nn.Conv2d(
input_features,
num_keypoints,
kernel_size=3,
stride=1,
padding=1,
)
self.out_channels = num_keypoints
def forward(self, x):
x = layers.interpolate(x, scale_factor=(2, 2), mode="nearest")
x = self.kps_score_lowres(x)
return x
|
d2go-main
|
d2go/modeling/backbone/modules.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.config import CfgNode as CN
def add_fbnet_default_configs(_C):
"""FBNet options and default values"""
_C.MODEL.FBNET = CN()
_C.MODEL.FBNET.ARCH = "default"
# custom arch
_C.MODEL.FBNET.ARCH_DEF = ""
_C.MODEL.FBNET.BN_TYPE = "bn"
_C.MODEL.FBNET.NUM_GROUPS = 32 # for gn usage only
_C.MODEL.FBNET.SCALE_FACTOR = 1.0
# the output channels will be divisible by WIDTH_DIVISOR
_C.MODEL.FBNET.WIDTH_DIVISOR = 1
_C.MODEL.FBNET.DW_CONV_SKIP_BN = True
_C.MODEL.FBNET.DW_CONV_SKIP_RELU = True
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.DET_HEAD_LAST_SCALE = 1.0
_C.MODEL.FBNET.DET_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.DET_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.KPTS_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.KPTS_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.KPTS_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.MASK_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.MASK_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.MASK_HEAD_STRIDE = 0
# 0 to use all blocks defined in arch_def
_C.MODEL.FBNET.RPN_HEAD_BLOCKS = 0
_C.MODEL.FBNET.RPN_BN_TYPE = ""
# number of channels input to trunk
_C.MODEL.FBNET.STEM_IN_CHANNELS = 3
def add_fbnet_v2_default_configs(_C):
_C.MODEL.FBNET_V2 = CN()
_C.MODEL.FBNET_V2.ARCH = "default"
_C.MODEL.FBNET_V2.ARCH_DEF = []
# number of channels input to trunk
_C.MODEL.FBNET_V2.STEM_IN_CHANNELS = 3
_C.MODEL.FBNET_V2.SCALE_FACTOR = 1.0
# the output channels will be divisible by WIDTH_DIVISOR
_C.MODEL.FBNET_V2.WIDTH_DIVISOR = 1
# normalization configs
# name of norm such as "bn", "sync_bn", "gn"
_C.MODEL.FBNET_V2.NORM = "bn"
# for advanced use cases that require extra arguments, pass a list of
# dicts such as [{"num_groups": 8}, {"momentum": 0.1}] (merged in the given order).
# Note that strings written in .yaml will be evaluated by yacs, thus this
# node will become a normal python object.
# https://github.com/rbgirshick/yacs/blob/master/yacs/config.py#L410
_C.MODEL.FBNET_V2.NORM_ARGS = []
_C.MODEL.VT_FPN = CN()
_C.MODEL.VT_FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"]
_C.MODEL.VT_FPN.OUT_CHANNELS = 256
_C.MODEL.VT_FPN.LAYERS = 3
_C.MODEL.VT_FPN.TOKEN_LS = [16, 16, 8, 8]
_C.MODEL.VT_FPN.TOKEN_C = 1024
_C.MODEL.VT_FPN.HEADS = 16
_C.MODEL.VT_FPN.MIN_GROUP_PLANES = 64
_C.MODEL.VT_FPN.NORM = "BN"
_C.MODEL.VT_FPN.POS_HWS = []
_C.MODEL.VT_FPN.POS_N_DOWNSAMPLE = []
def add_bifpn_default_configs(_C):
_C.MODEL.BIFPN = CN()
_C.MODEL.BIFPN.DEPTH_MULTIPLIER = 1
_C.MODEL.BIFPN.SCALE_FACTOR = 1
_C.MODEL.BIFPN.WIDTH_DIVISOR = 8
_C.MODEL.BIFPN.NORM = "bn"
_C.MODEL.BIFPN.NORM_ARGS = []
_C.MODEL.BIFPN.TOP_BLOCK_BEFORE_FPN = False
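# Usage sketch (not part of the original file): these helpers are meant to be called
# on a base config before merging project YAMLs; the `get_cfg` base and the arch name
# below are assumptions for illustration, not the exact D2Go runner wiring.
def _example_apply_fbnet_v2_configs():
    from detectron2.config import get_cfg

    cfg = get_cfg()
    add_fbnet_v2_default_configs(cfg)
    cfg.MODEL.FBNET_V2.ARCH = "FBNetV3_A"
    cfg.MODEL.FBNET_V2.SCALE_FACTOR = 1.0
    return cfg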
|
d2go-main
|
d2go/modeling/backbone/fbnet_cfg.py
|
from typing import Tuple
import torch
from d2go.quantization.learnable_qat import convert_to_learnable_qconfig
from mobile_cv.common.misc.registry import Registry
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION > (1, 10):
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
else:
from torch.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
from mobile_cv.common.misc.oss_utils import fb_overwritable
QCONFIG_CREATOR_REGISTRY = Registry("QCONFIG_CREATOR_REGISTRY")
def set_backend_and_create_qconfig(cfg, *, is_train):
"""
Recommended function to create qconfig given D2Go's quantization config.
"""
# In case we need a different implementation, we can add a new key called
# QUANTIZATION.QCONFIG_CREATOR with "smart" as default value, and use this key
# to toggle between registries.
return QCONFIG_CREATOR_REGISTRY.get("smart")(cfg, is_train=is_train)
def holistic_get_qconfig(backend, is_qat, use_symmetric=False):
"""
Config-less vanilla way to create a QConfig, suitable for explicitly creating qconfigs.
"""
if use_symmetric:
if not backend == "qnnpack":
raise ValueError(
f"Only qnnpack supports Symmetric quantization, given: {backend}"
)
if is_qat:
return torch.ao.quantization.default_symmetric_qnnpack_qat_qconfig
else:
return torch.ao.quantization.default_symmetric_qnnpack_qconfig
else:
if is_qat:
return torch.ao.quantization.get_default_qat_qconfig(backend)
else:
return torch.ao.quantization.get_default_qconfig(backend)
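# Usage sketch (not part of the original file): a post-training fbgemm qconfig versus
# a symmetric qnnpack QAT qconfig, using only the stock torch.ao.quantization helpers
# referenced above.
def _example_holistic_qconfigs():
    ptq_qconfig = holistic_get_qconfig(backend="fbgemm", is_qat=False)
    qat_qconfig = holistic_get_qconfig(backend="qnnpack", is_qat=True, use_symmetric=True)
    return ptq_qconfig, qat_qconfig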
@QCONFIG_CREATOR_REGISTRY.register("smart")
def _smart_set_backend_and_create_qconfig(cfg, *, is_train):
"""
This is the default / "smart" way to create a qconfig based on various configs;
it supports:
- learnable QAT
- set symmetric quantization via backend.
"""
backend, options = _smart_parse_extended_backend(cfg.QUANTIZATION.BACKEND)
is_symmetric = options["is_symmetric"]
# Set backend
torch.backends.quantized.engine = backend
qat_method = cfg.QUANTIZATION.QAT.FAKE_QUANT_METHOD
assert qat_method in ["default", "learnable"]
qconfig = holistic_get_qconfig(
backend=backend, is_qat=is_train, use_symmetric=is_symmetric
)
if is_train and qat_method == "learnable":
qconfig = convert_to_learnable_qconfig(qconfig)
return qconfig
def validate_native_backend(backend):
_PYTORCH_NATIVE_BACKENDS = ["fbgemm", "qnnpack"]
if backend not in _PYTORCH_NATIVE_BACKENDS:
raise ValueError(
f"Unrecognized backend: {backend}, PyTorch"
f" supported backends are: {_PYTORCH_NATIVE_BACKENDS}"
)
@fb_overwritable()
def _smart_parse_extended_backend(extended_backend):
"""
D2Go extends the definition of quantization "backend". In addition to PyTorch's
native backends (i.e. qnnpack and fbgemm), we allow other types of backend so users
can easily express different settings. Here are the supported cases:
1. Symmetric quantization: "qnnpack@symmetric" refers to using QNNPACK with
symmetric QConfig.
"""
backend = extended_backend
# default options
options = {
"is_symmetric": False,
}
if "@symmetric" in backend:
options["is_symmetric"] = True
backend = backend.replace("@symmetric", "", 1)
validate_native_backend(backend)
return backend, options
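# Examples (not part of the original file) of the extended backend syntax parsed above;
# plain backends pass through unchanged while "@symmetric" toggles the option.
def _example_parse_extended_backend():
    assert _smart_parse_extended_backend("fbgemm") == ("fbgemm", {"is_symmetric": False})
    assert _smart_parse_extended_backend("qnnpack@symmetric") == (
        "qnnpack",
        {"is_symmetric": True},
    )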
def smart_decode_backend(extended_backend):
"""
Since we extend the definition of quantization backend, users shouldn't directly use
cfg.QUANTIZATION.BACKEND in PyTorch's context; this is the translation function
if direct use is necessary.
"""
return _smart_parse_extended_backend(extended_backend)[0]
|
d2go-main
|
d2go/quantization/qconfig.py
|
d2go-main
|
d2go/quantization/__init__.py
|
|
#!/usr/bin/env python3
import logging
from functools import partial
import torch
import torch.distributed as dist
from d2go.utils.parse_module_params import iterate_module_named_parameters
from torch.ao.quantization._learnable_fake_quantize import _LearnableFakeQuantize
logger = logging.getLogger(__name__)
def mixin_with_subclass(module, mix_class):
"""Create a subclass of type(module) and mix_class while using all the data
from the `module` object
"""
ModuleType = type(module)
class SubClass(mix_class, ModuleType):
def __init__(self, module):
assert isinstance(module, ModuleType)
# initialize the parent by copying the dict directly
self.__dict__ = module.__dict__.copy()
ret = SubClass(module)
return ret
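# Illustration (not part of the original file): the returned object is an instance of a
# new class inheriting from both the mixin and the original module type, and it shares
# the original module's __dict__, so parameters and buffers are the same objects.
def _example_mixin_with_subclass():
    import torch.nn as nn

    class _ExampleMixin:
        def extra_hook(self):
            return "from-mixin"

    linear = nn.Linear(4, 2)
    mixed = mixin_with_subclass(linear, _ExampleMixin)
    # Still a Linear (same weight tensor) and also carries the mixin method.
    assert isinstance(mixed, nn.Linear) and mixed.weight is linear.weight
    return mixed.extra_hook()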
def _has_module(model, module_type):
for x in model.modules():
if isinstance(x, module_type):
return True
return False
def check_for_learnable_fake_quant_ops(qat_method, model):
"""Make sure learnable observers are used if qat method is `learnable`"""
if qat_method.startswith("learnable"):
if not _has_module(model, _LearnableFakeQuantize):
raise Exception(
"No learnable fake quant is used for learnable quantzation, please use d2go.quantization.learnable_qat.get_learnable_qat_qconfig() to get proper qconfig"
)
def convert_to_learnable_qconfig(qconfig):
"""
Convert a QConfig to its learnable counterpart.
"""
def _update_fused_moving_avg_obs_fake_quantize(keywords):
# requires setting use_grad_scaling to True; all other parameters are the same
# as the default setting of FusedMovingAvgObsFakeQuantize (both qnnpack and fbgemm).
assert "use_grad_scaling" not in keywords
keywords["use_grad_scaling"] = True
return keywords
_OVERWRITE_PARAMS = {
# map from supported FakeQuant type to its new parameters in order to convert
# it to a learnable FakeQuant
torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize: _update_fused_moving_avg_obs_fake_quantize
}
def _update_to_learnable(wrapper):
assert isinstance(
wrapper, torch.ao.quantization.observer._PartialWrapper
), wrapper
assert isinstance(wrapper.p, partial), wrapper
keywords_updater = _OVERWRITE_PARAMS[wrapper.p.func]
keywords = keywords_updater(wrapper.p.keywords)
new_p = partial(_LearnableFakeQuantize, *wrapper.p.args, **keywords)
wrapper.p = new_p
return wrapper
activation = _update_to_learnable(qconfig.activation)
weight = _update_to_learnable(qconfig.weight)
return torch.quantization.QConfig(activation=activation, weight=weight)
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def sync_tensor(data):
world_size = get_world_size()
if world_size > 1:
dist.all_reduce(data, op=dist.ReduceOp.SUM)
data /= world_size
def toggle_lqat_fake_quant(mod, enable):
"""Toggle fake quantization for learnable qat"""
if type(mod) == _LearnableFakeQuantize:
mod.toggle_fake_quant(enable)
# enable/disable fake quantization for learnable qat
enable_lqat_fake_quant = partial(toggle_lqat_fake_quant, enable=True)
disable_lqat_fake_quant = partial(toggle_lqat_fake_quant, enable=False)
def toggle_lqat_static_observer(mod, enable):
"""Toggle static observers for learnable qat"""
if type(mod) == _LearnableFakeQuantize:
mod.toggle_observer_update(enable)
# enable/disable static observer for learnable qat
enable_lqat_static_observer = partial(toggle_lqat_static_observer, enable=True)
disable_lqat_static_observer = partial(toggle_lqat_static_observer, enable=False)
def enable_lqat_learnable_observer(mod):
"""Enable learning observers, will disable static observer updates"""
if type(mod) == _LearnableFakeQuantize:
sync_tensor(mod.scale.data)
sync_tensor(mod.zero_point.data)
mod.toggle_qparam_learning(enabled=True).toggle_observer_update(enabled=False)
def disable_lqat_learnable_observer(mod):
"""Disable learning observers"""
if type(mod) == _LearnableFakeQuantize:
mod.toggle_qparam_learning(enabled=False)
def get_optimizer_param_groups_learnable_qat(model, _):
"""Set the weight decay for scale/zero_point for learnable_fake_quant to 0"""
params = []
for (
_module_name,
module,
module_param_name,
value,
) in iterate_module_named_parameters(model, check_requires_grad=False):
if isinstance(module, _LearnableFakeQuantize):
if module_param_name in ("scale", "zero_point"):
params += [
{
"params": [value],
"weight_decay": 0.0,
}
]
return params
def _is_observer_key(state_dict_key):
observer_keys = ["activation_post_process", "weight_fake_quant"]
return any(x in state_dict_key for x in observer_keys)
def _is_q_state_dict(state_dict):
return any(_is_observer_key(k) for k in state_dict)
class ModelGetOptimizerParamGroupLearnableQATMixin:
def get_optimizer_param_groups(self, opts):
ret = []
if hasattr(super(), "get_optimizer_param_groups"):
ret = super().get_optimizer_param_groups(opts)
ret += get_optimizer_param_groups_learnable_qat(self, opts)
return ret
def setup_qat_get_optimizer_param_groups(model, qat_method):
"""Add a function `get_optimizer_param_groups` to the model so that it could
return proper weight decay for learnable qat
"""
if not qat_method.startswith("learnable"):
return model
assert _is_q_state_dict(model.state_dict())
model = mixin_with_subclass(model, ModelGetOptimizerParamGroupLearnableQATMixin)
assert hasattr(model, "get_optimizer_param_groups")
return model
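# --- Illustrative usage sketch (editor addition, not part of the library) ---
# A minimal, hedged example of how the toggles above are typically applied over a
# QAT schedule via `model.apply(...)`. The iteration thresholds are placeholders;
# in D2Go the actual schedule is driven by the QAT hook and config keys such as
# QUANTIZATION.QAT.START_ITER.
def _example_lqat_schedule_step(model, cur_iter, start_iter=35000, learnable_iter=36000):
    if cur_iter >= start_iter:
        # turn on fake quant and keep static observers collecting statistics
        model.apply(enable_lqat_fake_quant)
        model.apply(enable_lqat_static_observer)
    if cur_iter >= learnable_iter:
        # switch scale/zero_point to learnable parameters; this also stops
        # static observer updates inside enable_lqat_learnable_observer
        model.apply(enable_lqat_learnable_observer)
    return model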
|
d2go-main
|
d2go/quantization/learnable_qat.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Tuple
import torch
from mobile_cv.common.misc.oss_utils import fb_overwritable
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION > (1, 10):
from torch.ao.quantization.quantize import convert
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
else:
from torch.quantization.quantize import convert
from torch.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
@fb_overwritable()
def get_prepare_fx_fn(cfg, is_qat):
return prepare_qat_fx if is_qat else prepare_fx
@fb_overwritable()
def get_convert_fn(cfg, example_inputs=None, qconfig_mapping=None, backend_config=None):
if cfg.QUANTIZATION.EAGER_MODE:
return convert
else:
return convert_fx
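# --- Illustrative usage sketch (editor addition, not part of the library) ---
# A hedged example of combining the two helpers above for FX graph mode: prepare a
# model, run calibration/QAT elsewhere, then convert. `cfg` is assumed to be a D2Go
# CfgNode with QUANTIZATION.EAGER_MODE = False; `qconfig_mapping` and
# `example_input` are supplied by the caller.
def _example_fx_prepare_then_convert(cfg, model, qconfig_mapping, example_input, is_qat=False):
    prepare_fn = get_prepare_fx_fn(cfg, is_qat)
    prepared = prepare_fn(
        model,
        qconfig_mapping=qconfig_mapping,
        example_inputs=(example_input,),
    )
    # ... run QAT training or PTQ calibration on `prepared` here ...
    convert_fn = get_convert_fn(cfg, example_inputs=(example_input,))
    return convert_fn(prepared)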
|
d2go-main
|
d2go/quantization/fx.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import math
from typing import Any, Dict, Tuple
import detectron2.utils.comm as comm
import torch
from d2go.quantization import learnable_qat
from d2go.quantization.fx import get_convert_fn, get_prepare_fx_fn
from d2go.quantization.qconfig import (
set_backend_and_create_qconfig,
smart_decode_backend,
)
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.engine.train_loop import HookBase, SimpleTrainer
from detectron2.utils.file_io import PathManager
from mobile_cv.arch.quantization.observer import update_stat as observer_update_stat
from mobile_cv.arch.utils import fuse_utils
from mobile_cv.common.misc.iter_utils import recursive_iterate
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION > (1, 10):
from torch.ao.quantization.quantize import convert
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
else:
from torch.quantization.quantize import convert
from torch.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
logger = logging.getLogger(__name__)
_CONVERT_FX_CALLBACK_ATTRIBUTE = "_convert_fx_callback"
_STATE_DICT_KEY = "state_dict"
_OLD_STATE_DICT_KEY = "model"
_OLD_EMA_KEY = "ema_state"
def _is_observer_key(state_dict_key):
observer_keys = ["activation_post_process", "weight_fake_quant"]
return any(x in state_dict_key for x in observer_keys)
# TODO: replace QATCheckpointer with central D2GoCheckpointer which supports customize
# state_dict re-mapping (which includes QAT re-mapping).
class QATCheckpointer(DetectionCheckpointer):
"""
Extend the Checkpointer to support loading (QAT / non-QAT) weight into
(QAT / non-QAT) model.
"""
def __init__(
self,
model,
save_dir="",
*,
load_ckpt_to_gpu=False,
save_to_disk=None,
**checkpointables,
):
super().__init__(
model,
save_dir,
save_to_disk=save_to_disk,
**checkpointables,
)
self.load_ckpt_to_gpu = load_ckpt_to_gpu
@classmethod
def _is_q_state_dict(cls, state_dict):
return any(_is_observer_key(k) for k in state_dict)
# HACK: temporarily put it here, move to central D2GoCheckpointer later on
def _load_file(self, filename):
# support loading lightning checkpointer
if filename.endswith(".ckpt"):
# assume file is from lightning; no one else seems to use the ".ckpt" extension
with PathManager.open(filename, "rb") as f:
data = self._torch_load(f)
_convert_to_d2(data)
return data
return super()._load_file(filename)
def _torch_load(self, f):
device = (
"cuda:{}".format(torch.cuda.current_device())
if self.load_ckpt_to_gpu
else "cpu"
)
return torch.load(f, map_location=torch.device(device))
def _load_model(self, checkpoint):
model_is_qat = self._is_q_state_dict(self.model.state_dict())
checkpoint_is_qat = self._is_q_state_dict(checkpoint["model"])
if model_is_qat and not checkpoint_is_qat:
logger.info("Loading QAT model with non-QAT checkpoint, ignore observers!")
mapping = getattr(self.model, "_non_qat_to_qat_state_dict_map", {})
# map the key from non-QAT model to QAT model if possible
checkpoint_state_dict = {
mapping.get(k, k): v for k, v in checkpoint["model"].items()
}
checkpoint["model"] = checkpoint_state_dict
incompatible = super()._load_model(checkpoint)
# suppress the missing observer keys warning
# NOTE: for some reason incompatible.missing_keys can have duplicated keys,
# here we replace the entire list rather than calling .remove()
missing_non_qat_keys = [
k for k in incompatible.missing_keys if not _is_observer_key(k)
]
incompatible.missing_keys[:] = missing_non_qat_keys
return incompatible
elif not model_is_qat and checkpoint_is_qat:
raise NotImplementedError()
elif model_is_qat and checkpoint_is_qat:
# TODO: maybe suppress shape mismatch
# For models trained with QAT and per-channel quant, the initial size of the
# buffers in fake_quant and observer modules does not reflect the size in
# state_dict, which is updated only when convert is called.
return super()._load_model(checkpoint)
else:
return super()._load_model(checkpoint)
def add_quantization_default_configs(_C):
CfgNode = type(_C)
_C.QUANTIZATION = CfgNode()
# Note: EAGER_MODE == False currently represents FX graph mode quantization
_C.QUANTIZATION.EAGER_MODE = True
# Available backends include PyTorch's natively supported backends (i.e. fbgemm and
# qnnpack), plus D2Go-defined backends such as "qnnpack@symmetric".
_C.QUANTIZATION.BACKEND = "fbgemm"
# used to enable the meta-arch's set_custom_qscheme (needs to be implemented);
# this is a limited implementation where only a str is provided to change options
_C.QUANTIZATION.CUSTOM_QSCHEME = ""
_C.QUANTIZATION.MODULES = []
# Lightning quantization callback name
_C.QUANTIZATION.NAME = ""
_C.QUANTIZATION.ACT_BITS = 8
_C.QUANTIZATION.WEIGHT_BITS = 8
# quantization-aware training
_C.QUANTIZATION.QAT = CfgNode()
_C.QUANTIZATION.QAT.ENABLED = False
# Methods for QAT training, could be "default" or "learnable"
_C.QUANTIZATION.QAT.FAKE_QUANT_METHOD = "default"
# QAT will use more GPU memory; the user can change this factor to reduce the batch size
# after fake quant is enabled. Setting it to 0.5 should guarantee no memory increase
# compared with when QAT is disabled.
_C.QUANTIZATION.QAT.BATCH_SIZE_FACTOR = 1.0
# the iteration number to start QAT, (i.e. enable fake quant). The default value of
# SOLVER.MAX_ITER is 40k and SOLVER.STEPS is (30k,), here we turn on QAT at 35k, so
# the last 5k iterations will run with QAT with decreased learning rate.
_C.QUANTIZATION.QAT.START_ITER = 35000
# the iteration number to enable observer, it's usually set to be the same as
# QUANTIZATION.QAT.START_ITER.
_C.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER = 35000
# the iteration number to enable the learnable observer, only used when FAKE_QUANT_METHOD == "learnable"
_C.QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER = 36000
# the iteration number to disable observer, here it's 3k after enabling the fake
# quant, 3k roughly corresponds to 7 out of 90 epochs in classification.
_C.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER = 35000 + 3000
# the iteration number to freeze BN, here it's 2k after enabling the fake quant; 2k
# roughly corresponds to 5 out of 90 epochs for classification.
_C.QUANTIZATION.QAT.FREEZE_BN_ITER = 35000 + 2000
# qat hook will run observers update_stat if it exists
# after update_observer_stats_period iters
_C.QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIODICALLY = False
_C.QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIOD = 1
_C.QUANTIZATION.WEIGHT_OBSERVERS = None
_C.QUANTIZATION.ACTIVATION_OBSERVERS = None
# post-training quantization
_C.QUANTIZATION.PTQ = CfgNode()
_C.QUANTIZATION.PTQ.CALIBRATION_NUM_IMAGES = 16 # NOTE: this is actually iterations
_C.QUANTIZATION.PTQ.CALIBRATION_FORCE_ON_GPU = False
# register deprecated and renamed keys
_C.register_deprecated_key("QUANTIZATION.QAT.LOAD_PRETRAINED")
_C.register_renamed_key("QUANTIZATION.QAT.BACKEND", "QUANTIZATION.BACKEND")
_C.register_deprecated_key("QUANTIZATION.ENABLE_CUSTOM_QSCHEME")
_C.register_deprecated_key("QUANTIZATION.SILICON_QAT")
_C.register_deprecated_key("QUANTIZATION.SILICON_QAT.ENABLED")
# TODO: model.to(device) might not work for detection meta-arch, this function is the
# workaround, in general, we might need a meta-arch API for this if needed.
def _cast_model_to_device(model, device):
if hasattr(
model, "_cast_model_to_device"
): # we can make this formal by removing "_"
return model._cast_model_to_device(device)
else:
logger.warning(
"model.to(device) doesn't guarentee moving the entire model, "
"if customization is needed, please implement _cast_model_to_device "
"for the MetaArch"
)
return model.to(device)
def add_d2_quant_mapping(mappings):
"""HACK: Add d2 specific module mapping for eager model quantization"""
import torch.ao.quantization.quantization_mappings as qm
for k, v in mappings.items():
if k not in qm.get_default_static_quant_module_mappings():
qm.DEFAULT_STATIC_QUANT_MODULE_MAPPINGS[k] = v
if k not in qm.get_default_qat_module_mappings():
qm.DEFAULT_QAT_MODULE_MAPPINGS[k] = v
# The `mock_quantization_type` decorate may not be needed anymore to unify
# detectron2.layers modules and torch.nn modules since Pytorch 1.5. See comments on D23790034.
def mock_quantization_type(quant_func):
import builtins
import functools
from unittest import mock
import detectron2.layers as d2l
type_mapping = {d2l.Linear: torch.nn.Linear}
from d2go.utils.misc import check_version
if check_version(torch, "1.7.2", warning_only=True):
add_d2_quant_mapping(type_mapping)
real_type = builtins.type
def _new_type(obj):
rtype = real_type(obj)
return type_mapping.get(rtype, rtype)
@functools.wraps(quant_func)
def wrapper(cfg, model, *args, **kwargs):
if d2l.Linear == torch.nn.Linear:
# the mock is not needed when the type is already the expected one; consider
# removing the related code
logger.warning(
"`detectron2.layers.Linear` is already the expected type (torch.nn.Linear), "
"consider removing this code `mock_quantization_type`."
)
return quant_func(cfg, model, *args, **kwargs)
if not cfg.QUANTIZATION.EAGER_MODE:
return quant_func(cfg, model, *args, **kwargs)
# `from_float()` in `torch.nn.quantized.modules.linear.Linear` and
# `torch.nn.qat.modules.linear` checks whether the type of `mod` is torch.nn.Linear,
# hack it to return the expected value
with mock.patch("torch.nn.quantized.modules.linear.type") as mock_type:
with mock.patch("torch.nn.qat.modules.linear.type") as mock_type2:
mock_type.side_effect = _new_type
mock_type2.side_effect = _new_type
return quant_func(cfg, model, *args, **kwargs)
return wrapper
def default_prepare_for_quant(cfg, model):
"""
Default implementation of preparing a model for quantization. This function will
be called before training if QAT is enabled, or before calibration during PTQ if
the model is not already quantized.
NOTE:
- This is the simplest implementation; most meta-archs need their own version.
- For eager mode, the user should make sure the returned model has Quant/DeQuant
stubs inserted. This can be done by wrapping the model or defining the model
with quant stubs.
- QAT/PTQ can be determined by model.training.
- Currently the input model can be changed inplace since we won't re-use the
input model.
- Currently this API doesn't include the final torch.ao.quantization.prepare(_qat)
call since existing usecases don't have further steps after it.
Args:
model (nn.Module): a non-quantized model.
cfg (CfgNode): config
Return:
nn.Module: a ready model for QAT training or PTQ calibration
"""
assert cfg.QUANTIZATION.EAGER_MODE
qconfig = set_backend_and_create_qconfig(cfg, is_train=model.training)
model = fuse_utils.fuse_model(
model,
is_qat=cfg.QUANTIZATION.QAT.ENABLED,
inplace=True,
)
model.qconfig = qconfig
# TODO(future diff): move the torch.ao.quantization.prepare(...) call
# here, to be consistent with the FX branch
logger.info("Setup the model with qconfig:\n{}".format(qconfig))
return model
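# --- Illustrative sketch (editor addition, not part of the library) ---
# The docstring above notes that in eager mode the returned model must contain
# Quant/DeQuant stubs. A minimal, hypothetical wrapper that inserts them around an
# arbitrary model:
class _ExampleQuantWrapper(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.quant = torch.ao.quantization.QuantStub()
        self.model = model
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x):
        # quantize the input, run the fp32/fake-quant model, dequantize the output
        return self.dequant(self.model(self.quant(x)))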
def default_custom_prepare_fx(cfg, model, is_qat, example_input=None):
"""
Similar to default_prepare_for_quant, but for FX graph mode.
Args:
example_input (Optional[Any]): optional example input for the model;
if it is not provided, `model.example_input` will be used when an example
input is required. Note: d2go assumes there is always a single example_input.
"""
assert not cfg.QUANTIZATION.EAGER_MODE
qconfig = set_backend_and_create_qconfig(cfg, is_train=is_qat)
qconfig_dict = {"": qconfig}
if example_input is None:
raise NotImplementedError(
"prepare FX requires `example_input`, user should implement this for"
" their own MetaArch."
)
prepare_fn = get_prepare_fx_fn(cfg, is_qat)
model = prepare_fn(
model,
qconfig_mapping=qconfig_dict,
example_inputs=(example_input,),
)
convert_fn = get_convert_fn(cfg, (example_input,))
return model, convert_fn
def prepare_fake_quant_model(cfg, model, is_qat, example_input=None):
"""
Centralized function to prepare fp32 model (D2Go's MetaArch) to fake quant model.
"""
# TODO: create a warning for the direct use of `torch.ao.quantization.get_default_qconfig`
# or `torch.ao.quantization.get_default_qat_qconfig` without calling D2Go's high-level
# `set_backend_and_create_qconfig` API.
if cfg.QUANTIZATION.EAGER_MODE:
if hasattr(model, "prepare_for_quant"):
model = model.prepare_for_quant(cfg)
else:
logger.info(
"Using default implementation for prepare_for_quant (eager mode)"
)
model = default_prepare_for_quant(cfg, model)
# NOTE: eager model needs to call prepare after `prepare_for_quant`
if is_qat:
torch.ao.quantization.prepare_qat(model, inplace=True)
else:
torch.ao.quantization.prepare(model, inplace=True)
else:
# FX graph mode requires the model to be symbolically traceable, swap common
# modules like SyncBN to FX-friendly version.
if not is_qat:
# NOTE: we only do this for PTQ, because we want to keep using unmodified
# model during QAT.
model = fuse_utils.swap_modules(model)
if hasattr(model, "custom_prepare_fx"):
ret = model.custom_prepare_fx(cfg, is_qat, example_input)
if not (isinstance(ret, tuple) and len(ret) == 2):
raise ValueError(
"`custom_prepare_fx` requires return model and convert_callback"
)
model, convert_fx_callback = ret
else:
logger.info(
"Using default implementation for custom_prepare_fx (FX graph mode)"
)
model, convert_fx_callback = default_custom_prepare_fx(
cfg, model, is_qat, example_input
)
# HACK: store the convert_callback function as model attribute, which can be
# later accessed to convert fake quant model to quantized model. We'll find a
# better place to store this.
if hasattr(model, _CONVERT_FX_CALLBACK_ATTRIBUTE):
raise AttributeError(
f"{_CONVERT_FX_CALLBACK_ATTRIBUTE} is already set in model: {model}"
)
setattr(model, _CONVERT_FX_CALLBACK_ATTRIBUTE, convert_fx_callback)
return model
def convert_to_quantized_model(cfg, fp32_model):
"""
Centralized function to convert a fake quant model (fp32 operators) to a "real"
quantized model (int8 operators).
"""
if cfg.QUANTIZATION.EAGER_MODE:
convert_fn = get_convert_fn(cfg)
int8_model = convert_fn(fp32_model, inplace=False)
else:
# FX graph mode quantization
if not hasattr(fp32_model, _CONVERT_FX_CALLBACK_ATTRIBUTE):
raise AttributeError(
f"Can't find {_CONVERT_FX_CALLBACK_ATTRIBUTE} in model, please check "
f"`prepare_fake_quant_model` has been called: {fp32_model}"
)
convert_fx_callback = getattr(fp32_model, _CONVERT_FX_CALLBACK_ATTRIBUTE)
int8_model = convert_fx_callback(fp32_model)
return int8_model
@mock_quantization_type
def post_training_quantize(cfg, model, data_loader):
"""Calibrate a model, convert it to a quantized pytorch model"""
model = copy.deepcopy(model)
model.eval()
# TODO: check why some parameters will have gradient
for param in model.parameters():
param.requires_grad = False
example_input = next(iter(data_loader))
model = prepare_fake_quant_model(cfg, model, False, example_input)
logger.info("Prepared the PTQ model for calibration:\n{}".format(model))
# Option for forcing running calibration on GPU, works only when the model supports
# casting both model and inputs.
calibration_force_on_gpu = (
cfg.QUANTIZATION.PTQ.CALIBRATION_FORCE_ON_GPU and torch.cuda.is_available()
)
if calibration_force_on_gpu:
# NOTE: model.to(device) may not handle cases such as normalizer, FPN, only
# do move to GPU if specified.
_cast_model_to_device(model, "cuda")
calibration_iters = cfg.QUANTIZATION.PTQ.CALIBRATION_NUM_IMAGES
for idx, inputs in enumerate(data_loader):
# Setting CALIBRATION_NUM_IMAGES to 0 allows skipping calibration
if idx == calibration_iters:
break
logger.info("Running calibration iter: {}/{}".format(idx, calibration_iters))
if calibration_force_on_gpu:
iters = recursive_iterate(inputs)
for x in iters:
if isinstance(x, torch.Tensor):
iters.send(x.to("cuda"))
inputs = iters.value
with torch.no_grad():
model(inputs)
else:
logger.warning("Can't run enough calibration iterations")
# cast model back to the original device
if calibration_force_on_gpu:
_cast_model_to_device(model, cfg.MODEL.DEVICE)
return model
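# --- Illustrative usage sketch (editor addition, not part of the library) ---
# A hedged end-to-end example combining the helpers above: run PTQ calibration to
# obtain a fake-quant model, then convert it to an int8 model. `cfg`, `model` and
# `data_loader` are assumed to come from a D2Go runner.
def _example_ptq_to_int8(cfg, model, data_loader):
    fake_quant_model = post_training_quantize(cfg, model, data_loader)
    int8_model = convert_to_quantized_model(cfg, fake_quant_model)
    return int8_model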
@mock_quantization_type
def setup_qat_model(
cfg,
model_fp32,
enable_fake_quant: bool = False,
enable_observer: bool = False,
enable_learnable_observer: bool = False,
):
assert cfg.QUANTIZATION.QAT.FAKE_QUANT_METHOD in [
"default",
"learnable",
"learnable_act",
]
if hasattr(model_fp32, "_non_qat_to_qat_state_dict_map"):
raise RuntimeError("The model is already setup to be QAT, cannot setup again!")
device = model_fp32.device
# FIXME: seems that we can remove this
torch.backends.quantized.engine = smart_decode_backend(cfg.QUANTIZATION.BACKEND)
qat_method = cfg.QUANTIZATION.QAT.FAKE_QUANT_METHOD
# prepare for qat may modify the fp32 model directly so we create a copy
model_fp32_state_dict = model_fp32.state_dict()
# prepare model for qat
model = prepare_fake_quant_model(cfg, model_fp32, True)
# make sure the proper qconfig are used in the model
learnable_qat.check_for_learnable_fake_quant_ops(qat_method, model)
# Move newly added observers to the original device
model.to(device)
if not enable_fake_quant:
logger.info("Disabling fake quant ...")
model.apply(torch.ao.quantization.disable_fake_quant)
model.apply(learnable_qat.disable_lqat_fake_quant)
if not enable_observer:
logger.info("Disabling static observer ...")
model.apply(torch.ao.quantization.disable_observer)
model.apply(learnable_qat.disable_lqat_static_observer)
if not enable_learnable_observer and qat_method.startswith("learnable"):
logger.info("Disabling learnable observer ...")
model.apply(learnable_qat.disable_lqat_learnable_observer)
# qat state dict mapper
if not getattr(model, "_non_qat_to_qat_state_dict_map", None):
model = _setup_non_qat_to_qat_state_dict_map(
model_fp32_state_dict, model, is_eager_mode=cfg.QUANTIZATION.EAGER_MODE
)
# qat optimizer group for learnable qat
model = learnable_qat.setup_qat_get_optimizer_param_groups(model, qat_method)
return model
def _setup_non_qat_to_qat_state_dict_map(
model_fp32_state_dict, model_qat, is_eager_mode
):
original_state_dict_shapes = {k: v.shape for k, v in model_fp32_state_dict.items()}
# fuse_model and prepare_qat may change the state_dict of the model; keep a map from
# the original model keys to the QAT model keys in order to load weights from a
# non-QAT model.
new_state_dict_shapes = {k: v.shape for k, v in model_qat.state_dict().items()}
new_state_dict_non_observer_keys = [
k for k in new_state_dict_shapes if not _is_observer_key(k)
]
assert len(new_state_dict_non_observer_keys) == len(original_state_dict_shapes)
if is_eager_mode:
for n_k, o_k in zip(
new_state_dict_non_observer_keys, original_state_dict_shapes
):
assert (
new_state_dict_shapes[n_k] == original_state_dict_shapes[o_k]
), f"QAT model shapes is inconsistent. FP32.{o_k}={original_state_dict_shapes[o_k]} , QAT.{n_k}={new_state_dict_shapes[n_k]}"
# _q_state_dict_map will store
model_qat._non_qat_to_qat_state_dict_map = dict(
zip(original_state_dict_shapes, new_state_dict_non_observer_keys)
)
else:
# in FX, the order of where modules appear in the state_dict may change,
# so we need to match by key
def get_new_bn_key(old_bn_key):
# Tries to adjust the key for conv-bn fusion: a structure like
#   root
#     - conv
#     - bn
# becomes
#   root
#     - conv
#       - bn
# i.e. the bn module is moved under the conv module after fusion.
return old_bn_key.replace(".bn.", ".conv.bn.")
model_qat._non_qat_to_qat_state_dict_map = {}
for key in original_state_dict_shapes.keys():
if key in new_state_dict_non_observer_keys:
model_qat._non_qat_to_qat_state_dict_map[key] = key
else:
maybe_new_bn_key = get_new_bn_key(key)
if maybe_new_bn_key in new_state_dict_non_observer_keys:
model_qat._non_qat_to_qat_state_dict_map[key] = maybe_new_bn_key
return model_qat
class QATHook(HookBase):
def __init__(self, cfg, build_data_loader_func=None):
self.cfg = cfg
self.build_data_loader_func = build_data_loader_func
self._applied = {
"enable_fake_quant": False,
"enable_observer": False,
"enable_learnable_observer": False,
"disable_observer": False,
"freeze_bn_stats": False,
}
assert (
cfg.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER
<= cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER
), "Can't diable observer before enabling it"
def before_step(self):
cur_iter = self.trainer.iter
model = self.trainer.model
cfg = self.cfg
if (
not self._applied["enable_fake_quant"]
and cur_iter >= cfg.QUANTIZATION.QAT.START_ITER
):
logger.info(
"[QAT] enable fake quant to start QAT, iter = {}".format(cur_iter)
)
model.apply(torch.ao.quantization.enable_fake_quant)
model.apply(learnable_qat.enable_lqat_fake_quant)
self._applied["enable_fake_quant"] = True
_reset_qat_data_loader_if_needed(
self.cfg, self.trainer, self.build_data_loader_func
)
if (
not self._applied["enable_observer"]
and cur_iter >= cfg.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER
and cur_iter < cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER
):
logger.info("[QAT] enable static observer, iter = {}".format(cur_iter))
model.apply(torch.ao.quantization.enable_observer)
model.apply(learnable_qat.enable_lqat_static_observer)
self._applied["enable_observer"] = True
if (
not self._applied["enable_learnable_observer"]
and cur_iter >= cfg.QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER
):
logger.info(f"[QAT] enabling learnable observer, iter = {cur_iter}")
model.apply(learnable_qat.enable_lqat_learnable_observer)
self._applied["enable_learnable_observer"] = True
if (
not self._applied["disable_observer"]
and cur_iter >= cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER
):
logger.info(
"[QAT] disabling observer for sub seq iters, iter = {}".format(cur_iter)
)
model.apply(torch.ao.quantization.disable_observer)
model.apply(learnable_qat.disable_lqat_static_observer)
model.apply(learnable_qat.disable_lqat_learnable_observer)
self._applied["disable_observer"] = True
if (
not self._applied["freeze_bn_stats"]
and cur_iter >= cfg.QUANTIZATION.QAT.FREEZE_BN_ITER
):
logger.info(
"[QAT] freezing BN for subseq iters, iter = {}".format(cur_iter)
)
model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
self._applied["freeze_bn_stats"] = True
if (
self._applied["enable_fake_quant"]
and not self._applied["disable_observer"]
and cfg.QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIODICALLY
and cur_iter % cfg.QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIOD == 0
):
logger.info(f"[QAT] updating observers, iter = {cur_iter}")
model.apply(observer_update_stat)
def _reset_qat_data_loader_if_needed(cfg, trainer, build_loader_func):
if cfg.QUANTIZATION.QAT.BATCH_SIZE_FACTOR != 1.0:
loader_cfg = cfg.clone()
loader_cfg.defrost()
num_gpus = comm.get_world_size()
old_bs = cfg.SOLVER.IMS_PER_BATCH // num_gpus
new_bs = math.ceil(old_bs * cfg.QUANTIZATION.QAT.BATCH_SIZE_FACTOR)
loader_cfg.SOLVER.IMS_PER_BATCH = new_bs * num_gpus
loader_cfg.freeze()
logger.info(
"[QAT] Rebuild data loader with batch size per GPU: {} -> {}".format(
old_bs, new_bs
)
)
assert isinstance(
trainer, SimpleTrainer
), "Trainer needs to be a subclass of SimpleTrainer to support resetting the dataloader"
trainer.reset_data_loader(lambda: build_loader_func(loader_cfg))
def forward_custom_prepare_fx(root, sub_module_name, orig_ret):
"""Helper function to forward return of `custom_prepare_fx` from sub module"""
new_sub_module, callback = orig_ret
setattr(root, sub_module_name, new_sub_module)
def new_callback(m):
setattr(m, sub_module_name, callback(getattr(m, sub_module_name)))
return m
return root, new_callback
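# --- Illustrative usage sketch (editor addition, not part of the library) ---
# A hedged example of how a meta-arch with a single quantizable sub-module might
# use `forward_custom_prepare_fx` from its own `custom_prepare_fx`. The class and
# attribute names are placeholders, not an API prescribed by D2Go.
class _ExampleMetaArchWithQuantizableBackbone(torch.nn.Module):
    def __init__(self, backbone):
        super().__init__()
        self.backbone = backbone

    def forward(self, x):
        return self.backbone(x)

    def custom_prepare_fx(self, cfg, is_qat, example_input=None):
        # prepare only the backbone with the default FX logic, then forward the
        # (sub-module, convert_callback) pair back to the root model
        ret = default_custom_prepare_fx(cfg, self.backbone, is_qat, example_input)
        return forward_custom_prepare_fx(self, "backbone", ret)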
def _convert_to_d2(lightning_checkpoint: Dict[str, Any]) -> None:
prefix = "model" # based on DefaultTask.model.
# NOTE: str.lstrip strips a set of characters, not a prefix; remove the "model." prefix explicitly
old_keys = [x[len(prefix) + 1 :] if x.startswith(prefix + ".") else x for x in lightning_checkpoint[_STATE_DICT_KEY]]
for key in old_keys:
if f"{prefix}.{key}" in lightning_checkpoint[_STATE_DICT_KEY]:
lightning_checkpoint[_STATE_DICT_KEY][key] = lightning_checkpoint[
_STATE_DICT_KEY
][f"{prefix}.{key}"]
del lightning_checkpoint[_STATE_DICT_KEY][f"{prefix}.{key}"]
for old, new in zip(
[_STATE_DICT_KEY, "global_step"], [_OLD_STATE_DICT_KEY, "iteration"]
):
lightning_checkpoint[new] = lightning_checkpoint[old]
del lightning_checkpoint[old]
for old, new in zip(
["optimizer_states", "lr_schedulers"], ["optimizer", "scheduler"]
):
if old not in lightning_checkpoint:
continue
lightning_checkpoint[new] = [lightning_checkpoint[old]]
del lightning_checkpoint[old]
for key in [
"epoch",
"pytorch-lightning_versio",
"callbacks",
"hparams_name",
"hyper_parameters",
]:
if key in lightning_checkpoint:
del lightning_checkpoint[key]
|
d2go-main
|
d2go/quantization/modeling.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import ast
import builtins
import contextlib
import glob
import hashlib
import logging
import os
import tempfile
import time
import traceback
from collections import defaultdict
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple
import pkg_resources
import yaml
from mobile_cv.common.misc.py import dynamic_import, MoreMagicMock
from mobile_cv.common.misc.registry import (
CLASS_OR_FUNCTION_TYPES,
LazyRegisterable,
Registry,
)
logger = logging.getLogger(__name__)
orig_import = builtins.__import__
orig_open = builtins.open
orig__register = Registry._register
_INSIDE_BOOTSTRAP = False
_IS_BOOTSTRAPPED = False
_BOOTSTRAP_PACKAGE = "d2go.registry._bootstrap"
_BOOTSTRAP_CACHE_FILENAME = "registry_bootstrap.v1.yaml"
def _log(lvl: int, msg: str):
_VERBOSE_LEVEL = 0
if _VERBOSE_LEVEL >= lvl:
print(msg)
# Simple version copied from fvcore/iopath
def _get_cache_dir() -> str:
cache_dir = os.path.expanduser("~/.torch/d2go_cache")
try:
os.makedirs(cache_dir, exist_ok=True)
assert os.access(cache_dir, os.R_OK | os.W_OK | os.X_OK)
except (OSError, AssertionError):
tmp_dir = os.path.join(tempfile.gettempdir(), "d2go_cache")
logger.warning(f"{cache_dir} is not accessible! Using {tmp_dir} instead!")
os.makedirs(tmp_dir, exist_ok=True)
cache_dir = tmp_dir
return cache_dir
class _catchtime:
def __enter__(self):
self.time = time.perf_counter()
return self
def __exit__(self, type, value, traceback):
self.time = time.perf_counter() - self.time
def _match(name, module_full_name, match_submodule=False):
if name == module_full_name:
return True
if match_submodule:
if name.startswith(module_full_name + "."):
return True
return False
def _match_any(name, module_full_names, match_submodule=False):
return any(
_match(name, module_full_name, match_submodule=match_submodule)
for module_full_name in module_full_names
)
def _import_mock(name, globals=None, locals=None, fromlist=(), level=0):
use_orig_import = False
# enable some first-party packages
if _match_any(
name,
[
# allow using pdb during patch
"pdb",
"readline",
"linecache",
"reprlib",
"io",
# allow using builtins.__import__
"builtins",
],
):
use_orig_import = True
# enable some known third-party packages; these packages might have been imported
if _match_any(
name,
[
# "torch",
# "numpy",
# "mobile_cv.arch.fbnet_v2.modeldef_utils",
],
):
use_orig_import = True
# enable modules under d2go.registry
if _match(name, "d2go.registry", match_submodule=True):
use_orig_import = True
if use_orig_import:
# import as normal
return orig_import(name, globals, locals, fromlist=fromlist, level=level)
else:
# return a Mock instead of making a real import
_log(2, f"mock import: {name}; fromlist={fromlist}; level={level}")
m = MoreMagicMock()
return m
def _open_mock(*args, **kwargs):
return MoreMagicMock()
def _register_mock(self, name: Optional[str], obj: Any) -> None:
"""Convert `obj` to LazyRegisterable"""
# Instead of registering the (possibly mocked) object which is created under the
# "fake" package _BOOTSTRAP_PACKAGE, register a lazy-object (i.e. a string) pointing
# to its original (possibly un-imported) module.
def _resolve_real_module(module_in_bootstrap_package):
assert module_in_bootstrap_package.startswith(_BOOTSTRAP_PACKAGE + ".")
orig_module = module_in_bootstrap_package[len(_BOOTSTRAP_PACKAGE + ".") :]
return orig_module
if isinstance(obj, MoreMagicMock):
assert obj.mocked_obj_info is not None, obj
if name is None:
name = obj.mocked_obj_info["__name__"]
obj = LazyRegisterable(
module=_resolve_real_module(obj.mocked_obj_info["__module__"]),
name=obj.mocked_obj_info["__qualname__"],
)
elif isinstance(obj, LazyRegisterable):
pass
else:
assert isinstance(obj, CLASS_OR_FUNCTION_TYPES), obj
if name is None:
name = obj.__name__
obj = LazyRegisterable(
module=_resolve_real_module(obj.__module__), name=obj.__qualname__
)
assert isinstance(obj, LazyRegisterable)
# During bootstrap, it's possible that the object is already registered
# (as non-lazy) because a library was imported before being bootstrapped. Simply
# skip the lazy registration.
if name in self and not isinstance(self[name], LazyRegisterable):
if self[name].__module__ == obj.module and (
obj.name is None or self[name].__name__ == obj.name
):
_log(2, f"{obj} has already registered as {self[name]}, skip...")
return
orig__register(self, name, obj)
@contextlib.contextmanager
def _bootstrap_patch():
global _INSIDE_BOOTSTRAP
builtins.__import__ = _import_mock
builtins.open = _open_mock
Registry._register = _register_mock
_INSIDE_BOOTSTRAP = True
try:
yield
finally:
builtins.__import__ = orig_import
builtins.open = orig_open
Registry._register = orig__register
_INSIDE_BOOTSTRAP = False
def _get_registered_names() -> Dict[str, List[str]]:
"""Return the currently registered names for each registry"""
# NOTE: currently only support D2Go's builtin registry module, which can be extended
# in future.
import d2go.registry.builtin
modules = [
d2go.registry.builtin,
]
registered = {}
for module in modules:
registered_in_module = {
f"{module.__name__}.{name}": obj.get_names()
for name, obj in module.__dict__.items()
if isinstance(obj, Registry)
}
registered.update(registered_in_module)
return registered
class BootstrapStatus(Enum):
CACHED = 0
FULLY_IMPORTED = 1
PARTIALLY_IMPORTED = 2
FAILED = 3
@dataclass
class CachedResult:
sha1: str
registered: Dict[str, str]
status: str # string representation of BootstrapStatus
def _bootstrap_file(
rel_path: str,
catch_exception: bool,
cached_result: Optional[CachedResult] = None,
) -> Tuple[CachedResult, BootstrapStatus]:
# convert relative path to full module name
# eg. ".../d2go/a/b/c.py" -> "d2go.a.b.c"
# eg. ".../d2go/a/b/__init__.py" -> "d2go.a.b"
package_root = os.path.dirname(pkg_resources.resource_filename("d2go", ""))
filename = os.path.join(package_root, rel_path)
assert rel_path.endswith(".py")
module = rel_path[: -len(".py")]
if module.endswith("/__init__"):
module = module[: -len("/__init__")]
module = module.replace("/", ".")
exec_globals = {
"__file__": filename,
# execute in a "fake" package to minimize potential side effect
"__name__": "{}.{}".format(_BOOTSTRAP_PACKAGE, module),
}
with _catchtime() as t:
with open(filename) as f:
content = f.read()
file_hash = hashlib.sha1(content.encode("utf-8")).hexdigest()
if cached_result is not None and file_hash == cached_result.sha1:
_log(
2,
f"Hash {file_hash} matches, lazy registering cached registerables ...",
)
registerables = cached_result.registered
for registry_module_dot_name, names_to_register in registerables.items():
registry = dynamic_import(registry_module_dot_name)
for name in names_to_register:
# we only store the registered name in the cache; here we know the
# module of the bootstrapped file, which should be sufficient.
registry.register(name, LazyRegisterable(module=module))
return cached_result, BootstrapStatus.CACHED
tree = ast.parse(content)
# HACK: convert multiple inheritance to single inheritance, this is needed
# because current implementation of MoreMagicMock can't handle this well.
# eg. `class MyClass(MyMixin, nn.Module)` -> `class MyClass(MyMixin)`
def _truncate_multiple_inheritance(ast_tree):
for stmt in ast_tree.body:
if isinstance(stmt, ast.ClassDef):
if len(stmt.bases) > 1:
stmt.bases = stmt.bases[:1]
stmt.keywords.clear()
_truncate_multiple_inheritance(stmt)
_truncate_multiple_inheritance(tree)
_log(2, f"Parsing AST takes {t.time} sec")
prev_registered = _get_registered_names()
with _catchtime() as t:
try:
with _bootstrap_patch():
exec(compile(tree, filename, "exec"), exec_globals) # noqa
status = BootstrapStatus.FULLY_IMPORTED
except _BootstrapBreakException:
status = BootstrapStatus.PARTIALLY_IMPORTED
except Exception as e:
if catch_exception:
_log(
1,
"Encountered the following error during bootstrap:"
+ "".join(traceback.format_exception(type(e), e, e.__traceback__)),
)
else:
raise e
status = BootstrapStatus.FAILED
_log(2, f"Execute file takes {t.time} sec")
# compare and get the newly registered
cur_registered = _get_registered_names()
assert set(cur_registered.keys()) == set(prev_registered.keys())
newly_registered = {
k: sorted(set(cur_registered[k]) - set(prev_registered[k]))
for k in sorted(cur_registered.keys())
}
newly_registered = {k: v for k, v in newly_registered.items() if len(v) > 0}
result = CachedResult(
sha1=file_hash,
registered=newly_registered,
status=status.name,
)
return result, status
class _BootstrapBreakException(Exception):
pass
def break_bootstrap():
"""
In case the file can't be perfectly executed by `_bootstrap_file`, users can call
this function to break the process. Because the remaining content in the file will
be skipped, avoid placing registration statements after calling this function.
"""
if _INSIDE_BOOTSTRAP:
# raise a special exception which will be caught later
raise _BootstrapBreakException()
# non-op outside of bootstrap
return
def lazy_on_bootstrap(f: Callable) -> Callable:
"""
A decorator to mark a function as "lazy" during bootstrap, such that the decorated
function will skip the execution and immediately return a MagicMock object during
the bootstrap (the decorator is a non-op outside of bootstrap). This can be used to
hide un-executable code (usually related to import-time computation) during the
bootstrap.
For registration related import-time computation, please consider using the
`LazyRegisterable` since it will also save time for the normal import.
"""
def wrapped(*args, **kwargs):
if _INSIDE_BOOTSTRAP:
return MoreMagicMock()
else:
return f(*args, **kwargs)
return wrapped
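# --- Illustrative usage sketch (editor addition, not part of the library) ---
# A hedged example of hiding import-time computation from the bootstrap with the
# decorator above; the function name and the "expensive" work are placeholders.
@lazy_on_bootstrap
def _example_expensive_import_time_setup():
    # e.g. scanning the filesystem or building a large lookup table at import time;
    # during bootstrap this returns a MoreMagicMock instead of executing
    return {"initialized": True}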
def _load_cached_results(filename: str) -> Dict[str, CachedResult]:
with open(filename) as f:
content = f.read()
loaded = yaml.safe_load(content)
assert isinstance(loaded, dict), f"Wrong format: {content}"
results = {
filename: CachedResult(**result_dic) for filename, result_dic in loaded.items()
}
return results
def _dump_cached_results(cached_results: Dict[str, CachedResult], filename: str):
results_dict = {
filename: asdict(result_dic) for filename, result_dic in cached_results.items()
}
dumped = yaml.safe_dump(results_dict)
with open(filename, "w") as f:
f.write(dumped)
def bootstrap_registries(enable_cache: bool = True, catch_exception: bool = True):
"""
Bootstrap all registries so that all objects are effectively registered.
This function will "import" all the files from certain locations (eg. d2go package)
and look for a set of known registries (eg. d2go's builtin registries). The "import"
should not have any side effect, which is achieved by mocking builtins.__import__.
"""
global _IS_BOOTSTRAPPED
if _IS_BOOTSTRAPPED:
logger.warning("Registries are already bootstrapped, skipped!")
return
if _INSIDE_BOOTSTRAP:
_log(1, "calling bootstrap_registries() inside bootstrap process, skip ...")
return
start = time.perf_counter()
# load cached bootstrap results if exist
cached_bootstrap_results: Dict[str, CachedResult] = {}
if enable_cache:
filename = os.path.join(_get_cache_dir(), _BOOTSTRAP_CACHE_FILENAME)
if os.path.isfile(filename):
logger.info(f"Loading bootstrap cache at {filename} ...")
cached_bootstrap_results = _load_cached_results(filename)
else:
logger.info(
f"Can't find the bootstrap cache at {filename}, start from scratch"
)
# locate all the files under d2go package
# NOTE: we may extend to support user-defined locations if necessary
d2go_root = pkg_resources.resource_filename("d2go", "")
logger.info(f"Start bootstrapping for d2go_root: {d2go_root} ...")
all_files = glob.glob(f"{d2go_root}/**/*.py", recursive=True)
all_files = [os.path.relpath(x, os.path.dirname(d2go_root)) for x in all_files]
new_bootstrap_results: Dict[str, CachedResult] = {}
files_per_status = defaultdict(list)
time_per_file = {}
for filename in all_files:
_log(1, f"bootstrap for file: {filename}")
cached_result = cached_bootstrap_results.get(filename, None)
with _catchtime() as t:
result, status = _bootstrap_file(filename, catch_exception, cached_result)
new_bootstrap_results[filename] = result
files_per_status[status].append(filename)
time_per_file[filename] = t.time
end = time.perf_counter()
duration = end - start
status_breakdown = ", ".join(
[f"{len(files_per_status[status])} {status.name}" for status in BootstrapStatus]
)
logger.info(
f"Finished bootstrapping for {len(all_files)} files ({status_breakdown})"
f" in {duration:.2f} seconds."
)
exception_files = [
filename
for filename, result in new_bootstrap_results.items()
if result.status == BootstrapStatus.FAILED.name
]
if len(exception_files) > 0:
logger.warning(
"Found exception for the following {} files (either during this bootstrap"
" run or from previous cached result), registration inside those files"
" might not work!\n{}".format(
len(exception_files),
"\n".join(exception_files),
)
)
# Log slowest Top-N files
TOP_N = 100
_log(2, f"Top-{TOP_N} slowest files during bootstrap:")
all_time = [(os.path.relpath(k, d2go_root), v) for k, v in time_per_file.items()]
for x in sorted(all_time, key=lambda x: x[1])[-TOP_N:]:
_log(2, x)
if enable_cache:
filename = os.path.join(_get_cache_dir(), _BOOTSTRAP_CACHE_FILENAME)
logger.info(f"Writing updated bootstrap results to {filename} ...")
_dump_cached_results(new_bootstrap_results, filename)
_IS_BOOTSTRAPPED = True
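# --- Illustrative usage sketch (editor addition, not part of the library) ---
# Typical (hedged) entry-point usage of the bootstrap above: call it once at program
# start so that registry lookups by name work without manually importing the modules
# that register the objects.
def _example_program_entry():
    bootstrap_registries(enable_cache=True, catch_exception=True)
    # ... registries are now populated (lazily); build the runner/model afterwards ...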
|
d2go-main
|
d2go/registry/bootstrap.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
d2go-main
|
d2go/registry/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from mobile_cv.common.misc.registry import Registry
"""
This file contains all D2Go's builtin registries with global scope.
- These registries can be treated as "static". A bootstrap process happens at the
beginning of the program to make it work as if the registrations happened at
compile time (like in C++). In other words, the objects are guaranteed to be
registered to those builtin registries without the user importing their code.
- Since the namespace is global, the registered name has to be unique across all projects.
"""
DEMO_REGISTRY = Registry("DEMO")
# Registry for config updater
CONFIG_UPDATER_REGISTRY = Registry("CONFIG_UPDATER")
# Registry for meta-arch, registered nn.Module should follow D2Go's meta-arch API
META_ARCH_REGISTRY = Registry("META_ARCH")
# Modeling hook registry
MODELING_HOOK_REGISTRY = Registry("MODELING_HOOK")
MODELING_HOOK_REGISTRY.__doc__ = """
Registry for modeling hook.
The registered object will be called with `obj(cfg)`
and expected to return a `ModelingHook` object.
"""
# Distillation algorithms
DISTILLATION_ALGORITHM_REGISTRY = Registry("DISTILLATION_ALGORITHM")
# Distillation helper to allow user customization
DISTILLATION_HELPER_REGISTRY = Registry("DISTILLATION_HELPER")
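# --- Illustrative usage sketch (editor addition, not part of the library) ---
# A hedged example of registering and later fetching an object through one of the
# global registries above; `_example_demo_builder` is a placeholder name.
@DEMO_REGISTRY.register()
def _example_demo_builder(cfg):
    return {"demo": True}

# Elsewhere (after `bootstrap_registries()` has run), the object can be looked up by
# name without importing this module explicitly:
#     builder = DEMO_REGISTRY.get("_example_demo_builder")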
|
d2go-main
|
d2go/registry/builtin.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.evaluation.prediction_count_evaluation import PredictionCountEvaluator
__all__ = [
"PredictionCountEvaluator",
]
# Populating registries
# @fb-only: from d2go.evaluation import fb as _fb # noqa
|
d2go-main
|
d2go/evaluation/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, TypeVar, Union
T = TypeVar("T")
# "accuracy" in D2Go is defined by a 4-level dictionary in the order of:
# model_tag -> dataset -> task -> metrics
AccuracyDict = Dict[str, Dict[str, Dict[str, Dict[str, T]]]]
# "metric" in D2Go is a nested dictionary, which may have arbitrary levels.
MetricsDict = Union[Dict[str, "MetricsDict"], T]
|
d2go-main
|
d2go/evaluation/api.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import heapq
import itertools
import logging
from contextlib import contextmanager
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator, SemSegEvaluator
from detectron2.utils.comm import all_gather, synchronize
logger = logging.getLogger(__name__)
class MultiSemSegEvaluator(DatasetEvaluator):
"""
Evaluate multiple results for the same target. SemSegEvaluator requires the
outputs of model to be like:
[
{"sem_seg": Tensor},
]
This evaluator allows evaluating multiple predictions; it may take outputs like:
[
{
"prediction_1": {"sem_seg": Tensor},
"prediction_2": {"sem_seg": Tensor},
}
]
"""
_DUMMY_KEY_PREFIX = "dummy_eval"
def __init__(self, dataset_name, *args, distributed, output_dir=None, **kwargs):
self._distributed = distributed
self._output_dir = output_dir
self.evaluators = {}
self.dataset_name = dataset_name
self.init_args = args
self.init_kwargs = kwargs
def _get_evaluator(self, key, superclass_name=None):
if key in self.evaluators:
return self.evaluators[key]
def create_evaluator_and_reset(dataset_name):
logger.info(
"Create an instance of SemSegEvaluator for {} on dataset {} ...".format(
key, dataset_name
)
)
evaluator = SemSegEvaluator(
dataset_name,
*self.init_args,
**self.init_kwargs,
distributed=self._distributed,
output_dir=self._output_dir,
)
evaluator.reset()
return evaluator
if superclass_name is None:
self.evaluators[key] = create_evaluator_and_reset(self.dataset_name)
else:
# NOTE: create temporary single-super-class dataset and use standard
# evaluator for the dataset
metadata = MetadataCatalog.get(self.dataset_name)
tmp_dataset_name = "__AUTOGEN__{}@{}".format(
self.dataset_name, superclass_name
)
from d2go.data.fb.semantic_seg import register_sem_seg
if tmp_dataset_name not in MetadataCatalog:
if superclass_name in metadata.pseudo_gt_classes:
mask_dir = metadata.pseudo_gt_mask_dir
else:
mask_dir = metadata.mask_dir
register_sem_seg(
tmp_dataset_name,
metadata=metadata.mcs_metadata[superclass_name],
image_root=metadata.image_root,
sem_seg_root=metadata.sem_seg_root,
instances_json=metadata.json_file,
mask_dir=mask_dir.format(superclass_name),
)
self.evaluators[key] = create_evaluator_and_reset(tmp_dataset_name)
return self.evaluators[key]
def reset(self):
for evaluator in self.evaluators.values():
evaluator.reset()
def process(self, inputs, outputs):
if "sem_seg" in outputs[0].keys():
# normal eval is compatible with SemSegEvaluator
self._get_evaluator("sem_seg").process(inputs, outputs)
else:
# only the file_name of inputs is needed for SemSegEvaluator
inputs_ = [{"file_name": inp["file_name"]} for inp in inputs]
for frame_name in outputs[0].keys():
if isinstance(outputs[0]["detect"]["sem_seg"], dict): # multi-class
for superclass_name in outputs[0]["detect"]["sem_seg"]:
outputs_ = []
for outp in outputs:
x = outp[frame_name]
x = {"sem_seg": x["sem_seg"][superclass_name]}
outputs_.append(x)
self._get_evaluator(
"sem_seg-{}-{}".format(frame_name, superclass_name),
superclass_name=superclass_name,
).process(inputs_, outputs_)
else:
# convert the output to SemSegEvaluator's format
outputs_ = [outp[frame_name] for outp in outputs]
self._get_evaluator("sem_seg-{}".format(frame_name)).process(
inputs_, outputs_
)
def evaluate(self):
results = {}
# The evaluation will sometimes get stuck if the following code is not used.
# `SemSegEvaluator` will do synchronization between processes when computing
# the metrics. In some cases the number of self.evaluators will not be the
# same between processes and the code will get stuck in synchronization.
# For example, when evaluating 10 images on 8 GPUs, only 5 GPUs
# will be used for evaluation (each has 2 images); the remaining 3 GPUs will have
# zero self.evaluators as they are constructed on-the-fly when calling
# self.process().
# We create additional evaluators so that all processes have the same size
# of evaluators so that the synchronization will not get stuck.
evaluator_size = len(self.evaluators)
synchronize()
evaluator_size_list = all_gather(evaluator_size)
max_evaluator_size = max(evaluator_size_list)
if evaluator_size < max_evaluator_size:
# create additional evaluators so that all processes have the same
# size of evaluators
metadata = MetadataCatalog.get(self.dataset_name)
mcs_metadata = metadata.get("mcs_metadata")
for idx in range(max_evaluator_size - evaluator_size):
dummy_key = f"{self._DUMMY_KEY_PREFIX}_{idx}"
assert dummy_key not in self.evaluators
if mcs_metadata:
for k in mcs_metadata:
self._get_evaluator(dummy_key, superclass_name=k).reset()
else:
self._get_evaluator(dummy_key).reset()
for name, evaluator in self.evaluators.items():
result = evaluator.evaluate()
# NOTE: .evaluate() returns None for non-main process
if result is not None:
results[name] = result["sem_seg"]
return results
class MultiSemSegVidEvaluator(MultiSemSegEvaluator):
"""
Evaluate semantic segmentation results for video clips. MultiSemSegVidEvaluator
requires the outputs of model to be like:
[
{"file_names": Tensor},
]
"""
def process(self, inputs, outputs):
assert "file_names" in inputs[0]
inputs_ = []
for batch_id in range(len(inputs)):
for frame_i in range(len(inputs[batch_id]["file_names"])):
inputs_.append({"file_name": inputs[batch_id]["file_names"][frame_i]})
for name in outputs[0].keys():
# convert the output to SemSegEvaluator's format
outputs_ = [outp[name] for outp in outputs]
self.evaluators["sem_seg_{}".format(name)].process(inputs_, outputs_)
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
"""
A context manager that will prevent any logging messages
triggered during the body from being processed.
:param highest_level: the maximum logging level in use.
This would only need to be changed if a custom level greater than CRITICAL
is defined.
"""
# two kind-of hacks here:
# * can't get the highest logging level in effect => delegate to the user
# * can't get the current module-level override => use an undocumented
# (but non-private!) interface
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
class PerImageEvaluator(object):
def __init__(
self,
evaluator,
callback,
distributed=True,
playback_criterion=None,
playback_limit=0,
):
self._evaluator = evaluator
self._evaluator._distributed = False
self._evaluator._output_dir = None
self._distributed = distributed
self.callback = callback
self.results_per_image = []
# record the N most interesting results for playback
self.playback_heap = []
self.playback_criterion = playback_criterion
self.playback_limit = playback_limit
def reset(self):
self._evaluator.reset()
def process(self, inputs, outputs):
self._evaluator.process(inputs, outputs)
assert len(inputs) == 1
with all_logging_disabled():
result = self._evaluator.evaluate()
self.results_per_image.append((inputs[0], result))
if self.playback_criterion:
score = self.playback_criterion(result)
heapq.heappush(self.playback_heap, (score, inputs[0], outputs[0], result))
if len(self.playback_heap) > self.playback_limit:
heapq.heappop(self.playback_heap)
self._evaluator.reset()
def evaluate(self):
if self._distributed:
synchronize()
results_per_image = all_gather(self.results_per_image)
self.results_per_image = list(itertools.chain(*results_per_image))
playback_heap = all_gather(self.playback_heap)
playback_heap = list(itertools.chain(*playback_heap))
# each GPU has its local N minimums; sort and take the global minimums
playback_heap = sorted(playback_heap, key=lambda x: x[0])
self.playback_heap = playback_heap[: self.playback_limit]
self.callback(self)
return {}
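# --- Illustrative usage sketch (editor addition, not part of the library) ---
# A hedged example of wrapping a standard evaluator with PerImageEvaluator to keep
# the N "most interesting" images for playback. `base_evaluator`, the criterion and
# the callback are placeholders; the "mIoU" key assumes a SemSegEvaluator-style result.
def _example_build_per_image_evaluator(base_evaluator, playback_limit=10):
    def _criterion(result):
        # lower score == more interesting; keep the images with the worst mIoU
        return result["sem_seg"]["mIoU"] if result else 0.0

    def _callback(per_image_eval):
        for score, _inp, _outp, _res in per_image_eval.playback_heap:
            logger.info(f"playback candidate, criterion score = {score}")

    return PerImageEvaluator(
        base_evaluator,
        _callback,
        playback_criterion=_criterion,
        playback_limit=playback_limit,
    )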
|
d2go-main
|
d2go/evaluation/sem_seg_evaluation.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import logging
from collections import OrderedDict
import detectron2.utils.comm as comm
import numpy as np
from detectron2.evaluation import DatasetEvaluator
logger = logging.getLogger(__name__)
class PredictionCountEvaluator(DatasetEvaluator):
"""
Custom Detectron2 evaluator class to simply count the number of predictions
e.g. on a dataset of hard negatives where there are no annotations, and
summarize results into interpretable metrics.
See class pattern from detectron2.evaluation.evaluator.py, especially
:func:`inference_on_dataset` to see how this class will be called.
"""
def __init__(self, distributed: bool = True):
self._distributed = distributed
self.prediction_counts = []
self.confidence_scores = []
def reset(self):
self.prediction_counts = []
self.confidence_scores = []
def process(self, inputs, outputs):
"""
Params:
input: the input that's used to call the model.
output: the return value of `model(input)`
"""
# outputs format:
# [{
# "instances": Instances(
# num_instances=88,
# fields=[scores = tensor([list of len num_instances])]
# ), ...
# },
# ... other dicts
# ]
for output_dict in outputs:
instances = output_dict["instances"]
self.prediction_counts.append(len(instances))
self.confidence_scores.extend(instances.get("scores").tolist())
def evaluate(self):
"""
Returns:
In detectron2.tools.train_net.py, the following format is expected:
dict:
* key: the name of the task (e.g., bbox)
* value: a dict of {metric name: score}, e.g.: {"AP50": 80}
"""
if self._distributed:
comm.synchronize()
prediction_counts = comm.gather(self.prediction_counts, dst=0)
prediction_counts = list(itertools.chain(*prediction_counts))
confidence_scores = comm.gather(self.confidence_scores, dst=0)
confidence_scores = list(itertools.chain(*confidence_scores))
if not comm.is_main_process():
return {}
else:
prediction_counts = self.prediction_counts
confidence_scores = self.confidence_scores
mpi = np.mean(prediction_counts)
mcp = np.mean(confidence_scores)
output_metrics = OrderedDict(
{
"false_positives": {
"predictions_per_image": mpi,
"confidence_per_prediction": mcp,
}
}
)
logger.info(f"mean predictions per image: {mpi}")
logger.info(f"mean confidence per prediction: {mcp}")
return output_metrics
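# --- Illustrative usage sketch (editor addition, not part of the library) ---
# A hedged example of plugging the evaluator above into detectron2's standard
# evaluation loop; `model` and `data_loader` are assumed to come from a D2Go runner.
def _example_count_predictions(model, data_loader):
    from detectron2.evaluation import inference_on_dataset

    evaluator = PredictionCountEvaluator(distributed=True)
    # returns {"false_positives": {"predictions_per_image": ..., "confidence_per_prediction": ...}}
    return inference_on_dataset(model, data_loader, evaluator)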
|
d2go-main
|
d2go/evaluation/prediction_count_evaluation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
from collections import abc
from typing import Any, Iterable, List, Union
import torch
from detectron2.evaluation import (
DatasetEvaluator,
DatasetEvaluators,
inference_on_dataset as inference_on_dataset_d2,
)
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
logger = logging.getLogger(__name__)
def DatasetEvaluators_has_finished_process(self):
ret = True
for x in self._evaluators:
if hasattr(x, "has_finished_process"):
ret &= x.has_finished_process()
else:
ret &= False
return ret
# patch evaluators defined in d2
DatasetEvaluators.has_finished_process = DatasetEvaluators_has_finished_process
def inference_on_dataset(
model: torch.nn.Module,
data_loader: Iterable,
evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None],
):
"""
A drop-in replacement for d2's inference_on_dataset to run inference on datasets,
supports customization for checkpointing
* has_finished_process(self) -> bool: return True if `self.process()` could be skipped
"""
if evaluator is None:
return inference_on_dataset_d2(model, data_loader, evaluator)
if isinstance(evaluator, abc.MutableSequence):
evaluator = DatasetEvaluators(evaluator)
if not (
hasattr(evaluator, "has_finished_process") and evaluator.has_finished_process()
):
return inference_on_dataset_d2(model, data_loader, evaluator)
evaluator.reset()
results = evaluator.evaluate()
if results is None:
results = {}
return results
class ResultCache(object):
def __init__(self, cache_dir: str):
"""A utility class to handle save/load cache data across processes"""
self.cache_str = cache_dir
@property
def cache_file(self):
if self.cache_str is None:
return None
return os.path.join(self.cache_str, f"_result_cache_.{comm.get_rank()}.pkl")
def has_cache(self):
return PathManager.isfile(self.cache_file)
def load(self, gather: bool = False):
"""
Load cache results.
gather (bool): gather cache results across ranks into a list
"""
if self.cache_file is None or not PathManager.exists(self.cache_file):
ret = None
else:
with PathManager.open(self.cache_file, "rb") as fp:
ret = torch.load(fp)
logger.info(f"Loaded from checkpoint {self.cache_file}")
if gather:
ret = comm.all_gather(ret)
return ret
def save(self, data: Any):
if self.cache_file is None:
return
PathManager.mkdirs(os.path.dirname(self.cache_file))
with PathManager.open(self.cache_file, "wb") as fp:
torch.save(data, fp)
logger.info(f"Saved checkpoint to {self.cache_file}")
|
d2go-main
|
d2go/evaluation/evaluator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
API for exporting a pytorch model to a predictor, the predictor contains model(s) in
deployable format and predefined functions as glue code. The exported predictor should
generate the same output as the original pytorch model. (See predictor/api.py for details of
predictor)
This API defines customizable methods for the pytorch model:
prepare_for_export (required by the default export_predictor): returns
PredictorExportConfig, which provides information about how to export the predictor.
NOTE:
1: There's a difference between predictor type and model type. model type
refers to predefined deployable format such as caffe2, torchscript(_int8),
while the predictor type can be anything that "export_predictor" can
recognize.
2: The standard model exporting methods are provided by the library code, they're
meant to be modularized and can be used by customized export_predictor as well.
"""
import json
import logging
import os
from typing import Iterable
import torch.nn as nn
from d2go.config import CfgNode
from d2go.export.api import ModelExportMethod, ModelExportMethodRegistry
from d2go.quantization.modeling import (
convert_to_quantized_model,
post_training_quantize,
)
from detectron2.utils.file_io import PathManager
from mobile_cv.arch.utils import fuse_utils
from mobile_cv.predictor.api import ModelInfo, PredictorInfo
logger = logging.getLogger(__name__)
def is_predictor_quantized(predictor_type: str) -> bool:
return "int8" in predictor_type
def convert_model(
cfg: CfgNode,
pytorch_model: nn.Module,
predictor_type: str,
data_loader: Iterable,
):
"""Converts pytorch model to pytorch model (fuse for fp32, fake quantize for int8)"""
return (
convert_quantized_model(cfg, pytorch_model, data_loader)
if is_predictor_quantized(predictor_type)
else _convert_fp_model(cfg, pytorch_model, data_loader)
)
def convert_quantized_model(
cfg: CfgNode, pytorch_model: nn.Module, data_loader: Iterable
) -> nn.Module:
if not cfg.QUANTIZATION.QAT.ENABLED:
# For PTQ, converts pytorch model to fake-quantized pytorch model. For QAT, the
# built pytorch model is already fake-quantized.
logger.info(
"The model is not quantized during training, running post"
" training quantization ..."
)
pytorch_model = post_training_quantize(cfg, pytorch_model, data_loader)
# only check bn exists in ptq as qat still has bn inside fused ops
if fuse_utils.check_bn_exist(pytorch_model):
logger.warn("Post training quantized model has bn inside fused ops")
logger.info(f"Converting quantized model {cfg.QUANTIZATION.BACKEND}...")
# convert the fake-quantized model to int8 model
pytorch_model = convert_to_quantized_model(cfg, pytorch_model)
logger.info(f"Quantized Model:\n{pytorch_model}")
return pytorch_model
def _convert_fp_model(
cfg: CfgNode, pytorch_model: nn.Module, data_loader: Iterable
) -> nn.Module:
"""Converts floating point predictor"""
pytorch_model = fuse_utils.fuse_model(pytorch_model)
logger.info(f"Fused Model:\n{pytorch_model}")
if fuse_utils.count_bn_exist(pytorch_model) > 0:
logger.warning("BN existed in pytorch model after fusing.")
return pytorch_model
def convert_and_export_predictor(
cfg,
pytorch_model,
predictor_type,
output_dir,
data_loader,
):
"""
Entry point for convert and export model. This involves two steps:
- convert: converting the given `pytorch_model` to another format, currently
mainly for quantizing the model.
- export: exporting the converted `pytorch_model` to a predictor. This step
should not alter the behaviour of the model.
"""
pytorch_model = convert_model(cfg, pytorch_model, predictor_type, data_loader)
return export_predictor(cfg, pytorch_model, predictor_type, output_dir, data_loader)
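# Illustrative usage (assumes `cfg`, `pytorch_model` and `data_loader` are already built;
# the output directory below is made up):
def _example_convert_and_export(cfg, pytorch_model, data_loader):
    # Produces /tmp/output/torchscript_int8 containing the deployable model files and a
    # predictor_info.json assembled by default_export_predictor.
    return convert_and_export_predictor(
        cfg, pytorch_model, "torchscript_int8", "/tmp/output", data_loader
    )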
def export_predictor(cfg, pytorch_model, predictor_type, output_dir, data_loader):
"""
Interface for exporting a pytorch model to a predictor of the given type. This function
can be overridden to achieve a customized exporting procedure, e.g. using non-default
optimization passes, composing traced models, etc.
Args:
cfg (CfgNode): the config
pytorch_model (nn.Module): a pytorch model, mostly also a meta-arch
predictor_type (str): a string which specifies the type of predictor. Note that
the type is interpreted by "export_predictor"; the default
implementation uses the deployable model format (e.g. caffe2_fp32,
torchscript_int8) as the predictor type.
output_dir (str): the parent directory where the predictor will be saved
data_loader: data loader for the pytorch model
Returns:
predictor_path (str): the directory of exported predictor, a sub-directory of
"output_dir"
"""
return default_export_predictor(
cfg, pytorch_model, predictor_type, output_dir, data_loader
)
def _export_single_model(
predictor_path,
model,
input_args,
save_path,
model_export_method,
model_export_kwargs,
):
assert isinstance(model, nn.Module), model
# model_export_method either inherits ModelExportMethod or is a key in the registry
model_export_method_str = None
if isinstance(model_export_method, str):
model_export_method_str = model_export_method
model_export_method = ModelExportMethodRegistry.get(model_export_method)
assert issubclass(model_export_method, ModelExportMethod), model_export_method
logger.info(f"Using model export method: {model_export_method}")
load_kwargs = model_export_method.export(
model=model,
input_args=input_args,
save_path=save_path,
export_method=model_export_method_str,
**model_export_kwargs,
)
assert isinstance(load_kwargs, dict)
model_rel_path = os.path.relpath(save_path, predictor_path)
return ModelInfo(
path=model_rel_path,
export_method=f"{model_export_method.__module__}.{model_export_method.__qualname__}",
load_kwargs=load_kwargs,
)
def default_export_predictor(
cfg, pytorch_model, predictor_type, output_dir, data_loader
):
# The default implementation acts based on the PredictorExportConfig returned by
# calling "prepare_for_export". It'll export all sub models in standard way
# according to the "predictor_type".
assert hasattr(pytorch_model, "prepare_for_export"), pytorch_model
inputs = next(iter(data_loader))
export_config = pytorch_model.prepare_for_export(cfg, inputs, predictor_type)
model_inputs = (
export_config.data_generator(inputs)
if export_config.data_generator is not None
else (inputs,)
)
predictor_path = os.path.join(output_dir, predictor_type)
PathManager.mkdirs(predictor_path)
predictor_init_kwargs = {
"preprocess_info": export_config.preprocess_info,
"postprocess_info": export_config.postprocess_info,
"run_func_info": export_config.run_func_info,
}
if isinstance(export_config.model, dict):
models_info = {}
for name, model in export_config.model.items():
save_path = os.path.join(predictor_path, name)
model_info = _export_single_model(
predictor_path=predictor_path,
model=model,
input_args=model_inputs[name] if model_inputs is not None else None,
save_path=save_path,
model_export_method=(
predictor_type
if export_config.model_export_method is None
else export_config.model_export_method[name]
),
model_export_kwargs=(
{}
if export_config.model_export_kwargs is None
else export_config.model_export_kwargs[name]
),
)
models_info[name] = model_info
predictor_init_kwargs["models"] = models_info
else:
save_path = predictor_path  # for a single model, exported files are put under `predictor_path` together with predictor_info.json
model_info = _export_single_model(
predictor_path=predictor_path,
model=export_config.model,
input_args=model_inputs,
save_path=save_path,
model_export_method=export_config.model_export_method or predictor_type,
model_export_kwargs=export_config.model_export_kwargs or {},
)
predictor_init_kwargs["model"] = model_info
# assemble predictor
predictor_info = PredictorInfo(**predictor_init_kwargs)
with PathManager.open(
os.path.join(predictor_path, "predictor_info.json"), "w"
) as f:
json.dump(predictor_info.to_dict(), f, indent=4)
return predictor_path
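# For reference, a single-model torchscript export produced by the default flow above
# typically results in a layout like (illustrative):
#
#   <output_dir>/torchscript/
#       model.jit               # saved TorchScript model
#       data.pth                # example inputs used for tracing
#       torchscript_IR/         # dumped IR for debugging
#       predictor_info.json     # predictor metadata assembled above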
|
d2go-main
|
d2go/export/exporter.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.export import torchscript as _torchscript # noqa
|
d2go-main
|
d2go/export/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import sys
from abc import ABC, abstractmethod
from typing import Callable, Dict, NamedTuple, Optional, Union
import torch.nn as nn
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.registry import Registry
from mobile_cv.predictor.api import FuncInfo
from mobile_cv.predictor.builtin_functions import (
IdentityPostprocess,
IdentityPreprocess,
NaiveRunFunc,
)
if sys.version_info >= (3, 8):
from typing import final
else:
# If the `final` decorator is not available (older python versions), replace it with a
# dummy implementation that does nothing.
def final(func):
return func
class PredictorExportConfig(NamedTuple):
"""
Storing information for exporting a predictor.
Args:
model (any nested iterable structure of nn.Module): the model(s) to be exported
(via tracing/onnx or scripting). This can be sub-model(s) when the predictor
consists of multiple models in deployable format, and/or pre/post processing
is excluded due to tracing requirements or hardware incompatibility.
data_generator (Callable): a function to generate all data needed for tracing,
such that data = data_generator(x), the returned data has the same nested
structure as model. The data for each model will be treated as positional
arguments, i.e. model(*data).
model_export_kwargs (Dict): additional kwargs when exporting each sub-model, it
follows the same nested structure as the model, and may contain information
such as scriptable.
preprocess_info (FuncInfo): info for predictor's preprocess
postprocess_info (FuncInfo): info for predictor's postprocess
run_func_info (FuncInfo): info for predictor's run_func
"""
model: Union[nn.Module, Dict[str, nn.Module]]
data_generator: Optional[Callable] = None
model_export_method: Optional[Union[str, Dict[str, str]]] = None
model_export_kwargs: Optional[Union[Dict, Dict[str, Dict]]] = None
preprocess_info: FuncInfo = FuncInfo.gen_func_info(IdentityPreprocess, params={})
postprocess_info: FuncInfo = FuncInfo.gen_func_info(IdentityPostprocess, params={})
run_func_info: FuncInfo = FuncInfo.gen_func_info(NaiveRunFunc, params={})
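# Illustrative example (not part of d2go): a predictor made of two sub-models. The keys of
# `model`, of the data returned by `data_generator`, and of `model_export_kwargs` share the
# same nested structure; `encoder`/`decoder` below are hypothetical names.
#
#     export_config = PredictorExportConfig(
#         model={"encoder": encoder, "decoder": decoder},
#         data_generator=lambda x: {"encoder": (x,), "decoder": (torch.zeros(1, 16),)},
#         model_export_kwargs={"encoder": {}, "decoder": {"jit_mode": "script"}},
#     )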
class ModelExportMethod(ABC):
"""
Base class for "model export method". Each model export method can export a pytorch
model to a certain deployable format, such as torchscript or caffe2. It consists
of an `export` and a `load` method.
"""
@classmethod
@abstractmethod
def export(cls, model, input_args, save_path, export_method, **export_kwargs):
"""
Export the model to deployable format.
Args:
model (nn.Module): a pytorch model to export
input_args (Tuple[Any]): inputs of model, called as model(*input_args)
save_path (str): directory where the model will be exported
export_method (str): string name for the export method
export_kwargs (Dict): additional parameters for exporting model defined
by each model export method.
Return:
load_kwargs (Dict): additional information (besides save_path) needed in
order to load the exported model. This needs to be JSON serializable.
"""
pass
@classmethod
@abstractmethod
def load(cls, save_path, **load_kwargs):
"""
Load the exported model back for inference.
Args:
save_path (str): directory where the model is stored.
load_kwargs (Dict): additional information to load the exported model.
Returns:
model (nn.Module): an nn.Module (often a wrapper for non-torchscript
types like caffe2); it works the same as the original pytorch model,
i.e. getting the same output when called as model(*input_args)
"""
pass
@classmethod
@final
def test_export_and_load(
cls, model, input_args, export_method, export_kwargs, output_checker
):
"""
Illustrate the life-cycle of export and load, used for testing.
"""
with make_temp_directory("test_export_and_load") as save_path:
# run the original model
assert isinstance(model, nn.Module), model
assert isinstance(input_args, (list, tuple)), input_args
original_output = model(*input_args)
# export the model
model.eval() # TODO: decide where eval() should be called
load_kwargs = cls.export(
model, input_args, save_path, export_method, **export_kwargs
)
# sanity check for load_kwargs
assert isinstance(load_kwargs, dict), load_kwargs
assert json.dumps(load_kwargs), load_kwargs
# load the exported model back
loaded_model = cls.load(save_path, **load_kwargs)
# run the loaded model
assert isinstance(loaded_model, nn.Module), loaded_model
new_output = loaded_model(*input_args)
# compare outputs
output_checker(new_output, original_output)
ModelExportMethodRegistry = Registry("ModelExportMethod", allow_override=True)
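# Illustrative sketch (not part of d2go): registering a custom export method so it can be
# selected via the predictor type string "my_format"; the serialization format is hypothetical.
#
#     @ModelExportMethodRegistry.register("my_format")
#     class MyFormatExport(ModelExportMethod):
#         @classmethod
#         def export(cls, model, input_args, save_path, export_method, **export_kwargs):
#             ...  # serialize `model` under `save_path`
#             return {"filename": "model.bin"}
#
#         @classmethod
#         def load(cls, save_path, *, filename="model.bin"):
#             ...  # rebuild an nn.Module from save_path/filename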
|
d2go-main
|
d2go/export/api.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import logging
import os
from typing import Any, AnyStr, Dict, List, NamedTuple, Optional, Set, Tuple
import torch
from d2go.export.api import ModelExportMethod, ModelExportMethodRegistry
from detectron2.config.instantiate import dump_dataclass, instantiate
from detectron2.export import dump_torchscript_IR
from detectron2.export.flatten import flatten_to_tuple, TracingAdapter
from detectron2.export.torchscript_patch import patch_builtin_len
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.iter_utils import recursive_iterate
from torch import nn
from torch.utils.bundled_inputs import augment_model_with_bundled_inputs
from torch.utils.mobile_optimizer import MobileOptimizerType, optimize_for_mobile
logger = logging.getLogger(__name__)
TORCHSCRIPT_FILENAME_KEY: str = "torchscript_filename"
DEFAULT_JIT_MODE = "trace"
class MobileOptimizationConfig(NamedTuple):
# optimize_for_mobile
optimization_blocklist: Set[MobileOptimizerType] = None
preserved_methods: List[AnyStr] = None
backend: str = "CPU"
torchscript_filename: str = "mobile_optimized.ptl"
def export_optimize_and_save_torchscript(
model: nn.Module,
inputs: Optional[Tuple[Any]],
output_path: str,
*,
jit_mode: Optional[str] = DEFAULT_JIT_MODE,
torchscript_filename: str = "model.jit",
mobile_optimization: Optional[MobileOptimizationConfig] = None,
_extra_files: Optional[Dict[str, bytes]] = None,
) -> str:
"""
The primary function for exporting PyTorch model to TorchScript.
Args:
model (nn.Module): the model to export. When given a ScriptModule, skip the export
and only optimize and save the model.
inputs (tuple or None): input arguments of model, can be called as model(*inputs).
Will not be used when scripting the model.
output_path (str): directory that the model will be saved.
jit_mode (str): trace/script or None if the model is already a ScriptModule.
torchscript_filename (str): the filename of non-mobile-optimized model.
mobile_optimization (MobileOptimizationConfig): when provided, the mobile optimization
will be applied.
_extra_files (Dict[str, bytes]): when provided, extra files will be saved.
Returns:
(str): filename of the final model, whether optimized or not.
"""
logger.info("Export, optimize and saving TorchScript to {} ...".format(output_path))
PathManager.mkdirs(output_path)
if _extra_files is None:
_extra_files = {}
if isinstance(model, torch.jit.ScriptModule):
if jit_mode is not None:
logger.info("The input model is already a ScriptModule, skip the jit step")
elif jit_mode == "trace":
logger.info("Tracing the model ...")
with torch.no_grad():
script_model = torch.jit.trace(model, inputs)
elif jit_mode == "script":
logger.info("Scripting the model ...")
script_model = torch.jit.script(model)
else:
raise ValueError("Unsupported jit_mode: {}".format(jit_mode))
with make_temp_directory("export_optimize_and_save_torchscript") as tmp_dir:
@contextlib.contextmanager
def _synced_local_file(rel_path):
remote_file = os.path.join(output_path, rel_path)
local_file = os.path.join(tmp_dir, rel_path)
yield local_file
PathManager.copy_from_local(local_file, remote_file, overwrite=True)
with _synced_local_file(torchscript_filename) as model_file:
logger.info(f"Saving torchscript model to: {torchscript_filename}")
torch.jit.save(script_model, model_file, _extra_files=_extra_files)
dump_torchscript_IR(script_model, os.path.join(output_path, "torchscript_IR"))
data_filename = "data.pth"
with _synced_local_file(data_filename) as data_file:
logger.info(f"Saving example data to: {data_filename}")
torch.save(inputs, data_file)
if mobile_optimization is not None:
logger.info("Applying optimize_for_mobile ...")
liteopt_model = optimize_for_mobile(
script_model,
optimization_blocklist=mobile_optimization.optimization_blocklist,
preserved_methods=mobile_optimization.preserved_methods,
backend=mobile_optimization.backend,
)
torchscript_filename = mobile_optimization.torchscript_filename
with _synced_local_file(torchscript_filename) as lite_path:
logger.info(f"Saving mobile optimized model to: {torchscript_filename}")
liteopt_model._save_for_lite_interpreter(
lite_path, _extra_files=_extra_files
)
op_names = torch.jit.export_opnames(liteopt_model)
logger.info(
"Operator names from lite interpreter:\n{}".format("\n".join(op_names))
)
logger.info("Applying augment_model_with_bundled_inputs ...")
# make all tensors zero-like to save storage
iters = recursive_iterate(inputs)
for x in iters:
if isinstance(x, torch.Tensor):
iters.send(torch.zeros_like(x).contiguous())
inputs = iters.value
augment_model_with_bundled_inputs(liteopt_model, [inputs])
# For non-cpu backends (e.g. Metal, Vulkan) the bundled inputs need to be
# converted with `torch.to(<myDevice>)` in order to predict successfully.
# This is a temporary bypass until PT Edge supports automatic backend
# conversion in the bundled inputs interface, or we can auto-add an input tensor
# conversion op to Metal and Vulkan models.
target_backend = mobile_optimization.backend.lower()
if target_backend == "cpu":
# Sanity check by running
logger.info("Running sanity check for the mobile optimized model ...")
liteopt_model(*liteopt_model.get_all_bundled_inputs()[0])
name, ext = os.path.splitext(torchscript_filename)
input_bundled_path = name + "_bundled" + ext
with _synced_local_file(input_bundled_path) as lite_path:
logger.info(f"Saving input bundled model to: {input_bundled_path}")
liteopt_model._save_for_lite_interpreter(lite_path)
return torchscript_filename
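# Illustrative usage (assumes `model` and `example_inputs` exist; the output dir is made up):
def _example_export_torchscript(model, example_inputs):
    # Traces the model, saves "model.jit" plus a mobile-optimized "mobile_optimized.ptl"
    # under /tmp/ts_out, and returns the filename of the final (optimized) model.
    return export_optimize_and_save_torchscript(
        model,
        example_inputs,
        "/tmp/ts_out",
        jit_mode="trace",
        mobile_optimization=MobileOptimizationConfig(),
    )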
# For backward compatibility, TODO: remove this function.
def trace_and_save_torchscript(
model: nn.Module,
inputs: Optional[Tuple[Any]],
output_path: str,
torchscript_filename: str = "model.jit",
mobile_optimization: Optional[MobileOptimizationConfig] = None,
_extra_files: Optional[Dict[str, bytes]] = None,
):
return export_optimize_and_save_torchscript(
model,
inputs,
output_path,
jit_mode="trace",
torchscript_filename=torchscript_filename,
mobile_optimization=mobile_optimization,
_extra_files=_extra_files,
)
class TorchscriptWrapper(nn.Module):
""" """
def __init__(self, module, int8_backend=None):
super().__init__()
self.module = module
self.int8_backend = int8_backend
def forward(self, *args, **kwargs):
# TODO: set int8 backend accordingly if needed
return self.module(*args, **kwargs)
def get_wrapped_models(self):
return self.module
def load_torchscript(model_path):
extra_files = {}
# NOTE: may support loading extra_file specified by model_info
# extra_files["predictor_info.json"] = ""
with PathManager.open(model_path, "rb") as f:
ts = torch.jit.load(f, _extra_files=extra_files)
return TorchscriptWrapper(ts)
def _is_data_flattened_tensors(data):
if isinstance(data, torch.Tensor):
return True
if isinstance(data, (tuple, list)):
if all(isinstance(x, torch.Tensor) for x in data):
return True
return False
def tracing_adapter_wrap_export(old_f):
def new_f(cls, model, input_args, save_path, export_method, **export_kwargs):
force_disable_tracing_adapter = export_kwargs.pop(
"force_disable_tracing_adapter", False
)
is_trace_mode = export_kwargs.get("jit_mode", "trace") == "trace"
if force_disable_tracing_adapter or not is_trace_mode:
logger.info("Not trace mode, export normally")
return old_f(
cls, model, input_args, save_path, export_method, **export_kwargs
)
if _is_data_flattened_tensors(input_args):
logger.info("Dry run the model to check if TracingAdapter is needed ...")
outputs = model(*input_args)
if _is_data_flattened_tensors(outputs):
logger.info(
"Both inputs and outputs are flattened tensors, export the model as is."
)
load_kwargs = old_f(
cls, model, input_args, save_path, export_method, **export_kwargs
)
assert "tracing_adapted" not in load_kwargs
load_kwargs.update({"tracing_adapted": False})
return load_kwargs
else:
logger.info(
"The outputs are not flattened tensors, can't trace normally."
)
else:
logger.info("The inputs are not flattened tensors, can't trace normally.")
logger.warning(
"Wrap the model with TracingAdapter to handle non-flattened inputs/outputs,"
" please be aware that the exported model will have different input/output data structure."
)
adapter = TracingAdapter(model, input_args)
load_kwargs = old_f(
cls,
adapter,
adapter.flattened_inputs,
save_path,
export_method,
**export_kwargs,
)
inputs_schema = dump_dataclass(adapter.inputs_schema)
outputs_schema = dump_dataclass(adapter.outputs_schema)
assert "tracing_adapted" not in load_kwargs
assert "inputs_schema" not in load_kwargs
assert "outputs_schema" not in load_kwargs
load_kwargs.update(
{
"tracing_adapted": True,
"inputs_schema": inputs_schema,
"outputs_schema": outputs_schema,
}
)
return load_kwargs
return new_f
class TracingAdapterModelWrapper(nn.Module):
def __init__(self, traced_model, inputs_schema, outputs_schema):
super().__init__()
self.traced_model = traced_model
self.inputs_schema = inputs_schema
self.outputs_schema = outputs_schema
def forward(self, *input_args):
flattened_inputs, _ = flatten_to_tuple(input_args)
flattened_outputs = self.traced_model(*flattened_inputs)
return self.outputs_schema(flattened_outputs)
def get_wrapped_models(self):
return self.traced_model
def tracing_adapter_wrap_load(old_f):
def new_f(cls, save_path, **load_kwargs):
tracing_adapted = load_kwargs.pop("tracing_adapted", False)
if not tracing_adapted:
logger.info("The model is not tracing adapted, load it normally.")
return old_f(cls, save_path, **load_kwargs)
logger.info(
"The model is tracing adapted, load the schema and wrap the model for inference."
)
assert "inputs_schema" in load_kwargs, load_kwargs.keys()
assert "outputs_schema" in load_kwargs, load_kwargs.keys()
inputs_schema = instantiate(load_kwargs.pop("inputs_schema"))
outputs_schema = instantiate(load_kwargs.pop("outputs_schema"))
traced_model = old_f(cls, save_path, **load_kwargs)
return TracingAdapterModelWrapper(traced_model, inputs_schema, outputs_schema)
return new_f
def update_export_kwargs_from_export_method(old_f):
"""
Provide some convenient way of updating export_kwargs by adding trigger words in
`export_method`. For example, instead of setting `mobile_optimization` in the
model_export_kwargs of the PredictorExportConfig, user can simply put the `_mobile`
trigger word in the --predictor-type (which will then be forwarded as `export_method`
in most cases) to enable mobile optimization.
Please note that there's a finite set of recognized trigger words, and a warning will
be logged if the "export_method" string cannot be fully parsed after removing them.
The recognized values generally follow a pattern of:
"torchscript[_mobile][_int8][-vulkan | -metal][@scripting | @tracing]"
Some examples (not comprehensive because flag words' order can be swapped):
"torchscript"
"torchscript_mobile"
"torchscript_mobile-metal"
"torchscript_mobile-vulkan"
"torchscript_mobile_int8"
"torchscript@scripting"
"torchscript_int8@scripting"
"torchscript_mobile@scripting"
"torchscript_mobile-metal@scripting"
"torchscript_mobile-vulkan@scripting"
"torchscript_mobile_int8@scripting"
"torchscript@tracing"
"torchscript_int8@tracing"
"torchscript_mobile@tracing"
"torchscript_mobile-metal@tracing"
"torchscript_mobile-vulkan@tracing"
"torchscript_mobile_int8@tracing"
"""
def new_f(cls, model, input_args, save_path, export_method, **export_kwargs):
if export_method is not None:
assert isinstance(export_method, str)
original_export_method = export_method
if "_mobile" in export_method:
if "mobile_optimization" in export_kwargs:
logger.warning(
"`mobile_optimization` is already specified, keep using it"
)
else:
# Infer a MobileOptimizationConfig if none was provided
# "CPU" backend default. If found appropriate suffix, update the backend
if "-metal" in export_method:
mobile_opt_config = MobileOptimizationConfig(backend="metal")
export_method = export_method.replace("-metal", "", 1)
elif "-vulkan" in export_method:
mobile_opt_config = MobileOptimizationConfig(backend="vulkan")
export_method = export_method.replace("-vulkan", "", 1)
else:
mobile_opt_config = MobileOptimizationConfig()
export_kwargs["mobile_optimization"] = mobile_opt_config
export_method = export_method.replace("_mobile", "", 1)
if "@scripting" in export_method:
jit_mode = export_kwargs.get("jit_mode", None)
if jit_mode and jit_mode != "script":
logger.warning(
"`jit_mode` is already specified as {}, overwrite it to `script`"
" since @scripting appears in export_method".format(jit_mode)
)
export_kwargs["jit_mode"] = "script"
export_method = export_method.replace("@scripting", "", 1)
if "@tracing" in export_method:
jit_mode = export_kwargs.get("jit_mode", None)
if jit_mode and jit_mode != "trace":
logger.warning(
"`jit_mode` is already specified as {}, overwrite it to `trace`"
" since @tracing appears in export_method".format(jit_mode)
)
export_kwargs["jit_mode"] = "trace"
export_method = export_method.replace("@tracing", "", 1)
if "_int8" in export_method:
export_method = export_method.replace("_int8", "", 1)
if export_method != "torchscript":
logger.warning(
"Suspcious export_method after removing triggering words,"
" original export_method: {}, remaining: {}".format(
original_export_method, export_method
)
)
return old_f(cls, model, input_args, save_path, export_method, **export_kwargs)
return new_f
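# Worked example for the decorator above: export_method="torchscript_mobile-metal@scripting"
# roughly results in
#   export_kwargs["mobile_optimization"] = MobileOptimizationConfig(backend="metal")
#   export_kwargs["jit_mode"] = "script"
# and the export_method string passed on to the wrapped function becomes "torchscript".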
class DefaultTorchscriptExport(ModelExportMethod):
@classmethod
@update_export_kwargs_from_export_method
def export(
cls,
model: nn.Module,
input_args: Tuple[Tuple[torch.Tensor]],
save_path: str,
export_method: Optional[str],
**export_kwargs,
):
expected_arguments = {
"jit_mode",
"torchscript_filename",
"mobile_optimization",
"_extra_files",
}
filtered_kwargs = {
k: v for k, v in export_kwargs.items() if k in expected_arguments
}
torchscript_filename = export_optimize_and_save_torchscript(
model, input_args, save_path, **filtered_kwargs
)
return {TORCHSCRIPT_FILENAME_KEY: torchscript_filename}
@classmethod
def load(cls, save_path, *, torchscript_filename="model.jit"):
model_path = os.path.join(save_path, torchscript_filename)
return load_torchscript(model_path)
@ModelExportMethodRegistry.register("torchscript")
@ModelExportMethodRegistry.register("torchscript_int8")
@ModelExportMethodRegistry.register("torchscript_mobile")
@ModelExportMethodRegistry.register("torchscript_mobile-metal")
@ModelExportMethodRegistry.register("torchscript_mobile-vulkan")
@ModelExportMethodRegistry.register("torchscript_mobile_int8")
@ModelExportMethodRegistry.register("torchscript@scripting")
@ModelExportMethodRegistry.register("torchscript_int8@scripting")
@ModelExportMethodRegistry.register("torchscript_mobile@scripting")
@ModelExportMethodRegistry.register("torchscript_mobile-metal@scripting")
@ModelExportMethodRegistry.register("torchscript_mobile-vulkan@scripting")
@ModelExportMethodRegistry.register("torchscript_mobile_int8@scripting")
@ModelExportMethodRegistry.register("torchscript@tracing")
@ModelExportMethodRegistry.register("torchscript_int8@tracing")
@ModelExportMethodRegistry.register("torchscript_mobile@tracing")
@ModelExportMethodRegistry.register("torchscript_mobile-metal@tracing")
@ModelExportMethodRegistry.register("torchscript_mobile-vulkan@tracing")
@ModelExportMethodRegistry.register("torchscript_mobile_int8@tracing")
class TracingAdaptedTorchscriptExport(DefaultTorchscriptExport):
@classmethod
@update_export_kwargs_from_export_method
@tracing_adapter_wrap_export
def export(cls, model, input_args, save_path, export_method, **export_kwargs):
with patch_builtin_len():
return super().export(
model, input_args, save_path, export_method, **export_kwargs
)
@classmethod
@tracing_adapter_wrap_load
def load(cls, save_path, **load_kwargs):
return super().load(save_path, **load_kwargs)
|
d2go-main
|
d2go/export/torchscript.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import logging
import operator
from collections import defaultdict, OrderedDict
from typing import Dict
import torch
from d2go.config import CfgNode
from d2go.data.dataset_mappers.build import build_dataset_mapper
from d2go.data.utils import ClipLengthGroupedDataset
from detectron2.data import (
build_batch_data_loader,
build_detection_train_loader,
get_detection_dataset_dicts,
)
from detectron2.data.build import worker_init_reset_seed
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import RepeatFactorTrainingSampler
from detectron2.utils.comm import get_world_size
from mobile_cv.common.misc.oss_utils import fb_overwritable
from tabulate import tabulate
logger = logging.getLogger(__name__)
def add_weighted_training_sampler_default_configs(cfg: CfgNode):
"""
The value of cfg.DATASETS.TRAIN_REPEAT_FACTOR should be a list of
tuples (dataset_name, scalar-repeat-factor) specifying upsampled frequencies
for each dataset when using RepeatFactorTrainingSampler. An example looks like:
DATASETS:
TRAIN:
- "train_1"
- "train_2"
- "small_train_3"
TEST: ...
TRAIN_REPEAT_FACTOR:
- ["small_train_3", 2.5]
"""
cfg.DATASETS.TRAIN_REPEAT_FACTOR = []
def add_random_subset_training_sampler_default_configs(cfg: CfgNode):
"""
Add default cfg.DATALOADER.RANDOM_SUBSET_RATIO for RandomSubsetTrainingSampler
The value of cfg.DATALOADER.RANDOM_SUBSET_RATIO should be a float > 0 and <= 1
"""
cfg.DATALOADER.RANDOM_SUBSET_RATIO = 1.0
def get_train_datasets_repeat_factors(cfg: CfgNode) -> Dict[str, float]:
repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR
assert all(len(tup) == 2 for tup in repeat_factors)
name_to_weight = defaultdict(lambda: 1, dict(repeat_factors))
# The sampling weights map should only contain datasets in train config
unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN)
assert not unrecognized, f"unrecognized datasets: {unrecognized}"
logger.info(f"Found repeat factors: {list(name_to_weight.items())}")
# pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`.
return name_to_weight
def get_sampling_probability_table(
dataset_sizes: Dict[str, int], dataset_repeat_factors: Dict[str, float]
) -> str:
total_sum = sum(
dataset_repeat_factors.get(dsname, 1.0) * size
for dsname, size in dataset_sizes.items()
)
sample_prob_data = [
(
dsname,
size,
dataset_repeat_factors.get(dsname, 1.0),
(dataset_repeat_factors.get(dsname, 1.0) * size) * 100 / total_sum,
)
for dsname, size in dataset_sizes.items()
]
headers = ["Dataset", "Samples", "Repeat factor", "Sample Prob (%)"]
table = tabulate(sample_prob_data, headers=headers, tablefmt="pipe")
return table
def build_weighted_detection_train_loader(
cfg: CfgNode, mapper=None, enable_category_balance=False
):
dataset_repeat_factors = get_train_datasets_repeat_factors(cfg)
# OrderedDict to guarantee order of values() consistent with repeat factors
dataset_name_to_dicts = OrderedDict(
{
name: get_detection_dataset_dicts(
[name],
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
for name in cfg.DATASETS.TRAIN
}
)
# Repeat factor for every sample in the dataset
repeat_factors = [
[dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname])
for dsname in cfg.DATASETS.TRAIN
]
sampling_prob_table = get_sampling_probability_table(
{dsname: len(dataset_name_to_dicts[dsname]) for dsname in cfg.DATASETS.TRAIN},
dataset_repeat_factors,
)
logger.info("Dataset TRAIN sampling probability: \n" + sampling_prob_table)
repeat_factors = list(itertools.chain.from_iterable(repeat_factors))
dataset_dicts = dataset_name_to_dicts.values()
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
dataset = MapDataset(dataset, mapper)
repeat_factors = torch.tensor(repeat_factors)
if enable_category_balance:
"""
1. Calculate repeat factors using category frequency for each dataset and then merge them.
2. Element-wise multiplying the dataset-frequency repeat factors with
the category-frequency repeat factors gives the final repeat factors.
"""
category_repeat_factors = [
RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD
)
for dataset_dict in dataset_name_to_dicts.values()
]
# flatten the category repeat factors from all datasets
category_repeat_factors = list(
itertools.chain.from_iterable(category_repeat_factors)
)
category_repeat_factors = torch.tensor(category_repeat_factors)
repeat_factors = torch.mul(category_repeat_factors, repeat_factors)
repeat_factors = repeat_factors / torch.min(repeat_factors)
logger.info(
"Using WeightedCategoryTrainingSampler with repeat_factors={}".format(
cfg.DATASETS.TRAIN_REPEAT_FACTOR
)
)
else:
logger.info(
"Using WeightedTrainingSampler with repeat_factors={}".format(
cfg.DATASETS.TRAIN_REPEAT_FACTOR
)
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
return build_batch_data_loader(
dataset,
sampler,
cfg.SOLVER.IMS_PER_BATCH,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
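# Illustrative yaml snippet (dataset names and repeat factor are made up) that routes
# build_mapped_train_loader below to this weighted loader:
#
#   DATALOADER:
#     SAMPLER_TRAIN: "WeightedTrainingSampler"
#   DATASETS:
#     TRAIN: ["train_big", "train_small"]
#     TRAIN_REPEAT_FACTOR: [["train_small", 4.0]]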
def build_clip_grouping_data_loader(dataset, sampler, total_batch_size, num_workers=0):
"""
Build a batched dataloader for training with video clips.
Args:
dataset (torch.utils.data.Dataset): map-style PyTorch dataset. Can be indexed.
sampler (torch.utils.data.sampler.Sampler): a sampler that produces indices
total_batch_size (int): total batch size across GPUs.
num_workers (int): number of parallel data loading workers
Returns:
iterable[list]. Length of each list is the batch size of the current
GPU. Each element in the list comes from the dataset.
"""
world_size = get_world_size()
assert (
total_batch_size > 0 and total_batch_size % world_size == 0
), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
total_batch_size, world_size
)
batch_size = total_batch_size // world_size
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
num_workers=num_workers,
batch_sampler=None,
collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
return ClipLengthGroupedDataset(data_loader, batch_size)
@fb_overwritable()
def build_mapped_train_loader(cfg, mapper):
if cfg.DATALOADER.SAMPLER_TRAIN == "WeightedTrainingSampler":
# balancing only dataset frequencies
data_loader = build_weighted_detection_train_loader(cfg, mapper=mapper)
elif cfg.DATALOADER.SAMPLER_TRAIN == "WeightedCategoryTrainingSampler":
# balancing both datasets and their categories
data_loader = build_weighted_detection_train_loader(
cfg, mapper=mapper, enable_category_balance=True
)
else:
data_loader = build_detection_train_loader(cfg, mapper=mapper)
return data_loader
def build_d2go_train_loader(cfg, mapper=None):
"""
Build the dataloader for training in D2Go. This is the main entry and customizations
will be done by using Registry.
This interface is currently experimental.
"""
logger.info("Building D2Go's train loader ...")
# TODO: disallow passing mapper and use registry for all mapper registering
mapper = mapper or build_dataset_mapper(cfg, is_train=True)
logger.info("Using dataset mapper:\n{}".format(mapper))
data_loader = build_mapped_train_loader(cfg, mapper)
# TODO: decide if move vis_wrapper inside this interface
return data_loader
|
d2go-main
|
d2go/data/build.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.config import CfgNode as CN
def add_d2go_data_default_configs(_C):
_C.D2GO_DATA = CN()
# Config for "detectron2go.data.extended_coco.extended_coco_load"
_C.D2GO_DATA.DATASETS = CN()
# List of class names to use when loading the data; this applies to train
# and test separately. The default value means using all classes; otherwise a
# new json file containing only the given categories will be created.
_C.D2GO_DATA.DATASETS.TRAIN_CATEGORIES = ()
_C.D2GO_DATA.DATASETS.TEST_CATEGORIES = ()
# Register a list of COCO datasets in config
# The following specifies additional coco data to inject. The required fields are the
# name (NAMES), image root (IM_DIRS) and coco json file (JSON_FILES), while keypoint
# metadata (KEYPOINT_METADATA) is optional. The keypoint metadata name provided
# here is used to lookup the metadata specified within the KEYPOINT_METADATA
# metadata registry specified in "data/keypoint_metadata_registry.py". For adding
# new use cases, simply register new metadata to that registry.
_C.D2GO_DATA.DATASETS.COCO_INJECTION = CN()
_C.D2GO_DATA.DATASETS.COCO_INJECTION.NAMES = []
_C.D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS = []
_C.D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES = []
_C.D2GO_DATA.DATASETS.COCO_INJECTION.KEYPOINT_METADATA = []
_C.D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION = "_register_extended_coco"
# On-the-fly register a list of datasets located under detectron2go/datasets
# by specifying the filename (without .py).
_C.D2GO_DATA.DATASETS.DYNAMIC_DATASETS = []
# Config for caching the dataset annotations on local disk
_C.D2GO_DATA.DATASETS.DISK_CACHE = CN()
_C.D2GO_DATA.DATASETS.DISK_CACHE.ENABLED = False
# TODO: potentially add this config
# # List of extra keys in annotation, the item will be forwarded by
# # extended_coco_load.
# _C.D2GO_DATA.DATASETS.ANNOTATION_FIELDS_TO_FORWARD = ()
# Config for D2GoDatasetMapper
_C.D2GO_DATA.MAPPER = CN()
# dataset mapper name
_C.D2GO_DATA.MAPPER.NAME = "D2GoDatasetMapper"
# When enabled, an image item from the json dataset doesn't need to have width/height;
# they will be backfilled once the image is loaded. This may cause issues when
# width/height is actually used by extended_coco_load, e.g. for grouping
# by aspect ratio.
_C.D2GO_DATA.MAPPER.BACKFILL_SIZE = False
_C.D2GO_DATA.MAPPER.RETRY = 3
_C.D2GO_DATA.MAPPER.CATCH_EXCEPTION = True
_C.D2GO_DATA.AUG_OPS = CN()
# List of transforms that are represented by string. Each string starts with
# a registered name in TRANSFORM_OP_REGISTRY, optionally followed by a string
# argument (separated by "::") which can be used for initializing the
# transform object. See build_transform_gen for the detail.
# Some examples are:
# example 1: RandomFlipOp
# example 2: RandomFlipOp::{}
# example 3: RandomFlipOp::{"prob":0.5}
# example 4: RandomBrightnessOp::{"intensity_min":1.0, "intensity_max":2.0}
# NOTE: search "example repr:" in fbcode for examples.
_C.D2GO_DATA.AUG_OPS.TRAIN = ["ResizeShortestEdgeOp", "RandomFlipOp"]
_C.D2GO_DATA.AUG_OPS.TEST = ["ResizeShortestEdgeOp"]
_C.D2GO_DATA.TEST = CN()
# Evaluate on the first specified number of images for each dataset during
# testing, default value 0 means using all images.
# NOTE: See maybe_subsample_n_images for details.
_C.D2GO_DATA.TEST.MAX_IMAGES = 0
_C.D2GO_DATA.TEST.SUBSET_SAMPLING = "frontmost" # one of {"frontmost", "random"}
return _C
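# Illustrative yaml snippet (made-up names and paths) injecting an extra COCO-format dataset
# via the COCO_INJECTION configs above:
#
#   D2GO_DATA:
#     DATASETS:
#       COCO_INJECTION:
#         NAMES: ["my_coco_train"]
#         IM_DIRS: ["/data/my_coco/images"]
#         JSON_FILES: ["/data/my_coco/instances_train.json"]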
|
d2go-main
|
d2go/data/config.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import logging
import pickle
import shutil
import uuid
import numpy as np
from detectron2.utils import comm
from detectron2.utils.logger import log_every_n_seconds
logger = logging.getLogger(__name__)
# NOTE: Use unique ROOT_CACHE_DIR for each run, during the run, each instance of data
# loader will create a `cache_dir` under ROOT_CACHE_DIR. When the DL instance is GC-ed,
# the `cache_dir` will be removed by __del__; when the run is finished or interrupted,
# atexit.register will be triggered to remove the ROOT_CACHE_DIR to make sure there's no
# leftovers. Regarding DDP, although each GPU process has its own random value for
# ROOT_CACHE_DIR, every GPU process uses the same `cache_dir` broadcast from the local
# master rank, which is then inherited by each data loader worker; this makes sure that
# `cache_dir` is in sync between all GPUs and DL workers on the same node.
ROOT_CACHE_DIR = "/tmp/DatasetFromList_cache_" + uuid.uuid4().hex[:8]
def _local_master_gather(func, check_equal=False):
if comm.get_local_rank() == 0:
x = func()
assert x is not None
else:
x = None
x_all = comm.all_gather(x)
x_local_master = [x for x in x_all if x is not None]
if check_equal:
master = x_local_master[0]
assert all(x == master for x in x_local_master), x_local_master
return x_local_master
class DiskCachedList(object):
"""
Wrap a list, the underlying storage is off-loaded to disk to save RAM usage.
"""
def __init__(self, lst, strategy="batched_static"):
"""
Args:
lst (list): a list which contains elements to produce.
strategy (str): strategy of using diskcache, supported strategies:
- naive: saving each item individually.
- batched_static: group N items together, where N is calculated from
the average item size.
"""
self._lst = lst
self._diskcache_strategy = strategy
def _serialize(data):
buffer = pickle.dumps(data, protocol=-1)
return np.frombuffer(buffer, dtype=np.uint8)
logger.info(
"Serializing {} elements to byte tensors ...".format(len(self._lst))
)
self._lst = [_serialize(x) for x in self._lst]
total_size = sum(len(x) for x in self._lst)
# TODO: only enabling DiskCachedDataset for large enough dataset
logger.info(
"Serialized dataset takes {:.2f} MiB".format(total_size / 1024**2)
)
self._initialize_diskcache()
def _initialize_diskcache(self):
from mobile_cv.common.misc.local_cache import LocalCache
cache_dir = "{}/{}".format(ROOT_CACHE_DIR, uuid.uuid4().hex[:8])
cache_dir = comm.all_gather(cache_dir)[0] # use same cache_dir
logger.info("Creating diskcache database in: {}".format(cache_dir))
self._cache = LocalCache(cache_dir=cache_dir, num_shards=8)
# self._cache.cache.clear(retry=True) # seems faster if index exists
if comm.get_local_rank() == 0:
if self._diskcache_strategy == "naive":
for i, item in enumerate(self._lst):
ret = self._write_to_local_db((i, item))
assert ret, "Error writing index {} to local db".format(i)
pct = 100.0 * i / len(self._lst)
self._log_progress(pct)
# NOTE: each item might be small in size (hundreds of bytes),
# writing millions of them can take a pretty long time (hours)
# because of frequent disk access. One solution is grouping a batch
# of items into larger blob.
elif self._diskcache_strategy == "batched_static":
TARGET_BYTES = 50 * 1024
average_bytes = np.average(
[
self._lst[int(x)].size
for x in np.linspace(0, len(self._lst) - 1, 1000)
]
)
self._chuck_size = max(1, int(TARGET_BYTES / average_bytes))
logger.info(
"Average data size: {} bytes; target chuck data size {} KiB;"
" {} items per chuck; {} chucks in total".format(
average_bytes,
TARGET_BYTES / 1024,
self._chuck_size,
int(len(self._lst) / self._chuck_size),
)
)
for i in range(0, len(self._lst), self._chuck_size):
chunk = self._lst[i : i + self._chuck_size]
chunk_i = int(i / self._chuck_size)
ret = self._write_to_local_db((chunk_i, chunk))
assert ret, "Error writing index {} to local db".format(chunk_i)
pct = 100.0 * i / len(self._lst)
self._log_progress(pct)
# NOTE: instead of using a fixed chunk size, items can be grouped dynamically
elif self._diskcache_strategy == "batched_dynamic":
raise NotImplementedError()
else:
raise NotImplementedError(self._diskcache_strategy)
comm.synchronize()
logger.info(
"Finished writing to local disk, db size: {:.2f} MiB".format(
self._cache.cache.volume() / 1024**2
)
)
# Optional sync for some strategies
if self._diskcache_strategy == "batched_static":
# propagate the chunk size and make sure all local rank 0 processes use the same value
self._chuck_size = _local_master_gather(
lambda: self._chuck_size, check_equal=True
)[0]
logger.info("Gathered chuck size: {}".format(self._chuck_size))
# free the memory of self._lst
self._size = _local_master_gather(lambda: len(self._lst), check_equal=True)[0]
logger.info("Gathered list size: {}".format(self._size))
del self._lst
def _write_to_local_db(self, task):
index, record = task
db_path = str(index)
# suc = self._cache.load(lambda path, x: x, db_path, record)
# record = BytesIO(np.random.bytes(np.random.randint(70000, 90000)))
suc = self._cache.cache.set(db_path, record, retry=True)
return suc
def _log_progress(self, percentage):
log_every_n_seconds(
logging.INFO,
"({:.2f}%) Wrote {} elements to local disk cache, db size: {:.2f} MiB".format(
percentage,
len(self._cache.cache),
self._cache.cache.volume() / 1024**2,
),
n=10,
)
def __len__(self):
if self._diskcache_strategy == "batched_static":
return self._size
else:
raise NotImplementedError()
def __getitem__(self, idx):
if self._diskcache_strategy == "naive":
bytes = memoryview(self._cache.cache[str(idx)])
return pickle.loads(bytes)
elif self._diskcache_strategy == "batched_static":
chunk_i, residual = divmod(idx, self._chuck_size)
chunk = self._cache.cache[str(chunk_i)]
bytes = memoryview(chunk[residual])
return pickle.loads(bytes)
else:
raise NotImplementedError()
@property
def cache_dir(self):
"""return the current cache dirs of DiskCachedDatasetFromList instance"""
return self._cache.cache_dir
@staticmethod
@atexit.register
def _clean_up_root_cache_dir():
# in case the program exits unexpectedly, clean all the cache dirs created by
# this session.
if comm.get_local_rank() == 0:
_clean_up_cache_dir(ROOT_CACHE_DIR)
def __del__(self):
# when the data loader is GC-ed, remove the cache dir. This is needed to avoid
# wasting disk space in case multiple data loaders are used, e.g. when running
# evaluations on multiple datasets during training.
if comm.get_local_rank() == 0:
_clean_up_cache_dir(self.cache_dir)
def _clean_up_cache_dir(cache_dir):
print("Cleaning up cache dir: {}".format(cache_dir))
shutil.rmtree(
cache_dir,
onerror=lambda func, path, ex: print(
"Catch error when removing {}; func: {}; exc_info: {}".format(
path, func, ex
)
),
)
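# Illustrative usage (assumes `dataset_dicts` is a list of dataset records):
def _example_disk_cached_list(dataset_dicts):
    lst = DiskCachedList(dataset_dicts, strategy="batched_static")
    first = lst[0]  # transparently unpickled from the on-disk cache
    return len(lst), lst.cache_dir, first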
|
d2go-main
|
d2go/data/disk_cache.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import logging
import os
import shlex
import subprocess
from collections import defaultdict
from typing import Callable, Dict, List, Optional
import detectron2.utils.comm as comm
from d2go.data.cache_util import _cache_json_file
from detectron2.data import MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from pycocotools.coco import COCO
logger = logging.getLogger(__name__)
class InMemoryCOCO(COCO):
def __init__(self, loaded_json):
"""
In this in-memory version of COCO we don't load json from the file,
but directly use a loaded_json instead. This approach improves
both robustness and efficiency, as when we convert from other formats
to COCO format, we don't need to save and re-load the json again.
"""
# load dataset
self.dataset = loaded_json
self.anns = {}
self.cats = {}
self.imgs = {}
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
self.createIndex()
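# Illustrative usage (not part of d2go): build the COCO api directly from an already-loaded
# dict, skipping the save/re-load round trip:
#
#     with PathManager.open(json_file, "r") as f:
#         coco_api = InMemoryCOCO(json.load(f))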
def extract_archive_file(archive_fn: str, im_dir: str):
if not PathManager.exists(im_dir) or not PathManager.ls(im_dir):
# Dataset is not deployed. Deploy it.
archive_fns = archive_fn
# A dataset may be composed of several tgz files, or only one.
# If one, make it into a list to make the code later more general
if not isinstance(archive_fns, list):
archive_fns = [archive_fns]
logger.info(
"Extracting datasets {} to local machine at {}".format(archive_fns, im_dir)
)
if not PathManager.exists(im_dir):
PathManager.mkdirs(im_dir)
for archive_fn in archive_fns:
# Extract the tgz file directly into the target directory,
# without precopy.
# Note that the tgz file contains a root directory that
# we do not want, hence the strip-components=1
commandUnpack = (
"tar -mxzf {src_file} -C {tgt_dir} " "--strip-components=1"
).format(src_file=archive_fn, tgt_dir=im_dir)
assert not subprocess.call(shlex.split(commandUnpack)), "Failed to unpack"
logger.info("Extracted {}".format(archive_fn))
COCOTEXT_DATASET_CONVERSION_STATUS = {}
def save_converted_json(target_json, convert_coco_dict):
if target_json in COCOTEXT_DATASET_CONVERSION_STATUS:
return
PathManager.mkdirs(os.path.dirname(target_json))
if comm.get_local_rank() == 0:
with PathManager.open(target_json, "w") as f:
json.dump(convert_coco_dict, f)
comm.synchronize()
COCOTEXT_DATASET_CONVERSION_STATUS[target_json] = True
def convert_coco_text_to_coco_detection_json(
source_json: str,
target_json: str,
set_type: Optional[str] = None,
min_img_size: int = 100,
text_cat_id: int = 1,
) -> Dict:
"""
This function converts a COCOText style JSON to a COCODetection style
JSON.
For COCOText see: https://vision.cornell.edu/se3/coco-text-2/
For COCODetection see: http://cocodataset.org/#overview
"""
with PathManager.open(source_json, "r") as f:
coco_text_json = json.load(f)
coco_text_json["annotations"] = list(coco_text_json["anns"].values())
coco_text_json["images"] = list(coco_text_json["imgs"].values())
if set_type is not None:
# COCO Text style JSONs often mix test, train, and val sets.
# We need to make sure we only use the data type we want.
coco_text_json["images"] = [
x for x in coco_text_json["images"] if x["set"] == set_type
]
coco_text_json["categories"] = [{"name": "text", "id": text_cat_id}]
del coco_text_json["cats"]
del coco_text_json["imgs"]
del coco_text_json["anns"]
for ann in coco_text_json["annotations"]:
ann["category_id"] = text_cat_id
ann["iscrowd"] = 0
# Don't evaluate the model on illegible words
if set_type == "val" and ann["legibility"] != "legible":
ann["ignore"] = True
# Some datasets seem to have extremely small images which break downstream
# operations. If min_img_size is set, we can remove these.
coco_text_json["images"] = [
x
for x in coco_text_json["images"]
if x["height"] >= min_img_size and x["width"] >= min_img_size
]
# Remap image_ids if necessary
if isinstance(coco_text_json["images"][0]["id"], str):
image_id_remap = {
x["id"]: id_no for (id_no, x) in enumerate(coco_text_json["images"])
}
for x in coco_text_json["images"]:
x["id"] = image_id_remap[x["id"]]
for x in coco_text_json["annotations"]:
if x["image_id"] in image_id_remap:
x["image_id"] = image_id_remap[x["image_id"]]
save_converted_json(target_json, coco_text_json)
return coco_text_json
def valid_bbox(bbox_xywh: List[int], img_w: int, img_h: int) -> bool:
if (
bbox_xywh is None
or not len(bbox_xywh) == 4
or (bbox_xywh[3] == 0 or bbox_xywh[2] == 0)
or not (0 <= bbox_xywh[0] <= img_w - bbox_xywh[2])
or not (0 <= bbox_xywh[1] <= img_h - bbox_xywh[3])
):
return False
return True
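# Worked example for valid_bbox (XYWH_ABS boxes must have non-zero size and lie fully
# inside the image):
#   valid_bbox([10, 10, 50, 40], img_w=100, img_h=100) -> True
#   valid_bbox([80, 10, 50, 40], img_w=100, img_h=100) -> False (extends past the right edge)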
def valid_bbox_rotated(bbox_xywha: List[int], img_w: int, img_h: int) -> bool:
if (
bbox_xywha is None
or (bbox_xywha[3] == 0 or bbox_xywha[2] == 0)
or not (
0.4 * bbox_xywha[2] <= bbox_xywha[0] <= img_w - bbox_xywha[2] * 0.4
) # using 0.4*h and 0.4*w to give some leeway for rotation but still remove huge bboxes for training stability
or not (0.4 * bbox_xywha[3] <= bbox_xywha[1] <= img_h - bbox_xywha[3] * 0.4)
):
return False
return True
def convert_coco_annotations(
anno_dict_list: List[Dict],
record: Dict,
remapped_id: Dict,
error_report: Dict,
filter_invalid_bbox: Optional[bool] = True,
):
"""
Converts annotations format of coco to internal format while applying
some filtering
"""
converted_annotations = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same. This fails
# only when the data parsing logic or the annotation file is buggy.
assert anno["image_id"] == record["image_id"]
assert anno.get("ignore", 0) == 0
# Copy fields that do not need additional conversion
fields_to_copy = [
"iscrowd",
"bbox",
"bbox_mode",
"keypoints",
"category_id",
"extras",
"point_coords",
"point_labels",
"associations",
"file_name",
]
# NOTE: maybe use MetadataCatalog for this
obj = {field: anno[field] for field in fields_to_copy if field in anno}
# Filter out bad annotations where category do not match
if obj.get("category_id", None) not in remapped_id:
continue
# Bounding boxes: convert and filter out bad bounding box annotations
bbox_object = obj.get("bbox", None)
if bbox_object:
if "bbox_mode" in obj:
bbox_object = BoxMode.convert(
bbox_object, obj["bbox_mode"], BoxMode.XYWH_ABS
)
else:
# Assume the default box mode is always (x, y, w, h)
error_report["without_bbox_mode"].cnt += 1
obj["bbox_mode"] = (
BoxMode.XYWHA_ABS if len(obj["bbox"]) == 5 else BoxMode.XYWH_ABS
)
if obj["bbox_mode"] != BoxMode.XYWHA_ABS: # for horizontal bboxes
if (
filter_invalid_bbox
and record.get("width")
and record.get("height")
and not valid_bbox(bbox_object, record["width"], record["height"])
):
error_report["without_valid_bounding_box"].cnt += 1
continue
else: # for rotated bboxes in XYWHA format
if (
filter_invalid_bbox
and record.get("width")
and record.get("height")
and not valid_bbox_rotated(
bbox_object, record["width"], record["height"]
)
):
error_report["without_valid_bounding_box"].cnt += 1
continue
# Segmentation: filter and add segmentation
segm = anno.get("segmentation", None)
if segm: # either list[list[float]] or dict(RLE)
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
error_report["without_valid_segmentation"].cnt += 1
continue # ignore this instance
obj["segmentation"] = segm
# Remap ids
obj["category_id"] = remapped_id[obj["category_id"]]
converted_annotations.append(obj)
return converted_annotations
# Error entry class for reporting coco conversion issues
class ErrorEntry:
def __init__(self, error_name, msg, cnt=0):
self.error_name = error_name
self.cnt = cnt
self.msg = msg
def __repr__(self):
return f"{self.msg} for {self.error_name}, count = {self.cnt}"
def print_conversion_report(ann_error_report, image_error_report, ex_warning_fn):
# Report image errors
report_str = ""
for error_key in image_error_report:
if image_error_report[error_key].cnt > 0:
report_str += f"\t{image_error_report[error_key]}\n"
if error_key == "ignore_image_root" and ex_warning_fn:
report_str += f"\texample file name {ex_warning_fn}\n"
# Report annotation errors
for error_key in ann_error_report:
if ann_error_report[error_key].cnt > 0:
report_str += f"\t{ann_error_report[error_key]}\n"
if len(report_str):
logger.warning(f"Conversion issues:\n{report_str}")
def _assign_annotations_to_record(
record: Dict, converted_anns: List[Dict], all_cat_names: Optional[List[str]]
) -> None:
record["annotations"] = converted_anns
if converted_anns and all(
[ann.get("file_name", "").endswith(".png") for ann in converted_anns]
):
if len(converted_anns) == 1:
record["sem_seg_file_name"] = converted_anns[0]["file_name"]
return
assert (
all_cat_names
), f"all_cat_names needs to be specified for MCS dataset: {converted_anns}"
record["multi_sem_seg_file_names"] = {
all_cat_names[ann["category_id"]]: ann["file_name"]
for ann in converted_anns
}
def _process_associations(
record: Dict, converted_anns: List[Dict], _post_process_: Optional[Callable]
) -> None:
post_process_dict = {"_post_process_": _post_process_} if _post_process_ else {}
record.update(post_process_dict)
if "associations" not in record or "associations" not in converted_anns[0]:
return
assert (
len(converted_anns) == 1
), "Only one annotation expected when associated frames exist!"
for key, associated_ann in converted_anns[0]["associations"].items():
if key not in record["associations"]:
continue
record["associations"][key] = {
"file_name": record["associations"][key],
"sem_seg_file_name": associated_ann,
}
record["associations"][key].update(post_process_dict)
# Following D23593142 to save memory
record["associations"] = list(record["associations"])
def convert_to_dict_list(
image_root: str,
remapped_id: Dict,
imgs: List[Dict],
anns: List[Dict],
dataset_name: Optional[str] = None,
all_cat_names: Optional[List[str]] = None,
image_direct_copy_keys: Optional[List[str]] = None,
filter_invalid_bbox: Optional[bool] = True,
filter_empty_annotations: Optional[bool] = True,
_post_process_: Optional[Callable] = None,
) -> List[Dict]:
ann_error_report = {
name: ErrorEntry(name, msg, 0)
for name, msg in [
("without_valid_segmentation", "Instance filtered"),
("without_valid_bounding_box", "Instance filtered"),
("without_bbox_mode", "Warning"),
]
}
image_error_report = {
name: ErrorEntry(name, msg, 0)
for name, msg in [
("ignore_image_root", f"Image root ignored {image_root}"),
(
"no_annotations",
"Image filtered" if filter_empty_annotations else "Warning",
),
]
}
ex_warning_fn = None
default_record = {"dataset_name": dataset_name} if dataset_name else {}
converted_dict_list = []
for (img_dict, anno_dict_list) in zip(imgs, anns):
record = copy.deepcopy(default_record)
# NOTE: besides using a (relative) path in the "file_name" field to represent
# the image resource, "extended coco" also supports using a uri which
# represents an image using a single string, e.g. "everstore_handle://xxx".
if "://" not in img_dict["file_name"]:
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
else:
if image_root is not None:
image_error_report["ignore_image_root"].cnt += 1
ex_warning_fn = (
ex_warning_fn if ex_warning_fn else img_dict["file_name"]
)
record["file_name"] = img_dict["file_name"]
# Setup image info and id
if "height" in img_dict or "width" in img_dict:
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["image_id"] = img_dict["id"]
# Convert annotation for dataset_dict
converted_anns = convert_coco_annotations(
anno_dict_list,
record,
remapped_id,
ann_error_report,
filter_invalid_bbox=filter_invalid_bbox,
)
if len(converted_anns) == 0:
image_error_report["no_annotations"].cnt += 1
if filter_empty_annotations:
continue
_assign_annotations_to_record(record, converted_anns, all_cat_names)
if "associations" in img_dict:
record["associations"] = img_dict["associations"]
_process_associations(record, converted_anns, _post_process_)
# Copy keys if additionally asked
if image_direct_copy_keys:
for c_key in image_direct_copy_keys:
assert c_key in img_dict, f"{c_key} not in coco image entry annotation"
record[c_key] = img_dict[c_key]
converted_dict_list.append(record)
print_conversion_report(ann_error_report, image_error_report, ex_warning_fn)
assert len(converted_dict_list) != 0, (
f"Loaded zero entries from {dataset_name}. \n"
f" Size of inputs (imgs={len(imgs)}, anns={len(anns)})\n"
f" Image issues ({image_error_report})\n"
f" Instance issues ({ann_error_report})\n"
)
return converted_dict_list
def coco_text_load(
coco_json_file: str,
image_root: str,
source_json_file: Optional[str] = None,
dataset_name: Optional[str] = None,
archive_file: Optional[str] = None,
) -> List[Dict]:
if archive_file is not None:
if comm.get_local_rank() == 0:
extract_archive_file(archive_file, image_root)
comm.synchronize()
if source_json_file is not None:
# Need to convert to coco detection format
loaded_json = convert_coco_text_to_coco_detection_json(
source_json_file, coco_json_file
)
return extended_coco_load(coco_json_file, image_root, dataset_name, loaded_json)
return extended_coco_load(
coco_json_file, image_root, dataset_name, loaded_json=None
)
def extended_coco_load(
json_file: str,
image_root: str,
dataset_name: Optional[str] = None,
loaded_json: Optional[Dict] = None,
image_direct_copy_keys: List[str] = None,
filter_invalid_bbox: Optional[bool] = True,
filter_empty_annotations: Optional[bool] = True,
_post_process_: Optional[Callable] = None,
) -> List[Dict]:
"""
Load a json file with COCO's annotation format.
Currently only supports instance segmentation annotations.
Args:
json_file (str): full path to the json file in COCO annotation format.
        image_root (str): the directory where the images in this json file exist.
dataset_name (str): the name of the dataset (e.g., "coco", "cityscapes").
If provided, this function will also put "thing_classes" into
the metadata associated with this dataset.
loaded_json (str): optional loaded json content, used in InMemoryCOCO to
avoid loading from json_file again.
Returns:
list[dict]: a list of dicts in "Detectron2 Dataset" format. (See DATASETS.md)
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
        2. When `dataset_name=='coco'`, this function will translate COCO's
           non-contiguous category ids to contiguous ids in [0, 80).
"""
json_file = _cache_json_file(json_file)
if loaded_json is None:
coco_api = COCO(json_file)
else:
coco_api = InMemoryCOCO(loaded_json)
associations = coco_api.dataset.get("associations", {})
# Collect classes and remap them starting from 0
all_cat_ids = coco_api.getCatIds()
all_cats = coco_api.loadCats(all_cat_ids)
all_cat_names = [c["name"] for c in sorted(all_cats, key=lambda x: x["id"])]
# Setup id remapping
remapped_id = {}
for cat_id, cat in zip(all_cat_ids, all_cats):
remapped_id[cat_id] = all_cat_names.index(cat["name"])
# Register dataset in metadata catalog
if dataset_name is not None:
# overwrite attrs
meta_dict = MetadataCatalog.get(dataset_name).as_dict()
meta_dict["thing_classes"] = all_cat_names
meta_dict["thing_dataset_id_to_contiguous_id"] = remapped_id
# update MetadataCatalog (cannot change inplace, have to remove)
MetadataCatalog.remove(dataset_name)
MetadataCatalog.get(dataset_name).set(**meta_dict)
# assert the change
assert MetadataCatalog.get(dataset_name).thing_classes == all_cat_names
# Sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
logger.info("Loaded {} images from {}".format(len(imgs), json_file))
for img in imgs:
association = associations.get(img["file_name"], {})
if association:
img["associations"] = association
# Return the coco converted to record list
return convert_to_dict_list(
image_root,
remapped_id,
imgs,
anns,
dataset_name=dataset_name,
all_cat_names=all_cat_names,
image_direct_copy_keys=image_direct_copy_keys,
filter_invalid_bbox=filter_invalid_bbox,
filter_empty_annotations=filter_empty_annotations,
_post_process_=_post_process_,
)
if __name__ == "__main__":
"""
Test the COCO json dataset loader.
Usage:
        python -m d2go.data.extended_coco \
path/to/json path/to/image_root dataset_name
"""
import sys
import cv2
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
logger = setup_logger(name=__name__)
meta = MetadataCatalog.get(sys.argv[3])
dicts = extended_coco_load(sys.argv[1], sys.argv[2], sys.argv[3], ["cat", "dog"])
logger.info("Done loading {} samples.".format(len(dicts)))
    os.makedirs("coco-data-vis", exist_ok=True)
    for d in dicts:
img = cv2.imread(d["file_name"])[:, :, ::-1]
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
fpath = os.path.join("coco-data-vis", os.path.basename(d["file_name"]))
vis.save(fpath)
|
d2go-main
|
d2go/data/extended_coco.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from mobile_cv.common.misc.oss_utils import fb_overwritable
@fb_overwritable()
def _cache_json_file(json_file):
# TODO: entirely rely on PathManager for caching
json_file = os.fspath(json_file)
return json_file
|
d2go-main
|
d2go/data/cache_util.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from d2go.data.extended_coco import _cache_json_file
from detectron2.data import MetadataCatalog
from detectron2.structures import BoxMode
from fvcore.common.timer import Timer
"""
This file contains functions to parse LVIS-format annotations into dicts in the
"Detectron2 format".
"""
logger = logging.getLogger(__name__)
def extended_lvis_load(json_file, image_root, dataset_name=None):
"""
Load a json file in LVIS's annotation format.
Args:
json_file (str): full path to the LVIS json annotation file.
        image_root (str): the directory where the images in this json file exist.
dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
If provided, this function will put "thing_classes" into the metadata
associated with this dataset.
Returns:
list[dict]: a list of dicts in "Detectron2 Dataset" format. (See DATASETS.md)
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
from lvis import LVIS
json_file = _cache_json_file(json_file)
timer = Timer()
lvis_api = LVIS(json_file)
if timer.seconds() > 1:
logger.info(
"Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
)
# sort indices for reproducible results
img_ids = sorted(list(lvis_api.imgs.keys()))
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = lvis_api.load_imgs(img_ids)
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images. Example of anns[0]:
# [{'segmentation': [[192.81,
# 247.09,
# ...
# 219.03,
# 249.06]],
# 'area': 1035.749,
# 'image_id': 1268,
# 'bbox': [192.81, 224.8, 74.73, 33.43],
# 'category_id': 16,
# 'id': 42986},
# ...]
anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
# Sanity check that each annotation has a unique id
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(
ann_ids
), "Annotation ids in '{}' are not unique".format(json_file)
imgs_anns = list(zip(imgs, anns))
logger.info(
"Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)
)
dataset_dicts = []
count_ignore_image_root_warning = 0
for (img_dict, anno_dict_list) in imgs_anns:
record = {}
if "://" not in img_dict["file_name"]:
file_name = img_dict["file_name"]
if img_dict["file_name"].startswith("COCO"):
                # Convert from the COCO 2014 file naming convention of
# COCO_[train/val/test]2014_000000000000.jpg to the 2017 naming
# convention of 000000000000.jpg (LVIS v1 will fix this naming issue)
file_name = file_name[-16:]
record["file_name"] = os.path.join(image_root, file_name)
else:
if image_root is not None:
count_ignore_image_root_warning += 1
if count_ignore_image_root_warning == 1:
logger.warning(
(
"Found '://' in file_name: {}, ignore image_root: {}"
"(logged once per dataset)."
).format(img_dict["file_name"], image_root)
)
record["file_name"] = img_dict["file_name"]
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["not_exhaustive_category_ids"] = img_dict.get(
"not_exhaustive_category_ids", []
)
record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
image_id = record["image_id"] = img_dict["id"]
objs = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same as
# the image_id we're looking at.
# Fails only when the data parsing logic or the annotation file is buggy.
assert anno["image_id"] == image_id
obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
obj["category_id"] = (
anno["category_id"] - 1
) # Convert 1-indexed to 0-indexed
segm = anno["segmentation"]
# filter out invalid polygons (< 3 points)
valid_segm = [
poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6
]
assert len(segm) == len(
valid_segm
), "Annotation contains an invalid polygon with < 3 points"
assert len(segm) > 0
obj["segmentation"] = segm
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
if dataset_name:
meta = MetadataCatalog.get(dataset_name)
meta.thing_classes = get_extended_lvis_instances_meta(lvis_api)["thing_classes"]
return dataset_dicts
def get_extended_lvis_instances_meta(lvis_api):
cat_ids = lvis_api.get_cat_ids()
categories = lvis_api.load_cats(cat_ids)
assert min(cat_ids) == 1 and max(cat_ids) == len(
cat_ids
), "Category ids are not in [1, #categories], as expected"
extended_lvis_categories = [k for k in sorted(categories, key=lambda x: x["id"])]
thing_classes = [k["name"] for k in extended_lvis_categories]
meta = {"thing_classes": thing_classes}
return meta
if __name__ == "__main__":
"""
Test the LVIS json dataset loader.
Usage:
        python -m d2go.data.extended_lvis \
path/to/json path/to/image_root dataset_name vis_limit
"""
import sys
import detectron2.data.datasets # noqa # add pre-defined metadata
import numpy as np
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
from PIL import Image
logger = setup_logger(name=__name__)
meta = MetadataCatalog.get(sys.argv[3])
dicts = extended_lvis_load(sys.argv[1], sys.argv[2], sys.argv[3])
logger.info("Done loading {} samples.".format(len(dicts)))
dirname = "lvis-data-vis"
os.makedirs(dirname, exist_ok=True)
for d in dicts[: int(sys.argv[4])]:
img = np.array(Image.open(d["file_name"]))
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
vis.save(fpath)
|
d2go-main
|
d2go/data/extended_lvis.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import importlib
import logging
import os
from collections import namedtuple
from d2go.data.extended_coco import coco_text_load, extended_coco_load
from d2go.data.extended_lvis import extended_lvis_load
from d2go.data.keypoint_metadata_registry import get_keypoint_metadata
from d2go.utils.helper import get_dir_path
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.registry import Registry
from mobile_cv.common.misc.oss_utils import fb_overwritable
logger = logging.getLogger(__name__)
D2GO_DATASETS_BASE_MODULE = "d2go.datasets"
IM_DIR = "image_directory"
ANN_FN = "annotation_file"
LOAD_KWARGS = "load_kwargs"
COCO_REGISTER_FUNCTION_REGISTRY = Registry("COCO_REGISTER_FUNCTION_REGISTRY")
COCO_REGISTER_FUNCTION_REGISTRY.__doc__ = "Registry - coco register function"
InjectedCocoEntry = namedtuple("InjectedCocoEntry", ["func", "split_dict"])
INJECTED_COCO_DATASETS_LUT = {}
def get_coco_register_function(cfg):
name = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION
return COCO_REGISTER_FUNCTION_REGISTRY.get(name)
def _import_dataset(module_name):
return importlib.import_module(
"{}.{}".format(D2GO_DATASETS_BASE_MODULE, module_name)
)
@COCO_REGISTER_FUNCTION_REGISTRY.register()
def _register_extended_coco(dataset_name, split_dict):
json_file = split_dict[ANN_FN]
image_root = split_dict[IM_DIR]
load_kwargs = split_dict.get(LOAD_KWARGS, {})
# 1. register a function which returns dicts
load_coco_json_func = functools.partial(
extended_coco_load,
json_file=json_file,
image_root=image_root,
dataset_name=dataset_name,
**load_kwargs,
)
DatasetCatalog.register(dataset_name, load_coco_json_func)
# 2. Optionally, add metadata about this split,
# since they might be useful in evaluation, visualization or logging
evaluator_type = split_dict.get("evaluator_type", "coco")
meta_data = split_dict.get("meta_data", {})
MetadataCatalog.get(dataset_name).set(
evaluator_type=evaluator_type,
json_file=json_file,
image_root=image_root,
**meta_data,
)
_add_additional_extended_coco_metadata(dataset_name)
@fb_overwritable()
def _add_additional_extended_coco_metadata(dataset_name):
pass
def _register_extended_lvis(dataset_name, split_dict):
json_file = split_dict[ANN_FN]
image_root = split_dict[IM_DIR]
# 1. register a function which returns dicts
load_lvis_json_func = functools.partial(
extended_lvis_load,
json_file=json_file,
image_root=image_root,
dataset_name=dataset_name,
)
DatasetCatalog.register(dataset_name, load_lvis_json_func)
# 2. Optionally, add metadata about this split,
# since they might be useful in evaluation, visualization or logging
evaluator_type = split_dict.get("evaluator_type", "lvis")
MetadataCatalog.get(dataset_name).set(
evaluator_type=evaluator_type, json_file=json_file, image_root=image_root
)
def _register_coco_text(dataset_name, split_dict):
source_json_file = split_dict[ANN_FN]
coco_json_file = "/tmp/{}.json".format(dataset_name)
ARCHIVE_FN = "archive_file"
# 1. register a function which returns dicts
DatasetCatalog.register(
dataset_name,
functools.partial(
coco_text_load,
coco_json_file=coco_json_file,
image_root=split_dict[IM_DIR],
source_json_file=source_json_file,
dataset_name=dataset_name,
archive_file=split_dict.get(ARCHIVE_FN, None),
),
)
# 2. Optionally, add metadata about this split,
# since they might be useful in evaluation, visualization or logging
evaluator_type = split_dict.get("evaluator_type", "coco")
MetadataCatalog.get(dataset_name).set(
json_file=coco_json_file,
image_root=split_dict[IM_DIR],
evaluator_type=evaluator_type,
)
def inject_coco_datasets(cfg):
names = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.NAMES
im_dirs = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS
json_files = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES
metadata_type = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.KEYPOINT_METADATA
_register_func = get_coco_register_function(cfg)
assert len(names) == len(im_dirs) == len(json_files)
for ds_index, (name, im_dir, json_file) in enumerate(
zip(names, im_dirs, json_files)
):
split_dict = {IM_DIR: im_dir, ANN_FN: json_file}
if len(metadata_type) != 0:
split_dict["meta_data"] = get_keypoint_metadata(metadata_type[ds_index])
logger.info("Inject coco dataset {}: {}".format(name, split_dict))
_register_func(name, split_dict)
INJECTED_COCO_DATASETS_LUT[name] = InjectedCocoEntry(
func=_register_func, split_dict=split_dict
)
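# Example (sketch): the config keys read by inject_coco_datasets(); the dataset
# name and paths below are hypothetical placeholders, and cfg is assumed to be
# an unfrozen D2Go config using the default REGISTER_FUNCTION.
def _example_coco_injection_config(cfg):  # illustrative only, never called
    cfg.D2GO_DATA.DATASETS.COCO_INJECTION.NAMES = ["my_coco_train"]
    cfg.D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS = ["/data/my_coco/images"]
    cfg.D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES = ["/data/my_coco/train.json"]
    inject_coco_datasets(cfg)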
def register_dataset_split(dataset_name, split_dict):
"""
Register a dataset to detectron2's DatasetCatalog and MetadataCatalog.
"""
_DATASET_TYPE_LOAD_FUNC_MAP = {
"COCODataset": _register_extended_coco,
"COCOText": _register_coco_text,
"COCOTextDataset": _register_coco_text,
"LVISDataset": _register_extended_lvis,
}
factory = split_dict.get("DS_TYPE", "COCODataset")
_DATASET_TYPE_LOAD_FUNC_MAP[factory](
dataset_name=dataset_name, split_dict=split_dict
)
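# Example (sketch): a minimal split_dict accepted by register_dataset_split();
# "DS_TYPE" selects the loader and defaults to "COCODataset". The dataset name
# and paths below are hypothetical placeholders.
_EXAMPLE_SPLIT_DICT = {
    IM_DIR: "/data/my_coco/images",
    ANN_FN: "/data/my_coco/train.json",
    "DS_TYPE": "COCODataset",
}
# register_dataset_split("my_coco_train", _EXAMPLE_SPLIT_DICT)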
def register_json_datasets():
json_dataset_names = [
os.path.splitext(filename)[0]
for filename in os.listdir(
get_dir_path(D2GO_DATASETS_BASE_MODULE.replace(".", "/"))
)
if filename.startswith("json_dataset_")
]
json_dataset_names = [
x
for x in json_dataset_names
if x
not in [
"json_dataset_lvis",
"json_dataset_oculus_external",
"json_dataset_people_ai_foot_tracking",
]
]
# load all splits from json datasets
all_splits = {}
for dataset in json_dataset_names:
module = _import_dataset(dataset)
assert (
len(set(all_splits).intersection(set(module.DATASETS))) == 0
), "Name confliction when loading {}".format(dataset)
all_splits.update(module.DATASETS)
# register all splits
for split_name in all_splits:
split_dict = all_splits[split_name]
register_dataset_split(split_name, split_dict)
def register_builtin_datasets():
builtin_dataset_names = [
os.path.splitext(filename)[0]
for filename in os.listdir(
get_dir_path(D2GO_DATASETS_BASE_MODULE.replace(".", "/"))
)
if filename.startswith("builtin_dataset_")
]
for dataset in builtin_dataset_names:
_import_dataset(dataset)
def register_dynamic_datasets(cfg):
for dataset in cfg.D2GO_DATA.DATASETS.DYNAMIC_DATASETS:
assert dataset.startswith("dynamic_dataset_")
_import_dataset(dataset)
|
d2go-main
|
d2go/data/datasets.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
d2go-main
|
d2go/data/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import contextlib
import json
import logging
import os
import re
import shutil
import tempfile
from collections import defaultdict
from unittest import mock
import numpy as np
import torch.utils.data as data
from d2go.config import temp_defrost
from d2go.data.datasets import (
ANN_FN,
IM_DIR,
INJECTED_COCO_DATASETS_LUT,
InjectedCocoEntry,
register_dataset_split,
)
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.build import (
get_detection_dataset_dicts as d2_get_detection_dataset_dicts,
)
from detectron2.data.common import set_default_dataset_from_list_serialize_method
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from mobile_cv.torch.utils_pytorch.shareables import SharedList
logger = logging.getLogger(__name__)
class AdhocDatasetManager:
    # mapping from the new dataset name to an AdhocDataset instance
_REGISTERED = {}
@staticmethod
def add(adhoc_ds):
assert isinstance(adhoc_ds, AdhocDataset)
if adhoc_ds.new_ds_name in AdhocDatasetManager._REGISTERED:
logger.warning(
"Adhoc dataset {} has already been added, skip adding it".format(
adhoc_ds.new_ds_name
)
)
else:
logger.info("Adding new adhoc dataset {} ...".format(adhoc_ds.new_ds_name))
AdhocDatasetManager._REGISTERED[adhoc_ds.new_ds_name] = adhoc_ds
adhoc_ds.register_catalog()
@staticmethod
def remove(adhoc_ds):
try:
assert isinstance(adhoc_ds, AdhocDataset)
if adhoc_ds.new_ds_name not in AdhocDatasetManager._REGISTERED:
logger.warning(
"Adhoc dataset {} has already been removed, skip removing it".format(
adhoc_ds.new_ds_name
)
)
else:
logger.info("Remove adhoc dataset {} ...".format(adhoc_ds.new_ds_name))
del AdhocDatasetManager._REGISTERED[adhoc_ds.new_ds_name]
finally:
adhoc_ds.cleanup()
@staticmethod
@atexit.register
def _atexit():
for ds in AdhocDatasetManager._REGISTERED.values():
logger.info("Remove remaining adhoc dataset: {}".format(ds.new_ds_name))
ds.cleanup()
class AdhocDataset(object):
def __init__(self, new_ds_name):
assert isinstance(new_ds_name, str)
self.new_ds_name = new_ds_name
def register_catalog(self):
raise NotImplementedError()
def cleanup(self):
raise NotImplementedError()
class CallFuncWithJsonFile(object):
"""
    An instance of this class is a parameterless callable that invokes its `func`
    with its `json_file`. It can be registered in DatasetCatalog while still
    providing later access to the json file.
"""
def __init__(self, func, json_file):
self.func = func
self.json_file = json_file
def __call__(self):
return self.func(self.json_file)
class CallFuncWithNameAndJsonFile(object):
"""
    Same purpose as CallFuncWithJsonFile, but also passes the dataset name to `func`.
"""
def __init__(self, func, json_file, name):
self.func = func
self.name = name
self.json_file = json_file
def __call__(self):
return self.func(self.json_file, self.name)
class AdhocCOCODataset(AdhocDataset):
def __init__(self, src_ds_name, new_ds_name):
super().__init__(new_ds_name)
# NOTE: only support single source dataset now
assert isinstance(src_ds_name, str)
self.src_ds_name = src_ds_name
def new_json_dict(self, json_dict):
raise NotImplementedError()
def register_catalog(self):
"""
        An adhoc COCO (json) dataset assumes the derived dataset can be created by
        only changing the json file. Two sources are currently supported: 1) the
        dataset was registered using standard COCO registering functions in D2 or
        `register_dataset_split` from D2Go, in which case the `json_file` from the
        metadata is used to access the json file; 2) the load func in DatasetCatalog
        is an instance of CallFuncWithJsonFile, which gives direct access to the
        json_file. In both cases, the metadata is the same except for the `name`
        and potentially the `json_file`.
"""
logger.info("Register {} from {}".format(self.new_ds_name, self.src_ds_name))
metadata = MetadataCatalog.get(self.src_ds_name)
load_func = DatasetCatalog[self.src_ds_name]
src_json_file = (
load_func.json_file
if isinstance(load_func, CallFuncWithJsonFile)
else metadata.json_file
)
# TODO cache ?
with PathManager.open(src_json_file) as f:
json_dict = json.load(f)
assert "images" in json_dict, "Only support COCO-style json!"
json_dict = self.new_json_dict(json_dict)
self.tmp_dir = tempfile.mkdtemp(prefix="detectron2go_tmp_datasets")
tmp_file = os.path.join(self.tmp_dir, "{}.json".format(self.new_ds_name))
with open(tmp_file, "w") as f:
json.dump(json_dict, f)
# re-register DatasetCatalog
if isinstance(load_func, CallFuncWithJsonFile):
new_func = CallFuncWithJsonFile(func=load_func.func, json_file=tmp_file)
DatasetCatalog.register(self.new_ds_name, new_func)
elif isinstance(load_func, CallFuncWithNameAndJsonFile):
new_func = CallFuncWithNameAndJsonFile(
func=load_func.func, name=self.new_ds_name, json_file=tmp_file
)
DatasetCatalog.register(self.new_ds_name, new_func)
elif self.src_ds_name in INJECTED_COCO_DATASETS_LUT:
_src_func, _src_dict = INJECTED_COCO_DATASETS_LUT[self.src_ds_name]
split_dict = {**_src_dict, ANN_FN: tmp_file, IM_DIR: metadata.image_root}
_src_func(self.new_ds_name, split_dict=split_dict)
INJECTED_COCO_DATASETS_LUT[self.new_ds_name] = InjectedCocoEntry(
func=_src_func, split_dict=split_dict
)
else:
# NOTE: only supports COCODataset as DS_TYPE since we cannot reconstruct
# the split_dict
register_dataset_split(
self.new_ds_name,
split_dict={ANN_FN: tmp_file, IM_DIR: metadata.image_root},
)
        # re-register MetadataCatalog
metadata_dict = metadata.as_dict()
metadata_dict["name"] = self.new_ds_name
if "json_file" in metadata_dict:
metadata_dict["json_file"] = tmp_file
if MetadataCatalog.get(self.new_ds_name):
MetadataCatalog.remove(self.new_ds_name)
MetadataCatalog.get(self.new_ds_name).set(**metadata_dict)
def cleanup(self):
# remove temporarily registered dataset and json file
DatasetCatalog.pop(self.new_ds_name, None)
MetadataCatalog.pop(self.new_ds_name, None)
if hasattr(self, "tmp_dir"):
shutil.rmtree(self.tmp_dir)
class COCOSubsetWithNImages(AdhocCOCODataset):
_SUPPORTED_SAMPLING = ["frontmost", "random"]
def __init__(self, src_ds_name, num_images, sampling):
super().__init__(
src_ds_name=src_ds_name,
new_ds_name="{}_{}{}".format(src_ds_name, sampling, num_images),
)
self.num_images = num_images
self.sampling = sampling
def new_json_dict(self, json_dict):
all_images = json_dict["images"]
if self.sampling == "frontmost":
new_images = all_images[: self.num_images]
elif self.sampling == "random":
# use fixed seed so results are repeatable
indices = np.random.RandomState(seed=42).permutation(len(all_images))
new_images = [all_images[i] for i in indices[: self.num_images]]
else:
raise NotImplementedError(
"COCOSubsetWithNImages doesn't support sampling method: {}".format(
self.sampling
)
)
new_image_ids = {im["id"] for im in new_images}
new_annotations = [
ann for ann in json_dict["annotations"] if ann["image_id"] in new_image_ids
]
json_dict["images"] = new_images
json_dict["annotations"] = new_annotations
return json_dict
class COCOSubsetWithGivenImages(AdhocCOCODataset):
def __init__(self, src_ds_name, file_names, prefix="given"):
super().__init__(
src_ds_name=src_ds_name,
new_ds_name="{}_{}{}".format(src_ds_name, prefix, len(file_names)),
)
self.file_names = file_names
def new_json_dict(self, json_dict):
all_images = json_dict["images"]
file_name_to_im = {im["file_name"]: im for im in all_images}
new_images = [file_name_to_im[file_name] for file_name in self.file_names]
# re-assign image id to keep the order (COCO loads images by id order)
old_id_to_new_id = {im["id"]: i for i, im in enumerate(new_images)}
new_annotations = [
ann
for ann in json_dict["annotations"]
if ann["image_id"] in old_id_to_new_id
]
# update image id
for im in new_images:
im["id"] = old_id_to_new_id[im["id"]]
for anno in new_annotations:
anno["image_id"] = old_id_to_new_id[anno["image_id"]]
json_dict["images"] = new_images
json_dict["annotations"] = new_annotations
return json_dict
class COCOWithClassesToUse(AdhocCOCODataset):
def __init__(self, src_ds_name, classes_to_use):
# check if name is already a derived class and try to reverse it
res = re.match("(?P<src>.+)@(?P<num>[0-9]+)classes", src_ds_name)
if res is not None:
src_ds_name = res["src"]
super().__init__(
src_ds_name=src_ds_name,
new_ds_name="{}@{}classes".format(src_ds_name, len(classes_to_use)),
)
self.classes_to_use = classes_to_use
def new_json_dict(self, json_dict):
categories = json_dict["categories"]
new_categories = [
cat for cat in categories if cat["name"] in self.classes_to_use
]
new_category_ids = {cat["id"] for cat in new_categories}
new_annotations = [
ann
for ann in json_dict["annotations"]
if ann["category_id"] in new_category_ids
]
json_dict["categories"] = new_categories
json_dict["annotations"] = new_annotations
return json_dict
class ClipLengthGroupedDataset(data.IterableDataset):
"""
    Batch data that have the same clip length and similar aspect ratio.
    In this implementation, clips with the same length and whose aspect
    ratio < 1 (or > 1) are batched together. This makes training with
    different clip lengths possible and improves training speed, because
    the images then need less padding to form a batch.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: an iterable. Each element must be a dict with keys
"width" and "height", which will be used to batch data.
batch_size (int):
"""
self.dataset = dataset
self.batch_size = batch_size
self._buckets = defaultdict(list)
def __iter__(self):
for d in self.dataset:
clip_length = len(d["frames"])
h, w = d["height"], d["width"]
aspect_ratio_bucket_id = 0 if h > w else 1
bucket = self._buckets[(clip_length, aspect_ratio_bucket_id)]
bucket.append(d)
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
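# Example (sketch): ClipLengthGroupedDataset batches dicts that carry "frames",
# "height" and "width"; the fake clips below are hypothetical placeholders.
def _example_clip_length_grouping():  # illustrative only, never called
    fake_clips = [
        {"frames": [None] * 4, "height": 480, "width": 640},
        {"frames": [None] * 4, "height": 360, "width": 640},
    ]
    # both clips have length 4 and landscape orientation, so they form one batch
    return [batch for batch in ClipLengthGroupedDataset(fake_clips, batch_size=2)]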
@contextlib.contextmanager
def register_sub_dataset_with_n_images(dataset_name, num_images, sampling):
"""
Temporarily register a sub-dataset created from `dataset_name`, with the first
`num_images` from it.
"""
# when `num_images` is not larger than 0, return original dataset
if num_images <= 0:
yield dataset_name
return
# only support coco for now
assert sampling in COCOSubsetWithNImages._SUPPORTED_SAMPLING
new_dataset = COCOSubsetWithNImages(dataset_name, num_images, sampling)
AdhocDatasetManager.add(new_dataset)
try:
yield new_dataset.new_ds_name
finally:
AdhocDatasetManager.remove(new_dataset)
@contextlib.contextmanager
def register_sub_dataset_with_given_images(*args, **kwargs):
new_dataset = COCOSubsetWithGivenImages(*args, **kwargs)
    AdhocDatasetManager.add(new_dataset)
try:
yield new_dataset.new_ds_name
finally:
AdhocDatasetManager.remove(new_dataset)
@contextlib.contextmanager
def maybe_subsample_n_images(cfg, is_train=False):
"""
Create a new config whose train/test datasets only take a subsample of
    `max_images` images. Use all images (no-op) when `max_images` <= 0.
"""
max_images = cfg.D2GO_DATA.TEST.MAX_IMAGES
sampling = cfg.D2GO_DATA.TEST.SUBSET_SAMPLING
with contextlib.ExitStack() as stack: # python 3.3+
new_splits = tuple(
stack.enter_context(
register_sub_dataset_with_n_images(ds, max_images, sampling)
)
for ds in (cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST)
)
new_cfg = cfg.clone()
with temp_defrost(new_cfg):
if is_train:
new_cfg.DATASETS.TRAIN = new_splits
else:
new_cfg.DATASETS.TEST = new_splits
yield new_cfg
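# Example (sketch): with cfg.D2GO_DATA.TEST.MAX_IMAGES > 0, the test splits are
# temporarily swapped for registered "<name>_<sampling><N>" subsets; cfg is
# assumed to be a valid D2Go config whose test datasets are already registered.
def _example_subsample_test_images(cfg):  # illustrative only, never called
    with maybe_subsample_n_images(cfg) as new_cfg:
        return list(new_cfg.DATASETS.TEST)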
def update_cfg_if_using_adhoc_dataset(cfg):
if cfg.D2GO_DATA.DATASETS.TRAIN_CATEGORIES:
new_train_datasets = [
COCOWithClassesToUse(name, cfg.D2GO_DATA.DATASETS.TRAIN_CATEGORIES)
for name in cfg.DATASETS.TRAIN
]
[AdhocDatasetManager.add(new_ds) for new_ds in new_train_datasets]
with temp_defrost(cfg):
cfg.DATASETS.TRAIN = tuple(ds.new_ds_name for ds in new_train_datasets)
# If present, we also need to update the data set names for the WeightedTrainingSampler
if cfg.DATASETS.TRAIN_REPEAT_FACTOR:
for ds_to_repeat_factor in cfg.DATASETS.TRAIN_REPEAT_FACTOR:
original_ds_name = ds_to_repeat_factor[0]
# Search corresponding data set name, to not rely on the order
for ds in new_train_datasets:
if ds.src_ds_name == original_ds_name:
ds_to_repeat_factor[0] = ds.new_ds_name
break
if cfg.D2GO_DATA.DATASETS.TEST_CATEGORIES:
new_test_datasets = [
COCOWithClassesToUse(ds, cfg.D2GO_DATA.DATASETS.TEST_CATEGORIES)
for ds in cfg.DATASETS.TEST
]
[AdhocDatasetManager.add(new_ds) for new_ds in new_test_datasets]
with temp_defrost(cfg):
cfg.DATASETS.TEST = tuple(ds.new_ds_name for ds in new_test_datasets)
return cfg
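# Example (sketch): restricting training to a subset of categories rewrites the
# train dataset names to the derived "<name>@<K>classes" form; the category
# names are hypothetical and the source datasets are assumed to be registered.
def _example_restrict_train_categories(cfg):  # illustrative only, never called
    with temp_defrost(cfg):
        cfg.D2GO_DATA.DATASETS.TRAIN_CATEGORIES = ["person", "car"]
    return update_cfg_if_using_adhoc_dataset(cfg)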
class _FakeListObj(object):
def __init__(self, size):
self.size = size
def __len__(self):
return self.size
def __getitem__(self, idx):
raise NotImplementedError(
"This is a fake list, accessing this list should not happen"
)
def local_master_get_detection_dataset_dicts(*args, **kwargs):
logger.info("Only load dataset dicts on local master process ...")
dataset_dicts = (
d2_get_detection_dataset_dicts(*args, **kwargs)
if comm.get_local_rank() == 0
else []
)
comm.synchronize()
dataset_size = comm.all_gather(len(dataset_dicts))[0]
if comm.get_local_rank() != 0:
dataset_dicts = _FakeListObj(dataset_size)
return dataset_dicts
@contextlib.contextmanager
def configure_dataset_creation(cfg):
"""
    Context manager for configuring settings used during dataset creation. It supports:
    - offloading the dataset to shared memory to reduce RAM usage.
    - (experimental) offloading the dataset to disk cache to further reduce RAM usage.
    - replacing D2's get_detection_dataset_dicts with a local-master-only version.
"""
dataset_from_list_offload_method = SharedList # use SharedList by default
if cfg.D2GO_DATA.DATASETS.DISK_CACHE.ENABLED:
# delay the import to avoid atexit cleanup
from d2go.data.disk_cache import DiskCachedList
dataset_from_list_offload_method = DiskCachedList
load_dataset_from_local_master = cfg.D2GO_DATA.DATASETS.DISK_CACHE.ENABLED
with contextlib.ExitStack() as stack:
ctx_managers = [
set_default_dataset_from_list_serialize_method(
dataset_from_list_offload_method
)
]
if load_dataset_from_local_master:
ctx_managers.append(
mock.patch(
"detectron2.data.build.get_detection_dataset_dicts",
side_effect=local_master_get_detection_dataset_dicts,
)
)
for ctx in ctx_managers:
stack.enter_context(ctx)
yield
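# Example (sketch): dataset dicts built inside this context are serialized via
# SharedList (or the disk cache when enabled); the loader call is left as a
# comment since detectron2's build helpers are not imported in this module.
def _example_configure_dataset_creation(cfg):  # illustrative only, never called
    with configure_dataset_creation(cfg):
        # from detectron2.data import build_detection_train_loader
        # data_loader = build_detection_train_loader(cfg)
        pass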
|
d2go-main
|
d2go/data/utils.py
|
#!/usr/bin/env python3
from typing import List, NamedTuple, Tuple
from detectron2.utils.registry import Registry
KEYPOINT_METADATA_REGISTRY = Registry("KEYPOINT_METADATA")
KEYPOINT_METADATA_REGISTRY.__doc__ = "Registry keypoint metadata definitions"
class KeypointMetadata(NamedTuple):
names: List[str]
flip_map: List[Tuple[str, str]]
connection_rules: List[Tuple[str, str, Tuple[int, int, int]]]
def to_dict(self):
return {
"keypoint_names": self.names,
"keypoint_flip_map": self.flip_map,
"keypoint_connection_rules": self.connection_rules,
}
def get_keypoint_metadata(name):
return KEYPOINT_METADATA_REGISTRY.get(name)().to_dict()
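# Example (sketch): get_keypoint_metadata() instantiates the registered callable
# and calls to_dict(), so a registered entry should return a KeypointMetadata.
# The two-point skeleton below is a hypothetical placeholder.
def _example_two_point_metadata():  # illustrative only, not registered by default
    return KeypointMetadata(
        names=["left", "right"],
        flip_map=[("left", "right")],
        connection_rules=[("left", "right", (0, 255, 0))],
    )
# KEYPOINT_METADATA_REGISTRY.register()(_example_two_point_metadata)
# get_keypoint_metadata("_example_two_point_metadata")  # -> dict of keypoint_* keys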
|
d2go-main
|
d2go/data/keypoint_metadata_registry.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.utils.registry import Registry
D2GO_DATA_MAPPER_REGISTRY = Registry("D2GO_DATA_MAPPER")
def build_dataset_mapper(cfg, is_train, *args, **kwargs):
name = cfg.D2GO_DATA.MAPPER.NAME
return D2GO_DATA_MAPPER_REGISTRY.get(name)(cfg, is_train, *args, **kwargs)
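# Example (sketch): cfg.D2GO_DATA.MAPPER.NAME selects the mapper by its registered
# name (e.g. "D2GoDatasetMapper"); cfg is assumed to be a valid D2Go config.
def _example_build_mapper(cfg):  # illustrative only, never called
    return build_dataset_mapper(cfg, is_train=True)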
|
d2go-main
|
d2go/data/dataset_mappers/build.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from d2go.data.dataset_mappers.build import D2GO_DATA_MAPPER_REGISTRY
from d2go.data.dataset_mappers.data_reading import (
read_image_with_prefetch,
read_sem_seg_file_with_prefetch,
)
from d2go.utils.helper import retryable
from detectron2.data import detection_utils as utils, transforms as T
from detectron2.data.transforms.augmentation import AugInput, AugmentationList
logger = logging.getLogger(__name__)
PREFETCHED_FILE_NAME = "prefetch_image"
PREFETCHED_SEM_SEG_FILE_NAME = "prefetch_sem_seg"
@D2GO_DATA_MAPPER_REGISTRY.register()
class D2GoDatasetMapper(object):
def __init__(self, cfg, is_train=True, image_loader=None, tfm_gens=None):
self.tfm_gens = (
tfm_gens
if tfm_gens is not None
else utils.build_transform_gen(cfg, is_train)
)
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
# D2GO NOTE: when INPUT.CROP.ENABLED, don't allow using RandomCropOp
assert all(not isinstance(gen, T.RandomCrop) for gen in self.tfm_gens)
else:
self.crop_gen = None
# fmt: off
self.img_format = cfg.INPUT.FORMAT # noqa
self.mask_on = cfg.MODEL.MASK_ON # noqa
self.mask_format = cfg.INPUT.MASK_FORMAT # noqa
self.keypoint_on = cfg.MODEL.KEYPOINT_ON # noqa
# fmt: on
if self.keypoint_on and is_train:
# Flip only makes sense in training
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
cfg.DATASETS.TRAIN
)
else:
self.keypoint_hflip_indices = None
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
if self.load_proposals:
self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
self.proposal_topk = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
self.is_train = is_train
# Setup image loader:
self.image_loader = image_loader
self.backfill_size = cfg.D2GO_DATA.MAPPER.BACKFILL_SIZE
self.retry = cfg.D2GO_DATA.MAPPER.RETRY
self.catch_exception = cfg.D2GO_DATA.MAPPER.CATCH_EXCEPTION
if self.backfill_size:
if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
logger.warning(
"ASPECT_RATIO_GROUPING may not work if image's width & height"
" are not given in json dataset when calling extended_coco_load,"
" if you encounter issue, consider disable ASPECT_RATIO_GROUPING."
)
self._error_count = 0
self._total_counts = 0
self._error_types = {}
def _original_call(self, dataset_dict):
"""
Modified from detectron2's original __call__ in DatasetMapper
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = self._read_image(dataset_dict, format=self.img_format)
if not self.backfill_size:
utils.check_image_size(dataset_dict, image)
image, dataset_dict = self._custom_transform(image, dataset_dict)
inputs = AugInput(image=image)
if "annotations" not in dataset_dict:
transforms = AugmentationList(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens
)(inputs)
image = inputs.image
else:
# pass additional arguments, will only be used when the Augmentation
# takes `annotations` as input
inputs.annotations = dataset_dict["annotations"]
inputs.boxes = [
utils.get_bbox(obj)
for obj in dataset_dict["annotations"]
if obj.get("iscrowd", 0) == 0
]
# Crop around an instance if there are instances in the image.
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
inputs.image = crop_tfm.apply_image(image)
transforms = AugmentationList(self.tfm_gens)(inputs)
image = inputs.image
if self.crop_gen:
transforms = crop_tfm + transforms
# Cache identical transforms in dataset_dict for subclass mappers
# TODO T122215878 Find more explicit way to expose transforms used
dataset_dict["transforms"] = transforms
image_shape = image.shape[:2] # h, w
if image.ndim == 2:
image = np.expand_dims(image, 2)
dataset_dict["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
if self.load_proposals:
utils.transform_proposals(
dataset_dict,
image_shape,
transforms,
proposal_topk=self.proposal_topk,
min_box_size=self.proposal_min_box_size,
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
annos = [
utils.transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.mask_format
)
# Create a tight bounding box from masks, useful when image is cropped
if self.crop_gen and instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = read_sem_seg_file_with_prefetch(
dataset_dict.pop("sem_seg_file_name"),
prefetched=dataset_dict.get(PREFETCHED_SEM_SEG_FILE_NAME, None),
)
if len(sem_seg_gt.shape) > 2:
sem_seg_gt = sem_seg_gt.squeeze(2)
sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
dataset_dict["sem_seg"] = sem_seg_gt
# extend standard D2 semantic segmentation to support multiple segmentation
# files, each file can represent a class
if "multi_sem_seg_file_names" in dataset_dict:
raise NotImplementedError()
if "_post_process_" in dataset_dict:
proc_func = dataset_dict.pop("_post_process_")
dataset_dict = proc_func(dataset_dict)
return dataset_dict
def __call__(self, dataset_dict):
self._total_counts += 1
@retryable(num_tries=self.retry, sleep_time=0.1)
def _f():
return self._original_call(dataset_dict)
if not self.catch_exception:
return _f()
try:
return _f()
except Exception as e:
self._error_count += 1
# if self._error_count % 10 == 1:
# # print the stacktrace for easier debugging
# traceback.print_exc()
error_type = type(e).__name__
self._error_types[error_type] = self._error_types.get(error_type, 0) + 1
if self._error_count % 100 == 0:
logger.warning(
"{}Error when applying transform for dataset_dict: {};"
" error rate {}/{} ({:.2f}%), msg: {}".format(
self._get_logging_prefix(),
dataset_dict,
self._error_count,
self._total_counts,
100.0 * self._error_count / self._total_counts,
repr(e),
)
)
self._log_error_type_stats()
# NOTE: the contract with MapDataset allows return `None` such that
# it'll randomly use other element in the dataset. We use this
# feature to handle error.
return None
def _get_logging_prefix(self):
worker_info = torch.utils.data.get_worker_info()
if not worker_info:
return ""
prefix = "[worker: {}/{}] ".format(worker_info.id, worker_info.num_workers)
return prefix
def _log_error_type_stats(self):
error_type_count_msgs = [
"{}: {}/{} ({}%)".format(
k, v, self._total_counts, 100.0 * v / self._total_counts
)
for k, v in self._error_types.items()
]
logger.warning(
"{}Error statistics:\n{}".format(
self._get_logging_prefix(), "\n".join(error_type_count_msgs)
)
)
def _read_image(self, dataset_dict, format=None):
if not (self.image_loader and self.image_loader.support(dataset_dict)):
# fallback to use D2's read_image
image = read_image_with_prefetch(
dataset_dict["file_name"],
format=format,
prefetched=dataset_dict.get(PREFETCHED_FILE_NAME),
)
if self.backfill_size:
h, w, _ = image.shape
dataset_dict["width"] = w
dataset_dict["height"] = h
return image
image = self.image_loader(dataset_dict)
if self.backfill_size:
dataset_dict["width"] = image.width
dataset_dict["height"] = image.height
return utils.convert_PIL_to_numpy(image, format)
def _custom_transform(self, image, dataset_dict):
"""
Override this method to inject custom transform.
"""
return image, dataset_dict
def __repr__(self):
return (
self.__class__.__name__
+ ":\n"
+ "\n".join(
[
" is_train: {}".format(self.is_train),
" image_loader: {}".format(self.image_loader),
" tfm_gens: \n{}".format(
"\n".join([" - {}".format(x) for x in self.tfm_gens])
),
]
)
)
|
d2go-main
|
d2go/data/dataset_mappers/d2go_dataset_mapper.py
|
from io import BytesIO
import numpy as np
from detectron2.data import detection_utils as utils
from detectron2.utils.file_io import PathManager
from PIL import Image
def read_image_with_prefetch(file_name, format=None, prefetched=None):
if prefetched is None:
return utils.read_image(file_name, format)
image = Image.open(BytesIO(prefetched.numpy().view()))
# work around this bug: https://github.com/python-pillow/Pillow/issues/3973
image = utils._apply_exif_orientation(image)
return utils.convert_PIL_to_numpy(image, format)
def read_sem_seg_file_with_prefetch(file_name: str, prefetched=None):
"""
Segmentation mask annotations can be stored as:
.PNG files
.npy uncompressed numpy files
"""
assert file_name.endswith(".png") or file_name.endswith(".npy")
    sem_seg_type = file_name[-len(".png") :]  # both supported extensions have 4 chars
if sem_seg_type == ".png":
return read_image_with_prefetch(file_name, format="L", prefetched=prefetched)
elif sem_seg_type == ".npy":
if prefetched is None:
with PathManager.open(file_name, "rb") as f:
return np.load(f)
else:
return prefetched.numpy()
|
d2go-main
|
d2go/data/dataset_mappers/data_reading.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.data.dataset_mappers.build import (
build_dataset_mapper,
D2GO_DATA_MAPPER_REGISTRY,
)
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from d2go.data.dataset_mappers.rotated_dataset_mapper import RotatedDatasetMapper
__all__ = [
"build_dataset_mapper",
"D2GO_DATA_MAPPER_REGISTRY",
"D2GoDatasetMapper",
"RotatedDatasetMapper",
]
# Populating registries
# @fb-only: from d2go.data.dataset_mappers import fb as _fb # isort:skip # noqa
|
d2go-main
|
d2go/data/dataset_mappers/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from d2go.data.dataset_mappers.build import D2GO_DATA_MAPPER_REGISTRY
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from detectron2.data import detection_utils as utils, transforms as T
from detectron2.structures import BoxMode, Instances, RotatedBoxes
logger = logging.getLogger(__name__)
@D2GO_DATA_MAPPER_REGISTRY.register()
class RotatedDatasetMapper(D2GoDatasetMapper):
def _original_call(self, dataset_dict):
"""
Modified from detectron2's original __call__ in DatasetMapper
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = self._read_image(dataset_dict, format=self.img_format)
if not self.backfill_size:
utils.check_image_size(dataset_dict, image)
if "annotations" not in dataset_dict:
image, transforms = T.apply_transform_gens(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
)
else:
# Crop around an instance if there are instances in the image.
# USER: Remove if you don't use cropping
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
image = crop_tfm.apply_image(image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
if self.crop_gen:
transforms = crop_tfm + transforms
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
assert not self.load_proposals, "Not supported!"
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
# Convert dataset_dict["annotations"] to dataset_dict["instances"]
annotations = [
obj
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# Convert either rotated box or horizontal box to XYWHA_ABS format
original_boxes = [
BoxMode.convert(
box=obj["bbox"],
from_mode=obj["bbox_mode"],
to_mode=BoxMode.XYWHA_ABS,
)
for obj in annotations
]
transformed_boxes = transforms.apply_rotated_box(
np.array(original_boxes, dtype=np.float64)
)
instances = Instances(image_shape)
instances.gt_classes = torch.tensor(
[obj["category_id"] for obj in annotations], dtype=torch.int64
)
instances.gt_boxes = RotatedBoxes(transformed_boxes)
instances.gt_boxes.clip(image_shape)
dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
return dataset_dict
|
d2go-main
|
d2go/data/dataset_mappers/rotated_dataset_mapper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import logging
from typing import Dict, List, Optional, Tuple
from detectron2.config import CfgNode
from detectron2.data import transforms as d2T
from detectron2.utils.registry import Registry
logger = logging.getLogger(__name__)
TRANSFORM_OP_REGISTRY = Registry("D2GO_TRANSFORM_REGISTRY")
def _json_load(arg_str: str) -> Dict:
try:
return json.loads(arg_str)
except json.decoder.JSONDecodeError as e:
logger.warning("Can't load arg_str: {}".format(arg_str))
raise e
# example repr: "ResizeShortestEdgeOp"
@TRANSFORM_OP_REGISTRY.register()
def ResizeShortestEdgeOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[d2T.Transform]:
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert (
len(min_size) == 2
), "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
tfm_gens = []
if not min_size == 0: # set to zero to disable resize
tfm_gens.append(d2T.ResizeShortestEdge(min_size, max_size, sample_style))
return tfm_gens
# example repr: "ResizeShortestEdgeSquareOp"
@TRANSFORM_OP_REGISTRY.register()
def ResizeShortestEdgeSquareOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[d2T.Transform]:
"""Resize the input to square using INPUT.MIN_SIZE_TRAIN or INPUT.MIN_SIZE_TEST
without keeping aspect ratio
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
assert (
isinstance(min_size, (list, tuple)) and len(min_size) == 1
), "Only a signle size is supported"
min_size = min_size[0]
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
tfm_gens = []
if not min_size == 0: # set to zero to disable resize
tfm_gens.append(d2T.Resize(shape=[min_size, min_size]))
return tfm_gens
@TRANSFORM_OP_REGISTRY.register()
def ResizeOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[d2T.Transform]:
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [d2T.Resize(**kwargs)]
_TRANSFORM_REPR_SEPARATOR = "::"
def parse_tfm_gen_repr(tfm_gen_repr: str) -> Tuple[str, Optional[str]]:
if tfm_gen_repr.count(_TRANSFORM_REPR_SEPARATOR) == 0:
return tfm_gen_repr, None
else:
# Split only after first delimiter, to allow for:
# - nested transforms, e.g:
# 'SomeTransformOp::{"args": ["SubTransform2Op::{\\"param1\\": 0, \\"param2\\": false}", "SubTransform2Op::{\\"param1\\": 0.8}"], "other_args": 2}'
# - list of transforms, e.g.:
# ["SubTransform2Op::{\\"param1\\": 0, \\"param2\\": false}", "SubTransform2Op::{\\"param1\\": 0.8}"]
        # TODO(T144470024): Support recursive parsing. For now, it is the user's responsibility to ensure the nested transforms are parsed correctly.
return tfm_gen_repr.split(_TRANSFORM_REPR_SEPARATOR, 1)
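# Example (sketch): the "::" separator splits the registered op name from its
# optional JSON argument string; only the first separator is honored, e.g.
#   parse_tfm_gen_repr("ResizeShortestEdgeOp")            -> ("ResizeShortestEdgeOp", None)
#   parse_tfm_gen_repr('ResizeOp::{"shape": [224, 224]}') splits into
#       "ResizeOp" and '{"shape": [224, 224]}'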
def build_transform_gen(
cfg: CfgNode, is_train: bool, tfm_gen_repr_list: Optional[List[str]] = None
) -> List[d2T.Transform]:
"""
This function builds a list of TransformGen or Transform objects using a list of
    strings (`tfm_gen_repr_list`). If the list is not provided, cfg.D2GO_DATA.AUG_OPS.TRAIN/TEST is used.
Each string (aka. `tfm_gen_repr`) will be split into `name` and `arg_str` (separated by "::");
the `name` will be used to lookup the registry while `arg_str` will be used as argument.
Each function in registry needs to take `cfg`, `arg_str` and `is_train` as
input, and return a list of TransformGen or Transform objects.
"""
tfm_gen_repr_list = tfm_gen_repr_list or (
cfg.D2GO_DATA.AUG_OPS.TRAIN if is_train else cfg.D2GO_DATA.AUG_OPS.TEST
)
tfm_gens = [
TRANSFORM_OP_REGISTRY.get(name)(cfg, arg_str, is_train)
for name, arg_str in [
parse_tfm_gen_repr(tfm_gen_repr) for tfm_gen_repr in tfm_gen_repr_list
]
]
assert all(isinstance(gens, list) for gens in tfm_gens)
tfm_gens = [gen for gens in tfm_gens for gen in gens]
assert all(isinstance(gen, (d2T.Transform, d2T.TransformGen)) for gen in tfm_gens)
return tfm_gens
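# Example (sketch): the op list can also be passed explicitly instead of coming
# from cfg.D2GO_DATA.AUG_OPS; the target size below is hypothetical.
def _example_build_transform_gen(cfg):  # illustrative only, never called
    return build_transform_gen(
        cfg,
        is_train=True,
        tfm_gen_repr_list=[
            "ResizeShortestEdgeOp",
            'ResizeOp::{"shape": [224, 224]}',
        ],
    )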
|
d2go-main
|
d2go/data/transforms/build.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Callable, List, Union
import detectron2.data.transforms.augmentation as aug
import numpy as np
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data import detection_utils as du
from detectron2.data.transforms.transform import Transform
from fvcore.transforms.transform import BlendTransform
class InvertibleColorTransform(Transform):
"""
Generic wrapper for invertible photometric transforms.
These transformations should only affect the color space and
not the coordinate space of the image (e.g. annotation
coordinates such as bounding boxes should not be changed)
"""
def __init__(self, op: Callable, inverse_op: Callable):
"""
Args:
op (Callable): operation to be applied to the image,
                which takes in an ndarray and returns an ndarray.
            inverse_op (Callable): the inverse of `op`, with the same signature.
"""
if not callable(op):
raise ValueError("op parameter should be callable")
if not callable(inverse_op):
raise ValueError("inverse_op parameter should be callable")
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray) -> np.ndarray:
return self.op(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
def inverse(self) -> Transform:
return InvertibleColorTransform(self.inverse_op, self.op)
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
return segmentation
class RandomContrastYUV(aug.Augmentation):
"""
Randomly transforms contrast for images in YUV format.
See similar:
detectron2.data.transforms.RandomContrast,
detectron2.data.transforms.RandomBrightness
"""
def __init__(self, intensity_min: float, intensity_max: float):
super().__init__()
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
w = np.random.uniform(self.intensity_min, self.intensity_max)
pure_gray = np.zeros_like(img)
pure_gray[:, :, 0] = 0.5
return BlendTransform(src_image=pure_gray, src_weight=1 - w, dst_weight=w)
class RandomSaturationYUV(aug.Augmentation):
"""
Randomly transforms saturation for images in YUV format.
See similar: detectron2.data.transforms.RandomSaturation
"""
def __init__(self, intensity_min: float, intensity_max: float):
super().__init__()
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
assert (
len(img.shape) == 3 and img.shape[-1] == 3
), f"Expected (H, W, 3), image shape {img.shape}"
w = np.random.uniform(self.intensity_min, self.intensity_max)
grayscale = np.zeros_like(img)
grayscale[:, :, 0] = img[:, :, 0]
return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
def convert_rgb_to_yuv_bt601(image: np.ndarray) -> np.ndarray:
"""Convert RGB image in (H, W, C) to YUV format
image: range 0 ~ 255
"""
image = image / 255.0
image = np.dot(image, np.array(du._M_RGB2YUV).T)
return image
def convery_yuv_bt601_to_rgb(image: np.ndarray) -> np.ndarray:
return du.convert_image_to_rgb(image, "YUV-BT.601")
class RGB2YUVBT601(aug.Augmentation):
def __init__(self):
super().__init__()
self.trans = InvertibleColorTransform(
convert_rgb_to_yuv_bt601, convery_yuv_bt601_to_rgb
)
def get_transform(self, image) -> Transform:
return self.trans
class YUVBT6012RGB(aug.Augmentation):
def __init__(self):
super().__init__()
self.trans = InvertibleColorTransform(
convery_yuv_bt601_to_rgb, convert_rgb_to_yuv_bt601
)
def get_transform(self, image) -> Transform:
return self.trans
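# Example (sketch): the YUV jitters above are designed for YUV inputs, so a
# typical augmentation list converts to YUV first and back to RGB afterwards;
# the intensity bounds below are hypothetical.
def _example_yuv_color_jitter_pipeline():  # illustrative only, never called
    return [
        RGB2YUVBT601(),
        RandomContrastYUV(0.9, 1.1),
        RandomSaturationYUV(0.9, 1.1),
        YUVBT6012RGB(),
    ]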
def build_func(
cfg: CfgNode, arg_str: str, is_train: bool, obj
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [obj(**kwargs)]
@TRANSFORM_OP_REGISTRY.register()
def RandomContrastYUVOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
return build_func(cfg, arg_str, is_train, obj=RandomContrastYUV)
@TRANSFORM_OP_REGISTRY.register()
def RandomSaturationYUVOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
return build_func(cfg, arg_str, is_train, obj=RandomSaturationYUV)
@TRANSFORM_OP_REGISTRY.register()
def RGB2YUVBT601Op(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
return build_func(cfg, arg_str, is_train, obj=RGB2YUVBT601)
@TRANSFORM_OP_REGISTRY.register()
def YUVBT6012RGBOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
return build_func(cfg, arg_str, is_train, obj=YUVBT6012RGB)
|
d2go-main
|
d2go/data/transforms/color_yuv.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional, Union
import detectron2.data.transforms.augmentation as aug
import numpy as np
import torchvision.transforms as tvtf
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from d2go.data.transforms.tensor import Array2Tensor, Tensor2Array
from detectron2.config import CfgNode
from fvcore.transforms.transform import Transform
class ToTensorWrapper:
def __init__(self, transform):
self.a2t = Array2Tensor(preserve_dtype=True)
self.transform = transform
self.t2a = Tensor2Array()
def __call__(self, img: np.ndarray):
return self.t2a.apply_image(self.transform(self.a2t.apply_image(img)))
class RandAugmentImage(Transform):
"""Rand Augment transform, only support image transformation"""
def __init__(
self,
num_ops: int = 2,
magnitude: int = 9,
num_magnitude_bins: int = 31,
interpolation: tvtf.functional.InterpolationMode = tvtf.functional.InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
):
transform = tvtf.RandAugment(
num_ops, magnitude, num_magnitude_bins, interpolation, fill
)
self.transform = ToTensorWrapper(transform)
    def apply_image(self, img: np.ndarray) -> np.ndarray:
assert (
img.dtype == np.uint8
), f"Only uint8 image format is supported, got {img.dtype}"
return self.transform(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class TrivialAugmentWideImage(Transform):
"""TrivialAugmentWide transform, only support image transformation"""
def __init__(
self,
num_magnitude_bins: int = 31,
interpolation: tvtf.functional.InterpolationMode = tvtf.functional.InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
):
transform = tvtf.TrivialAugmentWide(num_magnitude_bins, interpolation, fill)
self.transform = ToTensorWrapper(transform)
    def apply_image(self, img: np.ndarray) -> np.ndarray:
assert (
img.dtype == np.uint8
), f"Only uint8 image format is supported, got {img.dtype}"
return self.transform(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class AugMixImage(Transform):
"""AugMix transform, only support image transformation"""
def __init__(
self,
severity: int = 3,
mixture_width: int = 3,
chain_depth: int = -1,
alpha: float = 1.0,
all_ops: bool = True,
interpolation: tvtf.functional.InterpolationMode = tvtf.functional.InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
):
transform = tvtf.AugMix(
severity, mixture_width, chain_depth, alpha, all_ops, interpolation, fill
)
self.transform = ToTensorWrapper(transform)
    def apply_image(self, img: np.ndarray) -> np.ndarray:
assert (
img.dtype == np.uint8
), f"Only uint8 image format is supported, got {img.dtype}"
return self.transform(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
raise NotImplementedError()
# example repr: 'RandAugmentImageOp::{"magnitude": 9}'
@TRANSFORM_OP_REGISTRY.register()
def RandAugmentImageOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandAugmentImage(**kwargs)]
# example repr: 'TrivialAugmentWideImageOp::{"num_magnitude_bins": 31}'
@TRANSFORM_OP_REGISTRY.register()
def TrivialAugmentWideImageOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [TrivialAugmentWideImage(**kwargs)]
# example repr: 'AugMixImageOp::{"severity": 3}'
@TRANSFORM_OP_REGISTRY.register()
def AugMixImageOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [AugMixImage(**kwargs)]
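# Minimal usage sketch (illustrative only; not part of the original module). It assumes
# the "Name::<json args>" repr format shown in the comments above and resolves the op
# through TRANSFORM_OP_REGISTRY, the same registry used by the decorators in this file.
def _example_build_from_repr(
    cfg: CfgNode, repr_str: str = 'RandAugmentImageOp::{"magnitude": 9}'
) -> List[Union[aug.Augmentation, Transform]]:
    name, _, arg_str = repr_str.partition("::")
    op_func = TRANSFORM_OP_REGISTRY.get(name)
    # An empty arg string means "use the op's default arguments"
    return op_func(cfg, arg_str if arg_str else None, is_train=True)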
|
d2go-main
|
d2go/data/transforms/auto_aug.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import random
from typing import List, Optional, Tuple
import cv2
import numpy as np
import torchvision.transforms as T
from d2go.data.transforms.build import TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data.transforms.augmentation import TransformGen
from fvcore.transforms.transform import NoOpTransform, Transform
class AffineTransform(Transform):
def __init__(
self,
M: np.ndarray,
img_w: int,
img_h: int,
flags: Optional[int] = None,
border_mode: Optional[int] = None,
is_inversed_M: bool = False,
):
"""
Args:
will transform img according to affine transform M
"""
super().__init__()
self._set_attributes(locals())
self.warp_kwargs = {}
if flags is not None:
self.warp_kwargs["flags"] = flags
if border_mode is not None:
self.warp_kwargs["borderMode"] = border_mode
def _warp_array(self, input_data: np.array, interp_flag: Optional[int] = None):
warp_kwargs = copy.deepcopy(self.warp_kwargs)
if interp_flag is not None:
flags = warp_kwargs.get("flags", 0)
# remove previous interp and add the new one
flags = (flags - (flags & cv2.INTER_MAX)) + interp_flag
warp_kwargs["flags"] = flags
M = self.M
if self.is_inversed_M:
M = M[:2]
img = cv2.warpAffine(
input_data,
M,
            (int(self.img_w), int(self.img_h)),
**warp_kwargs,
)
return img
def apply_image(self, img: np.ndarray) -> np.ndarray:
return self._warp_array(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
# Add row of ones to enable matrix multiplication
coords = coords.T
ones = np.ones((1, coords.shape[1]))
coords = np.vstack((coords, ones))
M = self.M
if self.is_inversed_M:
M = np.linalg.inv(M)
coords = (M @ coords)[:2, :].T
return coords
def apply_segmentation(self, img: np.ndarray) -> np.ndarray:
return self._warp_array(img, interp_flag=cv2.INTER_NEAREST)
class RandomPivotScaling(TransformGen):
"""
Uniformly pick a random pivot point inside image frame, scaling the image
around the pivot point using the scale factor sampled from a list of
given scales. The pivot point's location is unchanged after the transform.
Arguments:
        scales: List[float]: each element is a positive scale factor; values larger
            than 1.0 enlarge objects after the transform, and values smaller than
            1.0 shrink them.
    """
    def __init__(self, scales: List[float]):
super().__init__()
self._init(locals())
self.scales = scales
def get_transform(self, img: np.ndarray) -> Transform:
img_h, img_w, _ = img.shape
img_h = float(img_h)
img_w = float(img_w)
pivot_y = self._rand_range(0.0, img_h)
pivot_x = self._rand_range(0.0, img_w)
def _interp(p1, p2, alpha):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
p_x = p1[0] + alpha * dx
p_y = p1[1] + alpha * dy
return (p_x, p_y)
scale = np.random.choice(self.scales)
lt = (0.0, 0.0)
rb = (img_w, img_h)
pivot = (pivot_x, pivot_y)
pts1 = np.float32([lt, pivot, rb])
pts2 = np.float32(
[_interp(pivot, lt, scale), pivot, _interp(pivot, rb, scale)],
)
M = cv2.getAffineTransform(pts1, pts2)
return AffineTransform(M, img_w, img_h)
class RandomAffine(TransformGen):
"""
    Apply a random affine transform to the image given
probabilities and ranges in each dimension.
"""
def __init__(
self,
prob: float = 0.5,
angle_range: Tuple[float, float] = (-90, 90),
translation_range: Tuple[float, float] = (0, 0),
scale_range: Tuple[float, float] = (1.0, 1.0),
shear_range: Tuple[float, float] = (0, 0),
fit_in_frame: bool = True,
keep_aspect_ratio: bool = False,
):
"""
Args:
prob (float): probability of applying transform.
angle_range (tuple of integers): min/max rotation angle in degrees
between -180 and 180.
translation_range (tuple of integers): min/max translation
(post re-centered rotation).
scale_range (tuple of floats): min/max scale (post re-centered rotation).
            shear_range (tuple of integers): min/max shear angle value in degrees
                between -180 and 180.
            fit_in_frame: if True, the warped image is scaled to fit inside the
                output frame.
            keep_aspect_ratio: if True, keep the input aspect ratio instead of
                producing a square output whose side equals the larger input dimension.
"""
super().__init__()
# Turn all locals into member variables.
self._init(locals())
def _compute_scale_adjustment(
self,
im_w: float,
im_h: float,
out_w: float,
out_h: float,
center: Tuple[float, float],
angle: float,
shear: Tuple[float, float],
) -> float:
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, [0.0, 0.0], 1.0, shear
)
M_inv.extend([0.0, 0.0, 1.0])
M_inv = np.array(M_inv).reshape((3, 3))
M = np.linalg.inv(M_inv)
# Center in output patch
img_corners = np.array(
[
[0, 0, im_w - 1, im_w - 1],
[0, im_h - 1, 0, im_h - 1],
[1, 1, 1, 1],
]
)
new_corners = M @ img_corners
x_range = np.ceil(np.amax(new_corners[0]) - np.amin(new_corners[0]))
y_range = np.ceil(np.amax(new_corners[1]) - np.amin(new_corners[1]))
# Apply translation and scale after centering in output patch
scale_adjustment = min(out_w / x_range, out_h / y_range)
return scale_adjustment
def get_transform(self, img: np.ndarray) -> Transform:
do = self._rand_range() < self.prob
if not do:
return NoOpTransform()
im_h, im_w = img.shape[:2]
center = [im_w / 2, im_h / 2]
angle = random.uniform(self.angle_range[0], self.angle_range[1])
translation = [
random.uniform(self.translation_range[0], self.translation_range[1]),
random.uniform(self.translation_range[0], self.translation_range[1]),
]
scale = random.uniform(self.scale_range[0], self.scale_range[1])
shear = [
random.uniform(self.shear_range[0], self.shear_range[1]),
random.uniform(self.shear_range[0], self.shear_range[1]),
]
# Determine output image size
max_size = max(im_w, im_h)
out_w, out_h = (im_w, im_h) if self.keep_aspect_ratio else (max_size, max_size)
# Apply translation adjustment
translation_adjustment = [(out_w - im_w) / 2, (out_h - im_h) / 2]
translation[0] += translation_adjustment[0]
translation[1] += translation_adjustment[1]
# Apply scale adjustment
if self.fit_in_frame:
scale_adjustment = self._compute_scale_adjustment(
im_w, im_h, out_w, out_h, center, angle, shear
)
scale *= scale_adjustment
# Compute the affine transform
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, translation, scale, shear
)
M_inv = np.array(M_inv).reshape((2, 3))
M_inv = np.vstack([M_inv, [0.0, 0.0, 1.0]])
return AffineTransform(
M_inv,
out_w,
out_h,
flags=cv2.WARP_INVERSE_MAP + cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REPLICATE,
is_inversed_M=True,
)
# example repr: "RandomPivotScalingOp::[1.0, 0.75, 0.5]"
@TRANSFORM_OP_REGISTRY.register()
def RandomPivotScalingOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
scales = json.loads(arg_str)
assert isinstance(scales, list)
assert all(isinstance(scale, (float, int)) for scale in scales)
return [RandomPivotScaling(scales=scales)]
@TRANSFORM_OP_REGISTRY.register()
def RandomAffineOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
kwargs = json.loads(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomAffine(**kwargs)]
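# Minimal usage sketch (illustrative; the blank 480x640 image and the JSON arguments are
# made up). It shows how the registered op builds a RandomAffine generator whose resulting
# Transform warps the image and maps coordinates consistently.
def _example_random_affine(cfg: CfgNode):
    (tfm_gen,) = RandomAffineOp(cfg, '{"prob": 1.0, "angle_range": [-30, 30]}', is_train=True)
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    transform = tfm_gen.get_transform(image)
    warped = transform.apply_image(image)
    corners = transform.apply_coords(np.array([[0.0, 0.0], [639.0, 479.0]]))
    return warped, corners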
|
d2go-main
|
d2go/data/transforms/affine.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.data.transforms import ( # noqa
affine as _affine,
auto_aug,
blur as _blur,
box_utils as _box_utils,
color_yuv as _color_yuv,
crop as _crop,
d2_native as _d2_native,
)
# @fb-only: from d2go.data.transforms import fb as _fb # isort:skip # noqa
|
d2go-main
|
d2go/data/transforms/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
from typing import Any, List, Tuple, Union
import detectron2.data.transforms.augmentation as aug
import numpy as np
import torch
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data.transforms.transform import Transform
from detectron2.structures.boxes import Boxes
def get_box_union(boxes: Boxes):
"""Merge all boxes into a single box"""
if len(boxes) == 0:
return boxes
bt = boxes.tensor
union_bt = torch.cat(
(torch.min(bt[:, :2], 0).values, torch.max(bt[:, 2:], 0).values)
).reshape(1, -1)
return Boxes(union_bt)
def get_box_from_mask(mask: torch.Tensor) -> Tuple[int, int, int, int]:
"""Find if there are non-zero elements per row/column first and then find
min/max position of those elements.
Only support 2d image (h x w)
Return (x1, y1, w, h) if bbox found, otherwise None
"""
assert len(mask.shape) == 2, f"Invalid shape {mask.shape}"
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
if bool(np.any(rows)) is False or bool(np.any(cols)) is False:
return None
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
assert cmax >= cmin, f"cmax={cmax}, cmin={cmin}"
assert rmax >= rmin, f"rmax={rmax}, rmin={rmin}"
# x1, y1, w, h
return cmin, rmin, cmax - cmin + 1, rmax - rmin + 1
def get_min_box_aspect_ratio(
bbox_xywh: torch.Tensor, target_aspect_ratio: float
) -> torch.Tensor:
"""Get a minimal bbox that matches the target_aspect_ratio
target_aspect_ratio is representation by w/h
bbox are represented by pixel coordinates"""
bbox_xywh = torch.Tensor(bbox_xywh)
box_w, box_h = bbox_xywh[2:]
box_ar = float(box_w) / box_h
if box_ar >= target_aspect_ratio:
new_w = box_w
new_h = float(new_w) / target_aspect_ratio
else:
new_h = box_h
new_w = new_h * target_aspect_ratio
new_wh = torch.Tensor([new_w, new_h])
bbox_center = bbox_xywh[:2] + bbox_xywh[2:] / 2.0
new_xy = bbox_center - new_wh / 2.0
return torch.cat([new_xy, new_wh])
def get_box_center(bbox_xywh: torch.Tensor) -> torch.Tensor:
"""Get the center of the bbox"""
return torch.Tensor(bbox_xywh[:2]) + torch.Tensor(bbox_xywh[2:]) / 2.0
def get_bbox_xywh_from_center_wh(
bbox_center: torch.Tensor, bbox_wh: torch.Tensor
) -> torch.Tensor:
"""Get a bbox from bbox center and the width and height"""
bbox_wh = torch.Tensor(bbox_wh)
bbox_xy = torch.Tensor(bbox_center) - bbox_wh / 2.0
return torch.cat([bbox_xy, bbox_wh])
def get_bbox_xyxy_from_xywh(bbox_xywh: torch.Tensor) -> torch.Tensor:
"""Convert the bbox from xywh format to xyxy format
bbox are represented by pixel coordinates,
the center of pixels are (x + 0.5, y + 0.5)
"""
return torch.Tensor(
[
bbox_xywh[0],
bbox_xywh[1],
bbox_xywh[0] + bbox_xywh[2],
bbox_xywh[1] + bbox_xywh[3],
]
)
def get_bbox_xywh_from_xyxy(bbox_xyxy: torch.Tensor) -> torch.Tensor:
"""Convert the bbox from xyxy format to xywh format"""
return torch.Tensor(
[
bbox_xyxy[0],
bbox_xyxy[1],
bbox_xyxy[2] - bbox_xyxy[0],
bbox_xyxy[3] - bbox_xyxy[1],
]
)
def to_boxes_from_xywh(bbox_xywh: torch.Tensor) -> torch.Tensor:
return Boxes(get_bbox_xyxy_from_xywh(bbox_xywh).unsqueeze(0))
def scale_bbox_center(bbox_xywh: torch.Tensor, target_scale: float) -> torch.Tensor:
"""Scale the bbox around the center of the bbox"""
box_center = get_box_center(bbox_xywh)
box_wh = torch.Tensor(bbox_xywh[2:]) * target_scale
return get_bbox_xywh_from_center_wh(box_center, box_wh)
def offset_bbox(bbox_xywh: torch.Tensor, target_offset: float) -> torch.Tensor:
"""Offset the bbox based on target_offset"""
box_center = get_box_center(bbox_xywh)
new_center = box_center + torch.Tensor(target_offset)
return get_bbox_xywh_from_center_wh(new_center, bbox_xywh[2:])
def clip_box_xywh(bbox_xywh: torch.Tensor, image_size_hw: List[int]):
"""Clip the bbox based on image_size_hw"""
h, w = image_size_hw
bbox_xyxy = get_bbox_xyxy_from_xywh(bbox_xywh)
bbox_xyxy[0] = max(bbox_xyxy[0], 0)
bbox_xyxy[1] = max(bbox_xyxy[1], 0)
bbox_xyxy[2] = min(bbox_xyxy[2], w)
bbox_xyxy[3] = min(bbox_xyxy[3], h)
return get_bbox_xywh_from_xyxy(bbox_xyxy)
def scale_coord(
    target: Union[torch.Tensor, np.ndarray],
    source: Union[torch.Tensor, np.ndarray],
percentage: float,
):
return [((a - b) * percentage + a) for a, b in zip(target, source)]
def pad_coord(
    target: Union[torch.Tensor, np.ndarray],
    source: Union[torch.Tensor, np.ndarray],
fixed_pad: float,
):
return [(np.sign(a - b) * fixed_pad + a) for a, b in zip(target, source)]
class EnlargeBoundingBox(Transform):
"""Enlarge bounding box based on fixed padding or percentage"""
def __init__(
self, percentage: float = None, fixed_pad: int = None, box_only: bool = False
):
super().__init__()
assert percentage is not None or fixed_pad is not None
assert percentage is None or fixed_pad is None
if percentage is not None:
self.xfm_fn = functools.partial(scale_coord, percentage=percentage)
elif fixed_pad is not None:
self.xfm_fn = functools.partial(pad_coord, fixed_pad=fixed_pad)
self.box_only = box_only
def apply_image(self, img: torch.Tensor) -> np.ndarray:
return img
def apply_box(self, coords: Any) -> Any:
# Takes boxes_xyxy
center = (np.array(coords[0, 0:2]) + np.array(coords[0, 2:])) / 2
new_coords = np.zeros_like(coords)
new_coords[0, 0:2] = self.xfm_fn(coords[0, 0:2], center)
new_coords[0, 2:] = self.xfm_fn(coords[0, 2:], center)
return new_coords
def apply_coords(self, coords: Any) -> Any:
if self.box_only:
return coords
assert coords.shape[1] == 2, "Supported 2d inputs only"
center = np.mean(coords, axis=0)
for index in range(coords.shape[0]):
coords[index] = self.xfm_fn(coords[index], center)
return coords
@TRANSFORM_OP_REGISTRY.register()
def EnlargeBoundingBoxOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [EnlargeBoundingBox(**kwargs)]
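# Minimal sketch (illustrative values) chaining the helpers above: grow an xywh box 2x
# around its center, clip it to a 100x200 (h x w) image, and convert it to xyxy format.
def _example_box_pipeline() -> torch.Tensor:
    bbox_xywh = torch.Tensor([10.0, 20.0, 30.0, 40.0])
    grown = scale_bbox_center(bbox_xywh, target_scale=2.0)
    clipped = clip_box_xywh(grown, image_size_hw=[100, 200])
    return get_bbox_xyxy_from_xywh(clipped)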
|
d2go-main
|
d2go/data/transforms/box_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, List, Optional, Union
import numpy as np
import torch
from detectron2.data.transforms.augmentation import Augmentation, AugmentationList
from detectron2.structures import Boxes
from fvcore.transforms.transform import Transform
class AugInput:
"""
Same as AugInput in vision/fair/detectron2/detectron2/data/transforms/augmentation.py
but allows torch.Tensor as input
"""
def __init__(
self,
image: Union[np.ndarray, torch.Tensor],
*,
boxes: Optional[Union[np.ndarray, torch.Tensor, Boxes]] = None,
sem_seg: Optional[Union[np.ndarray, torch.Tensor]] = None,
):
"""
Args:
image (ndarray/torch.Tensor): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
floating point in range [0, 1] or [0, 255]. (C, H, W) for tensor.
boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode
sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element
is an integer label of pixel.
"""
self.image = image
self.boxes = boxes
self.sem_seg = sem_seg
def transform(self, tfm: Transform) -> None:
"""
In-place transform all attributes of this class.
By "in-place", it means after calling this method, accessing an attribute such
as ``self.image`` will return transformed data.
"""
self.image = tfm.apply_image(self.image)
if self.boxes is not None:
self.boxes = tfm.apply_box(self.boxes)
if self.sem_seg is not None:
self.sem_seg = tfm.apply_segmentation(self.sem_seg)
def apply_augmentations(
self, augmentations: List[Union[Augmentation, Transform]]
) -> AugmentationList:
"""
Equivalent of ``AugmentationList(augmentations)(self)``
"""
return AugmentationList(augmentations)(self)
class Tensor2Array(Transform):
"""Convert image tensor (CHW) to np array (HWC)"""
def __init__(self):
super().__init__()
def apply_image(self, img: torch.Tensor) -> np.ndarray:
# CHW -> HWC
assert isinstance(img, torch.Tensor)
assert len(img.shape) == 3, img.shape
return img.cpu().numpy().transpose(1, 2, 0)
def apply_coords(self, coords: Any) -> Any:
return coords
def apply_segmentation(self, segmentation: torch.Tensor) -> np.ndarray:
assert len(segmentation.shape) == 2, segmentation.shape
return segmentation.cpu().numpy()
def inverse(self) -> Transform:
return Array2Tensor()
class Array2Tensor(Transform):
"""Convert image np array (HWC) to torch tensor (CHW)"""
def __init__(self, preserve_dtype: bool = False):
"""
preserve_dtype: always convert to float32 if False
"""
super().__init__()
self.preserve_dtype = preserve_dtype
def apply_image(self, img: np.ndarray) -> torch.Tensor:
# HW(C) -> CHW
assert isinstance(img, np.ndarray)
assert len(img.shape) in [2, 3], img.shape
if len(img.shape) == 2:
# HW -> HWC
img = np.expand_dims(img, axis=2)
if not self.preserve_dtype:
img = img.astype("float32")
return torch.from_numpy(img.transpose(2, 0, 1))
def apply_coords(self, coords: Any) -> Any:
return coords
def apply_segmentation(self, segmentation: np.ndarray) -> torch.Tensor:
assert len(segmentation.shape) == 2, segmentation.shape
return torch.from_numpy(segmentation.astype("long"))
def inverse(self) -> Transform:
return Tensor2Array()
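# Minimal roundtrip sketch (illustrative shapes): Array2Tensor converts an HWC uint8 array
# to a CHW float32 tensor, and its inverse() converts back to an HWC array.
def _example_roundtrip() -> np.ndarray:
    img_hwc = np.zeros((4, 5, 3), dtype=np.uint8)
    to_tensor = Array2Tensor()
    img_chw = to_tensor.apply_image(img_hwc)  # torch.float32 tensor of shape (3, 4, 5)
    return to_tensor.inverse().apply_image(img_chw)  # back to an array of shape (4, 5, 3)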
|
d2go-main
|
d2go/data/transforms/tensor.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
from typing import Any, List, Optional, Tuple, Union
import d2go.data.transforms.box_utils as bu
import detectron2.data.transforms.augmentation as aug
import numpy as np
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data.transforms.transform import ExtentTransform
from detectron2.structures import BoxMode
from fvcore.transforms.transform import CropTransform, NoOpTransform, Transform
class CropBoundary(aug.Augmentation):
"""Crop the boundary of the image by `count` pixel on each side"""
def __init__(self, count=3):
super().__init__()
self.count = count
def get_transform(self, image: np.ndarray) -> Transform:
img_h, img_w = image.shape[:2]
assert self.count < img_h and self.count < img_w
assert img_h > self.count * 2
assert img_w > self.count * 2
box = [self.count, self.count, img_w - self.count * 2, img_h - self.count * 2]
return CropTransform(*box)
class PadTransform(Transform):
def __init__(
self,
x0: int,
y0: int,
w: int,
h: int,
org_w: int,
org_h: int,
pad_mode: str = "constant",
pad_value: float = 0.0,
):
super().__init__()
assert x0 + w <= org_w
assert y0 + h <= org_h
self._set_attributes(locals())
    def apply_image(self, img: np.ndarray) -> np.ndarray:
"""img: HxWxC or HxW"""
assert len(img.shape) == 2 or len(img.shape) == 3
assert img.shape[0] == self.h and img.shape[1] == self.w
pad_width = [
(self.y0, self.org_h - self.h - self.y0),
(self.x0, self.org_w - self.w - self.x0),
*([(0, 0)] if len(img.shape) == 3 else []),
]
pad_args = {"mode": self.pad_mode}
if self.pad_mode == "constant":
pad_args["constant_values"] = self.pad_value
ret = np.pad(img, pad_width=tuple(pad_width), **pad_args)
return ret
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def inverse(self) -> Transform:
return CropTransform(self.x0, self.y0, self.w, self.h, self.org_w, self.org_h)
InvertibleCropTransform = CropTransform
class PadBorderDivisible(aug.Augmentation):
def __init__(self, size_divisibility: int, pad_mode: str = "constant"):
super().__init__()
self.size_divisibility = size_divisibility
self.pad_mode = pad_mode
def get_transform(self, image: np.ndarray) -> Transform:
"""image: HxWxC"""
assert len(image.shape) == 3 and image.shape[2] in [
1,
3,
], f"Invalid image shape {image.shape}"
H, W = image.shape[:2]
new_h = int(math.ceil(H / self.size_divisibility) * self.size_divisibility)
new_w = int(math.ceil(W / self.size_divisibility) * self.size_divisibility)
return PadTransform(0, 0, W, H, new_w, new_h, pad_mode=self.pad_mode)
class PadToSquare(aug.Augmentation):
"""Pad the image to square"""
def __init__(
self,
pad_mode: str = "constant",
pad_value: float = 0.0,
):
super().__init__()
self.pad_mode = pad_mode
self.pad_value = pad_value
def get_transform(self, image: np.ndarray) -> Transform:
"""image: HxWxC"""
assert len(image.shape) == 3 and image.shape[2] in [
1,
3,
], f"Invalid image shape {image.shape}"
H, W = image.shape[:2]
new_h = new_w = max(H, W)
return PadTransform(
0,
0,
W,
H,
new_w,
new_h,
pad_mode=self.pad_mode,
pad_value=self.pad_value,
)
class RandomCropFixedAspectRatio(aug.Augmentation):
def __init__(
self,
crop_aspect_ratios_list: List[float],
scale_range: Optional[Union[List, Tuple]] = None,
offset_scale_range: Optional[Union[List, Tuple]] = None,
):
super().__init__()
assert isinstance(crop_aspect_ratios_list, (list, tuple))
assert (
scale_range is None
or isinstance(scale_range, (list, tuple))
and len(scale_range) == 2
)
assert (
offset_scale_range is None
or isinstance(offset_scale_range, (list, tuple))
and len(offset_scale_range) == 2
)
# [w1/h1, w2/h2, ...]
self.crop_aspect_ratios_list = crop_aspect_ratios_list
# [low, high] or None
self.scale_range = scale_range
# [low, high] or None
self.offset_scale_range = offset_scale_range
self.rng = np.random.default_rng()
def _pick_aspect_ratio(self) -> float:
return self.rng.choice(self.crop_aspect_ratios_list)
def _pick_scale(self) -> float:
if self.scale_range is None:
return 1.0
return self.rng.uniform(*self.scale_range)
def _pick_offset(self, box_w: float, box_h: float) -> Tuple[float, float]:
if self.offset_scale_range is None:
return [0, 0]
offset_scale = self.rng.uniform(*self.offset_scale_range, size=2)
return offset_scale[0] * box_w, offset_scale[1] * box_h
def get_transform(self, image: np.ndarray, sem_seg: np.ndarray) -> Transform:
# HWC or HW for image, HW for sem_seg
assert len(image.shape) in [2, 3]
assert len(sem_seg.shape) == 2
mask_box_xywh = bu.get_box_from_mask(sem_seg)
# do nothing if the mask is empty (the whole image is background)
if mask_box_xywh is None:
return NoOpTransform()
crop_ar = self._pick_aspect_ratio()
target_scale = self._pick_scale()
target_offset = self._pick_offset(*mask_box_xywh[2:])
mask_box_xywh = bu.offset_bbox(mask_box_xywh, target_offset)
mask_box_xywh = bu.scale_bbox_center(mask_box_xywh, target_scale)
target_box_xywh = bu.get_min_box_aspect_ratio(mask_box_xywh, crop_ar)
target_bbox_xyxy = bu.get_bbox_xyxy_from_xywh(target_box_xywh)
return ExtentTransform(
src_rect=target_bbox_xyxy,
output_size=(
int(target_box_xywh[3].item()),
int(target_box_xywh[2].item()),
),
)
# example repr: "CropBoundaryOp::{'count': 3}"
@TRANSFORM_OP_REGISTRY.register()
def CropBoundaryOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [CropBoundary(**kwargs)]
# example repr: 'PadToSquareOp::{"pad_value": 255.0}'
@TRANSFORM_OP_REGISTRY.register()
def PadToSquareOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [PadToSquare(**kwargs)]
# example repr: "RandomCropFixedAspectRatioOp::{'crop_aspect_ratios_list': [0.5], 'scale_range': [0.8, 1.2], 'offset_scale_range': [-0.3, 0.3]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomCropFixedAspectRatioOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomCropFixedAspectRatio(**kwargs)]
class RandomInstanceCrop(aug.Augmentation):
def __init__(
self, crop_scale: Tuple[float, float] = (0.8, 1.6), fix_instance=False
):
"""
Generates a CropTransform centered around the instance.
crop_scale: [low, high] relative crop scale around the instance, this
determines how far to zoom in / out around the cropped instance
"""
super().__init__()
self.crop_scale = crop_scale
self.fix_instance = fix_instance
assert (
isinstance(crop_scale, (list, tuple)) and len(crop_scale) == 2
), crop_scale
def get_transform(self, image: np.ndarray, annotations: List[Any]) -> Transform:
"""
This function will modify instances to set the iscrowd flag to 1 for
annotations not picked. It relies on the dataset mapper to filter those
items out
"""
assert isinstance(annotations, (list, tuple)), annotations
assert all("bbox" in x for x in annotations), annotations
assert all("bbox_mode" in x for x in annotations), annotations
image_size = image.shape[:2]
# filter out iscrowd
annotations = [x for x in annotations if x.get("iscrowd", 0) == 0]
if len(annotations) == 0:
return NoOpTransform()
if not self.fix_instance:
sel_index = np.random.randint(len(annotations))
else:
sel_index = 0
# set iscrowd flag of other annotations to 1 so that they will be
        # filtered out by the dataset mapper (https://fburl.com/diffusion/fg64cb4h)
for idx, instance in enumerate(annotations):
if idx != sel_index:
instance["iscrowd"] = 1
instance = annotations[sel_index]
bbox_xywh = BoxMode.convert(
instance["bbox"], instance["bbox_mode"], BoxMode.XYWH_ABS
)
scale = np.random.uniform(*self.crop_scale)
bbox_xywh = bu.scale_bbox_center(bbox_xywh, scale)
bbox_xywh = bu.clip_box_xywh(bbox_xywh, image_size).int()
return CropTransform(
*bbox_xywh.tolist(), orig_h=image_size[0], orig_w=image_size[1]
)
# example repr: "RandomInstanceCropOp::{'crop_scale': [0.8, 1.6]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomInstanceCropOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomInstanceCrop(**kwargs)]
class CropBoxAug(aug.Augmentation):
"""Augmentation to crop the image based on boxes
Scale the box with `box_scale_factor` around the center before cropping
"""
def __init__(self, box_scale_factor: float = 1.0):
super().__init__()
self.box_scale_factor = box_scale_factor
def get_transform(self, image: np.ndarray, boxes: np.ndarray) -> Transform:
# boxes: 1 x 4 in xyxy format
assert boxes.shape[0] == 1
assert isinstance(image, np.ndarray)
assert isinstance(boxes, np.ndarray)
img_h, img_w = image.shape[0:2]
box_xywh = bu.get_bbox_xywh_from_xyxy(boxes[0])
if self.box_scale_factor != 1.0:
box_xywh = bu.scale_bbox_center(box_xywh, self.box_scale_factor)
box_xywh = bu.clip_box_xywh(box_xywh, [img_h, img_w])
box_xywh = box_xywh.int().tolist()
return CropTransform(*box_xywh, orig_w=img_w, orig_h=img_h)
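# Minimal sketch (illustrative 30x50 image): PadToSquare pads to 50x50 via PadTransform,
# and the PadTransform's inverse() is a CropTransform that recovers the original extent.
def _example_pad_to_square() -> np.ndarray:
    image = np.zeros((30, 50, 3), dtype=np.uint8)
    pad_tfm = PadToSquare(pad_value=255.0).get_transform(image)
    padded = pad_tfm.apply_image(image)  # shape (50, 50, 3)
    return pad_tfm.inverse().apply_image(padded)  # shape (30, 50, 3)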
|
d2go-main
|
d2go/data/transforms/crop.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
from typing import Dict, List, Tuple
import cv2
import detectron2.data.transforms.augmentation as aug
import numpy as np
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from fvcore.transforms.transform import NoOpTransform, Transform
class LocalizedBoxMotionBlurTransform(Transform):
"""Transform to blur provided bounding boxes from an image."""
def __init__(
self,
bounding_boxes: List[List[int]],
k: Tuple[float, float] = (7, 15),
angle: Tuple[float, float] = (0, 360),
direction: Tuple[float, float] = (-1.0, 1.0),
):
import imgaug.augmenters as iaa
super().__init__()
self._set_attributes(locals())
self.aug = iaa.MotionBlur(k, angle, direction, 1)
def apply_image(self, img: np.ndarray) -> np.ndarray:
bbox_regions = [img[y : y + h, x : x + w] for x, y, w, h in self.bounding_boxes]
blurred_boxes = self.aug.augment_images(bbox_regions)
new_img = np.array(img)
for (x, y, w, h), blurred in zip(self.bounding_boxes, blurred_boxes):
new_img[y : y + h, x : x + w] = blurred
return new_img
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
"""Apply no transform on the full-image segmentation."""
return segmentation
def apply_coords(self, coords: np.ndarray):
"""Apply no transform on the coordinates."""
return coords
def inverse(self) -> Transform:
"""The inverse is a No-op, only for geometric transforms."""
return NoOpTransform()
class LocalizedBoxMotionBlur(aug.Augmentation):
"""
    Performs simulated motion blur on bounding box regions of an image.
Randomly selects motion blur parameters from the ranges `k`, `angle`, `direction`.
"""
def __init__(
self,
prob: float = 0.5,
k: Tuple[float, float] = (7, 15),
angle: Tuple[float, float] = (0, 360),
direction: Tuple[float, float] = (-1.0, 1.0),
):
super().__init__()
self._init(locals())
def _validate_bbox_xywh_within_bounds(
self, bbox: List[int], img_h: int, img_w: int
):
x, y, w, h = bbox
assert x >= 0, f"Invalid x {x}"
assert y >= 0, f"Invalid y {x}"
assert y + h <= img_h, f"Invalid right {x+w} (img width {img_w})"
assert y + h <= img_h, f"Invalid bottom {y+h} (img height {img_h})"
def get_transform(self, image: np.ndarray, annotations: List[Dict]) -> Transform:
do_tfm = self._rand_range() < self.prob
if do_tfm:
return self._get_blur_transform(image, annotations)
else:
return NoOpTransform()
def _get_blur_transform(
self, image: np.ndarray, annotations: List[Dict]
) -> Transform:
"""
Return a `Transform` that simulates motion blur within the image's bounding box regions.
"""
img_h, img_w = image.shape[:2]
bboxes = [ann["bbox"] for ann in annotations]
# Debug
for bbox in bboxes:
self._validate_bbox_xywh_within_bounds(bbox, img_h, img_w)
return LocalizedBoxMotionBlurTransform(
bboxes,
k=self.k,
angle=self.angle,
direction=self.direction,
)
# example repr: "LocalizedBoxMotionBlurOp::{'prob': 0.5, 'k': [3,7], 'angle': [0, 360]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomLocalizedBoxMotionBlurOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Transform]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [LocalizedBoxMotionBlur(**kwargs)]
class MotionBlurTransform(Transform):
def __init__(
self,
k: Tuple[float, float] = (7, 15),
angle: Tuple[float, float] = (0, 360),
direction: Tuple[float, float] = (-1.0, 1.0),
):
"""
Args:
will apply the specified blur to the image
"""
super().__init__()
self._set_attributes(locals())
self.k = k
self.angle = angle
self.direction = direction
def apply_image(self, img: np.ndarray) -> np.ndarray:
        # Imported here and not in __init__ to avoid linting errors
# also, imported here and not in the header section
# since the rest of the code does not have this dependency
import imgaug.augmenters as iaa
aug = iaa.MotionBlur(self.k, self.angle, self.direction, 1)
img = aug.augment_image(img)
return img
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
return segmentation
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
class RandomMotionBlur(aug.Augmentation):
"""
Apply random motion blur.
"""
def __init__(
self,
prob: float = 0.5,
k: Tuple[float, float] = (3, 7),
angle: Tuple[float, float] = (0, 360),
direction: Tuple[float, float] = (-1.0, 1.0),
):
"""
Args:
prob (float): probability of applying transform
k (tuple): refer to `iaa.MotionBlur`
angle (tuple): refer to `iaa.MotionBlur`
direction (tuple): refer to `iaa.MotionBlur`
"""
super().__init__()
# Turn all locals into member variables.
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
do = self._rand_range() < self.prob
if do:
return MotionBlurTransform(self.k, self.angle, self.direction)
else:
return NoOpTransform()
# example repr: "RandomMotionBlurOp::{'prob': 0.5, 'k': [3,7], 'angle': [0, 360]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomMotionBlurOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomMotionBlur(**kwargs)]
class GaussianBlurTransform(Transform):
def __init__(
self,
k: int = 3,
sigma_range: Tuple[float, float] = (0.3, 0.3),
):
"""
Args:
will apply the specified blur to the image
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray) -> np.ndarray:
sigma = random.uniform(*self.sigma_range)
img_out = cv2.GaussianBlur(img, (self.k, self.k), sigma)
return img_out
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
return segmentation
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
class RandomGaussianBlur(aug.Augmentation):
"""
    Apply random Gaussian blur.
"""
def __init__(
self,
prob: float = 0.5,
k: int = 3,
sigma_range: Tuple[float, float] = (0.3, 0.3),
):
"""
Args:
prob (float): probability of applying transform
k (int): kernel size
sigma_range (tuple): min, max of sigma gaussian filter used
"""
super().__init__()
# Turn all locals into member variables.
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
do = self._rand_range() < self.prob
if do:
return GaussianBlurTransform(self.k, self.sigma_range)
else:
return NoOpTransform()
# example repr: "RandomGaussianBlurOp::{'prob': 0.5, 'k': 5, 'sigma': [0.1, 2]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomGaussianBlurOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomGaussianBlur(**kwargs)]
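# Minimal sketch (illustrative arguments): build the Gaussian blur augmentation via its
# registered op and apply it to a dummy image; with prob=1.0 a GaussianBlurTransform is
# always returned by get_transform.
def _example_gaussian_blur(cfg: CfgNode) -> np.ndarray:
    (blur_aug,) = RandomGaussianBlurOp(
        cfg, '{"prob": 1.0, "k": 5, "sigma_range": [0.1, 2.0]}', is_train=True
    )
    image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
    return blur_aug.get_transform(image).apply_image(image)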
|
d2go-main
|
d2go/data/transforms/blur.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import List, Optional, Union
import detectron2.data.transforms.augmentation as aug
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data import transforms as d2T
from detectron2.projects.point_rend import ColorAugSSDTransform
logger = logging.getLogger(__name__)
D2_RANDOM_TRANSFORMS = {
"RandomBrightness": d2T.RandomBrightness,
"RandomContrast": d2T.RandomContrast,
"RandomCrop": d2T.RandomCrop,
"RandomRotation": d2T.RandomRotation,
"RandomExtent": d2T.RandomExtent,
"RandomFlip": d2T.RandomFlip,
"RandomSaturation": d2T.RandomSaturation,
"RandomLighting": d2T.RandomLighting,
"RandomResize": d2T.RandomResize,
"FixedSizeCrop": d2T.FixedSizeCrop,
"ResizeScale": d2T.ResizeScale,
"MinIoURandomCrop": d2T.MinIoURandomCrop,
}
def build_func(
cfg: CfgNode, arg_str: str, is_train: bool, name: str
) -> List[Union[aug.Augmentation, d2T.Transform]]:
assert is_train, "Random augmentation is for training only"
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [D2_RANDOM_TRANSFORMS[name](**kwargs)]
# example 1: RandomFlipOp
# example 2: RandomFlipOp::{}
# example 3: RandomFlipOp::{"prob":0.5}
# example 4: RandomBrightnessOp::{"intensity_min":1.0, "intensity_max":2.0}
@TRANSFORM_OP_REGISTRY.register()
def RandomBrightnessOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomBrightness")
@TRANSFORM_OP_REGISTRY.register()
def RandomContrastOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomContrast")
@TRANSFORM_OP_REGISTRY.register()
def RandomCropOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomCrop")
@TRANSFORM_OP_REGISTRY.register()
def RandomRotationOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomRotation")
@TRANSFORM_OP_REGISTRY.register()
def RandomExtentOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomExtent")
@TRANSFORM_OP_REGISTRY.register()
def RandomFlipOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomFlip")
@TRANSFORM_OP_REGISTRY.register()
def RandomSaturationOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomSaturation")
@TRANSFORM_OP_REGISTRY.register()
def RandomLightingOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomLighting")
@TRANSFORM_OP_REGISTRY.register()
def RandomSSDColorAugOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
assert "img_format" not in kwargs
return [ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT, **kwargs)]
# example repr: ResizeScaleOp::{"min_scale": 0.1, "max_scale": 2.0, "target_height": 1024, "target_width": 1024}
@TRANSFORM_OP_REGISTRY.register()
def ResizeScaleOp(
cfg: CfgNode, arg_str: Optional[str], is_train: bool
) -> List[aug.Augmentation]:
return build_func(cfg, arg_str, is_train, name="ResizeScale")
@TRANSFORM_OP_REGISTRY.register()
def MinIoURandomCropOp(
cfg: CfgNode, arg_str: Optional[str], is_train: bool
) -> List[aug.Augmentation]:
return build_func(cfg, arg_str, is_train, name="MinIoURandomCrop")
# example repr: FixedSizeCropOp::{"crop_size": [1024, 1024]}
@TRANSFORM_OP_REGISTRY.register()
def FixedSizeCropOp(
cfg: CfgNode, arg_str: Optional[str], is_train: bool
) -> List[aug.Augmentation]:
return build_func(cfg, arg_str, is_train, name="FixedSizeCrop")
# example repr: RandomResizeOp::{"shape_list": [[224, 224], [256, 256], [320, 320]]}
@TRANSFORM_OP_REGISTRY.register()
def RandomResizeOp(
cfg: CfgNode, arg_str: Optional[str], is_train: bool
) -> List[aug.Augmentation]:
return build_func(cfg, arg_str, is_train, name="RandomResize")
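# Minimal sketch (illustrative): all the thin wrappers above funnel into build_func, so a
# repr such as 'RandomFlipOp::{"prob": 0.5}' resolves to detectron2's RandomFlip.
def _example_random_flip(cfg: CfgNode) -> List[Union[aug.Augmentation, d2T.Transform]]:
    return RandomFlipOp(cfg, '{"prob": 0.5}', is_train=True)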
|
d2go-main
|
d2go/data/transforms/d2_native.py
|
#!/usr/bin/env python3
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import logging
from functools import partial
import torch.nn as nn
from d2go.config import CfgNode as CN
from d2go.modeling import modeling_hook as mh
from d2go.registry.builtin import MODELING_HOOK_REGISTRY
from d2go.trainer.helper import D2GO_WRAP_POLICY_REGISTRY
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
checkpoint_wrapper,
CheckpointImpl,
)
logger = logging.getLogger(__name__)
def add_activation_checkpoint_configs(_C: CN):
_C.ACTIVATION_CHECKPOINT = CN()
_C.ACTIVATION_CHECKPOINT.REENTRANT = False
# Find autowrap policy at D2GO_WRAP_POLICY_REGISTRY, or use '' to disable autowrap
_C.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY = "always_wrap_policy"
# A list of layer cls names to wrap, case sensitive
_C.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS = []
@MODELING_HOOK_REGISTRY.register()
class ActivationCheckpointModelingHook(mh.ModelingHook):
"""Modeling hook that wraps model in activation checkpoint based on config"""
def apply(self, model: nn.Module) -> nn.Module:
logger.info("Activation Checkpointing is used")
wrapper_fn = partial(
checkpoint_wrapper,
checkpoint_impl=CheckpointImpl.NO_REENTRANT
if not self.cfg.ACTIVATION_CHECKPOINT.REENTRANT
else CheckpointImpl.REENTRANT,
)
policy_name = self.cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY
assert (
policy_name != "size_based_auto_wrap_policy"
), "ActivationCheckpointing should always be wrapped at module boundary"
policy_kwargs = {
"layer_names": self.cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS,
}
auto_wrap_policy = (
D2GO_WRAP_POLICY_REGISTRY.get(policy_name)(model, **policy_kwargs)
if policy_name != ""
else lambda _: True
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=wrapper_fn, auto_wrap_policy=auto_wrap_policy
)
return model
def unapply(self, model: nn.Module) -> nn.Module:
raise NotImplementedError(
"ActivationCheckpointModelingHook.unapply() not implemented: can't unwrap an activation checkpoint module"
)
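# Minimal usage sketch (illustrative; assumes the ModelingHook base class stores the cfg it
# is constructed with, as the self.cfg accesses above imply). It wraps every nn.Linear in a
# toy module with non-reentrant activation checkpointing.
def _example_activation_checkpointing() -> nn.Module:
    cfg = CN()
    add_activation_checkpoint_configs(cfg)
    cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY = "layer_based_auto_wrap_policy"
    cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS = ["Linear"]
    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4))
    return ActivationCheckpointModelingHook(cfg).apply(model)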
|
d2go-main
|
d2go/trainer/activation_checkpointing.py
|
#!/usr/bin/env python3
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import contextlib
import logging
from enum import Enum
from typing import Generator, Optional
import torch
import torch.nn as nn
from d2go.config import CfgNode as CN
from d2go.modeling.modeling_hook import ModelingHook
from d2go.registry.builtin import MODELING_HOOK_REGISTRY
from d2go.trainer.helper import D2GO_WRAP_POLICY_REGISTRY, parse_precision_from_string
from torch.ao.pruning import fqn_to_module
from torch.cuda.amp import GradScaler
from torch.distributed.fsdp.fully_sharded_data_parallel import (
BackwardPrefetch,
CPUOffload,
FullStateDictConfig,
FullyShardedDataParallel as FSDP,
LocalStateDictConfig,
MixedPrecision,
ShardedStateDictConfig,
ShardingStrategy,
StateDictType,
)
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
logger = logging.getLogger(__name__)
def add_fsdp_configs(_C: CN):
_C.FSDP = CN()
_C.FSDP.ALGORITHM = "grad_optim" # 'grad_optim', 'full', 'hybrid', 'hybrid_zero2'
# Configs for fully sharded data parallel (fsdp)
# Check out https://pytorch.org/docs/stable/fsdp.html
# and docstring of torch.distributed.fsdp.fully_sharded_data_parallel
_C.FSDP.CPU_OFFLOAD = False
_C.FSDP.BACKWARD_PREFETCH = True
_C.FSDP.USE_ORIG_PARAMS = False
# Find autowrap policy at D2GO_WRAP_POLICY_REGISTRY, or use '' to disable autowrap
_C.FSDP.AUTO_WRAP_POLICY = "never_wrap_policy"
_C.FSDP.AUTO_WRAP_MIN_PARAMS = int(1e4)
# A list of layer cls names to wrap, case sensitive
_C.FSDP.AUTO_WRAP_LAYER_CLS = []
# Whether to use local state dict -- superseded by STATE_DICT_TYPE
_C.FSDP.USE_LOCAL_STATE_DICT = True
# State dict type to use when calling FSDPWrapper.state_dict() (used when saving).
# If None, defaults to checking the value of USE_LOCAL_STATE_DICT
_C.FSDP.STATE_DICT_TYPE = "SHARDED_STATE_DICT"
# Whether to offload state dict to cpu
_C.FSDP.STATE_DICT_CPU_OFFLOAD = False
# Whether to materialize state dict on rank 0
_C.FSDP.STATE_DICT_RANK0_ONLY = True
# The ignored modules, if any
_C.FSDP.IGNORED_MODULES = None
# Whether to prefetch in forward pass
_C.FSDP.FORWARD_PREFETCH_OPTION = "no"
# if False, this allows the CPU thread to schedule all-gathers without any extra synchronization
_C.FSDP.LIMIT_ALL_GATHERS = False
class ShardingAlgorithm(str, Enum):
"""
This enum specifies the sharding algorithm to be used by FullyShardedDataParallel (FSDP).
It matches the strings used in D2Go config with the enum class :class:`ShardingStrategy` used by Pytorch FSDP module:
"grad_optim" => ShardingAlgorithm.SHARD_GRAD_OP => ShardingStrategy.SHARD_GRAD_OP
"full" => ShardingAlgorithm.FULL_SHARD => ShardingStrategy.FULL_SHARD
"hybrid" => ShardingAlgorithm.HYBRID_SHARD => ShardingStrategy.HYBRID_SHARD
"hybrid_zero2" => ShardingAlgorithm.HYBRID_SHARD_ZERO2 => ShardingStrategy._HYBRID_SHARD_ZERO2
"""
SHARD_GRAD_OP = "grad_optim"
FULL_SHARD = "full"
HYBRID_SHARD = "hybrid"
HYBRID_SHARD_ZERO2 = "hybrid_zero2"
class ForwardPrefetchOption(str, Enum):
"""
This enum specifies the forward prefetch types to be used by FullyShardedDataParallel (FSDP).
"auto" => Use the default forward prefetch mechanism in FSDP.
"manual" => Use custom forward prefetch mechansim, implemented as training hooks.
"no" => No forward prefetch.
"""
AUTO = "auto"
MANUAL = "manual"
NO = "no"
def is_fsdp_enabled(cfg):
return "FSDPModelingHook" in cfg.MODEL.MODELING_HOOKS
def get_grad_scaler(cfg):
return ShardedGradScaler() if is_fsdp_enabled(cfg) else GradScaler()
class FSDPWrapper(FSDP):
def __init__(
self,
model,
state_dict_type: StateDictType,
load_state_dict_type: StateDictType,
amp_autocast_dtype: Optional[torch.dtype] = None,
state_dict_cpu_offload: bool = True,
state_dict_rank0_only: bool = True,
**fsdp_kwargs,
):
self.precision = amp_autocast_dtype
self.state_dict_type = state_dict_type
self.load_state_dict_type = load_state_dict_type
self.offload_to_cpu = state_dict_cpu_offload
self.rank0_only = state_dict_rank0_only
super().__init__(model, **fsdp_kwargs)
def forward(self, *args, **kwargs):
# Wrap forward() in autocast if mixed precision is enabled
if self.precision is not None and not torch.is_autocast_enabled():
from torch.cuda.amp import autocast
with autocast(dtype=self.precision):
return super().forward(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@contextlib.contextmanager
def state_dict_type_and_config(self, state_dict_type: StateDictType) -> Generator:
if state_dict_type == StateDictType.LOCAL_STATE_DICT:
# only offload_to_cpu=False is supported for local state dict
state_dict_config = LocalStateDictConfig(offload_to_cpu=False)
elif state_dict_type == StateDictType.FULL_STATE_DICT:
state_dict_config = FullStateDictConfig(
offload_to_cpu=self.offload_to_cpu, rank0_only=self.rank0_only
)
else:
state_dict_config = ShardedStateDictConfig(
offload_to_cpu=self.offload_to_cpu
)
with FSDP.state_dict_type(self, state_dict_type, state_dict_config):
yield
def state_dict(self, *args, **kwargs):
# NOTE: model.state_dict() needs to be called by all ranks because synchronization primitives are used
with self.state_dict_type_and_config(self.state_dict_type):
return super().state_dict(*args, **kwargs)
def load_state_dict(
self,
state_dict,
*args,
**kwargs,
):
with self.state_dict_type_and_config(self.load_state_dict_type):
return super().load_state_dict(state_dict, *args, **kwargs)
def build_fsdp(
model,
*,
sharding_algorithm: str = ShardingAlgorithm.FULL_SHARD,
auto_wrap_policy_name: str = "",
auto_wrap_policy_kwargs: Optional[dict] = None,
use_cpu_offload: bool = False,
use_backward_prefetch: bool = True,
param_dtype: Optional[torch.dtype] = None,
reduce_dtype: Optional[torch.dtype] = None,
buffer_dtype: Optional[torch.dtype] = None,
amp_autocast_dtype: Optional[torch.dtype] = None,
# TODO: to remove after migration to state_dict_type completes
use_local_state_dict: bool = False,
load_local_state_dict: bool = False,
state_dict_type: Optional[StateDictType] = None,
state_dict_cpu_offload: bool = True,
state_dict_rank0_only: bool = True,
ignored_modules: Optional[nn.Module] = None,
forward_prefetch: bool = False,
use_orig_params: bool = False,
device_id: Optional[int] = None,
limit_all_gathers: bool = False,
):
if sharding_algorithm == ShardingAlgorithm.SHARD_GRAD_OP:
sharding_strategy = ShardingStrategy.SHARD_GRAD_OP
logger.info("Optimizer + Gradient State Sharding (ZeRO-2) is used")
elif sharding_algorithm == ShardingAlgorithm.FULL_SHARD:
sharding_strategy = ShardingStrategy.FULL_SHARD
logger.info("Optimizer + Gradient + Horizontal Model Sharding (ZeRO-3) is used")
elif sharding_algorithm == ShardingAlgorithm.HYBRID_SHARD:
sharding_strategy = ShardingStrategy.HYBRID_SHARD
logger.info(
"Optimizer + Gradient + Horizontal Model Sharding (ZeRO-3) within a node is used"
)
elif sharding_algorithm == ShardingAlgorithm.HYBRID_SHARD_ZERO2:
sharding_strategy = ShardingStrategy._HYBRID_SHARD_ZERO2
logger.info(
"Optimizer + Gradient State Sharding (ZeRO-2) within a node is used"
)
else:
raise ValueError(
f"Invalid sharding algorithm for FSDP. Can be {ShardingAlgorithm.SHARD_GRAD_OP}, "
+ f"{ShardingAlgorithm.FULL_SHARD}, {ShardingAlgorithm.HYBRID_SHARD}, or {ShardingAlgorithm.HYBRID_SHARD_ZERO2}."
)
auto_wrap_policy = (
D2GO_WRAP_POLICY_REGISTRY.get(auto_wrap_policy_name)(
model, **auto_wrap_policy_kwargs
)
if auto_wrap_policy_name != ""
else None
)
cpu_offload = CPUOffload(offload_params=use_cpu_offload)
mixed_precision = MixedPrecision(
param_dtype=param_dtype,
reduce_dtype=reduce_dtype,
buffer_dtype=buffer_dtype,
keep_low_precision_grads=False,
)
backward_prefetch = (
BackwardPrefetch.BACKWARD_PRE
if use_backward_prefetch
else BackwardPrefetch.BACKWARD_POST
)
fsdp_kwargs = {
"sharding_strategy": sharding_strategy,
"cpu_offload": cpu_offload,
"mixed_precision": mixed_precision,
"auto_wrap_policy": auto_wrap_policy,
"backward_prefetch": backward_prefetch,
"ignored_modules": ignored_modules,
"forward_prefetch": forward_prefetch,
"use_orig_params": use_orig_params,
"device_id": torch.cuda.current_device() if not device_id else device_id,
"limit_all_gathers": limit_all_gathers,
}
# default to using use_local_state_dict if state_dict_type is None
if not state_dict_type:
_state_dict_type = (
StateDictType.LOCAL_STATE_DICT
if use_local_state_dict
else StateDictType.FULL_STATE_DICT
)
else:
_state_dict_type = state_dict_type
# load_state_dict_type defaults to load_local_state_dict
_load_state_dict_type = (
StateDictType.LOCAL_STATE_DICT
if load_local_state_dict
else StateDictType.FULL_STATE_DICT
)
wrapper_kwargs = {
"amp_autocast_dtype": amp_autocast_dtype,
"state_dict_type": _state_dict_type,
"load_state_dict_type": _load_state_dict_type,
"state_dict_cpu_offload": state_dict_cpu_offload,
"state_dict_rank0_only": state_dict_rank0_only,
}
return FSDPWrapper(model, **wrapper_kwargs, **fsdp_kwargs)
@MODELING_HOOK_REGISTRY.register()
class FSDPModelingHook(ModelingHook):
"""Modeling hook that wraps model in FSDP based on config"""
def apply(self, model: nn.Module) -> FSDPWrapper:
# SOLVER.AMP.ENABLED and SOLVER.AMP.PRECISION controls mixed precision for all parameters, buffers and reduce in FSDP
precision_dtype = (
parse_precision_from_string(self.cfg.SOLVER.AMP.PRECISION, lightning=False)
if self.cfg.SOLVER.AMP.ENABLED
else None
)
ignored_modules = None
if isinstance(self.cfg.FSDP.IGNORED_MODULES, list):
ignored_modules = []
for mod_name in self.cfg.FSDP.IGNORED_MODULES:
mod = fqn_to_module(model, mod_name)
assert mod is not None, f"Module {mod_name} cannot be found in model."
ignored_modules.append(mod)
forward_prefetch = (
self.cfg.FSDP.FORWARD_PREFETCH_OPTION == ForwardPrefetchOption.AUTO
)
_state_dict_type = (
StateDictType[self.cfg.FSDP.STATE_DICT_TYPE]
if self.cfg.FSDP.STATE_DICT_TYPE
else None
)
wrapped_model = build_fsdp(
model,
sharding_algorithm=self.cfg.FSDP.ALGORITHM,
auto_wrap_policy_name=self.cfg.FSDP.AUTO_WRAP_POLICY,
auto_wrap_policy_kwargs={
"min_num_params": self.cfg.FSDP.AUTO_WRAP_MIN_PARAMS,
"layer_names": self.cfg.FSDP.AUTO_WRAP_LAYER_CLS,
},
use_cpu_offload=self.cfg.FSDP.CPU_OFFLOAD,
use_backward_prefetch=self.cfg.FSDP.BACKWARD_PREFETCH,
param_dtype=precision_dtype,
reduce_dtype=precision_dtype,
buffer_dtype=None,
amp_autocast_dtype=precision_dtype,
use_local_state_dict=self.cfg.FSDP.USE_LOCAL_STATE_DICT,
load_local_state_dict=self.cfg.FSDP.USE_LOCAL_STATE_DICT,
state_dict_type=_state_dict_type,
state_dict_cpu_offload=self.cfg.FSDP.STATE_DICT_CPU_OFFLOAD,
state_dict_rank0_only=self.cfg.FSDP.STATE_DICT_RANK0_ONLY,
ignored_modules=ignored_modules,
forward_prefetch=forward_prefetch,
use_orig_params=self.cfg.FSDP.USE_ORIG_PARAMS,
device_id=torch.cuda.current_device(),
limit_all_gathers=self.cfg.FSDP.LIMIT_ALL_GATHERS,
)
return wrapped_model
def unapply(self, model: FSDPWrapper) -> nn.Module:
raise NotImplementedError(
"FSDPModelingHook.unapply() not implemented: can't unwrap a FSDP module"
)
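# Minimal config sketch (illustrative; actually running FSDP additionally requires an
# initialized torch.distributed process group and a CUDA device). It shows the knobs
# FSDPModelingHook reads: sharding algorithm, auto-wrap policy, and state-dict type.
def _example_fsdp_cfg() -> CN:
    cfg = CN()
    add_fsdp_configs(cfg)
    cfg.FSDP.ALGORITHM = ShardingAlgorithm.FULL_SHARD.value  # "full" -> ShardingStrategy.FULL_SHARD
    cfg.FSDP.AUTO_WRAP_POLICY = "layer_based_auto_wrap_policy"
    cfg.FSDP.AUTO_WRAP_LAYER_CLS = ["TransformerEncoderLayer"]
    cfg.FSDP.STATE_DICT_TYPE = "SHARDED_STATE_DICT"
    return cfg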
|
d2go-main
|
d2go/trainer/fsdp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
d2go-main
|
d2go/trainer/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Trainer APIs on which D2Go's binary can build on top.
"""
from dataclasses import dataclass
from typing import Dict, Optional
from d2go.evaluation.api import AccuracyDict, MetricsDict
@dataclass
class TrainNetOutput:
accuracy: AccuracyDict[float]
metrics: MetricsDict[float]
model_configs: Dict[str, str]
# TODO (T127368603): decide if `tensorboard_log_dir` should be part of output
tensorboard_log_dir: Optional[str] = None
@dataclass
class TestNetOutput:
accuracy: AccuracyDict[float]
metrics: MetricsDict[float]
# TODO (T127368603): decide if `tensorboard_log_dir` should be part of output
tensorboard_log_dir: Optional[str] = None
@dataclass
class EvaluatorOutput:
accuracy: AccuracyDict[float]
metrics: MetricsDict[float]
def do_train():
pass
def do_test():
pass
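# Minimal sketch (illustrative placeholder values) of the output contract a training
# binary built on these APIs is expected to return.
def _example_train_output() -> TrainNetOutput:
    return TrainNetOutput(
        accuracy={},
        metrics={"total_loss": 0.0},
        model_configs={"model_final": "model_final.yaml"},
    )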
|
d2go-main
|
d2go/trainer/api.py
|
from functools import partial
from typing import Any, Callable, Iterable, List, Optional, Union
import torch
from detectron2.utils.registry import Registry
from torch.distributed.fsdp.wrap import (
always_wrap_policy as _always_wrap_policy,
size_based_auto_wrap_policy as _size_based_auto_wrap_policy,
transformer_auto_wrap_policy as _layer_based_auto_wrap_policy,
)
D2GO_WRAP_POLICY_REGISTRY = Registry("D2GO_WRAP_POLICY_REGISTRY")
def parse_precision_from_string(
precision: str, lightning=False
) -> Union[str, int, torch.dtype]:
"""
Convert our string format for precision to what Detectron2 / lightning Trainer expects, controlled by the *lightning* flag
"""
if precision == "float64":
return torch.float64 if not lightning else 64
if precision == "float32":
return torch.float32 if not lightning else 32
elif precision == "float16":
return torch.float16 if not lightning else 16
elif precision == "bfloat16":
return torch.bfloat16 if not lightning else "bf16"
else:
raise ValueError(f"Invalid precision dtype {precision}")
def get_module_class_from_name(module, name):
"""
Gets a class from a module by its name. Code borrowed from HuggingFace
Args:
module (`torch.nn.Module`): The module to get the class from.
name (`str`): The name of the class.
"""
modules_children = list(module.children())
if module.__class__.__name__ == name:
return module.__class__
elif len(modules_children) == 0:
return
else:
for child_module in modules_children:
module_class = get_module_class_from_name(child_module, name)
if module_class is not None:
return module_class
def get_layer_cls_from_names(
model: Any, layer_names: Iterable[str]
) -> List[torch.nn.Module]:
"""
Get a list of layers from a model that match a list of layer names.
"""
layer_cls = []
for name in layer_names:
closure = get_module_class_from_name(model, name)
if closure is None:
raise Exception(
f"Could not find the layer class {name} to wrap in the model."
)
layer_cls.append(closure)
return layer_cls
@D2GO_WRAP_POLICY_REGISTRY.register()
def never_wrap_policy(model, **kwargs) -> Optional[Callable]:
"""
Don't wrap any child module, only wrap the root
"""
def never_wrap(*args, **kwargs):
return False
return never_wrap
@D2GO_WRAP_POLICY_REGISTRY.register()
def always_wrap_policy(model, **kwargs) -> Optional[Callable]:
"""
Wrapper for always_wrap_policy() from torch.distributed.fsdp.wrap
"""
return _always_wrap_policy
@D2GO_WRAP_POLICY_REGISTRY.register()
def size_based_auto_wrap_policy(
model, min_num_params=1e4, **kwargs
) -> Optional[Callable]:
"""
Wrapper for size_based_auto_wrap_policy() from torch.distributed.fsdp.wrap
"""
# Note: be careful when using auto wrap with shared parameters.
# Errors will be thrown if shared parameters reside in different FSDP units
return partial(
_size_based_auto_wrap_policy,
min_num_params=min_num_params,
)
@D2GO_WRAP_POLICY_REGISTRY.register()
def layer_based_auto_wrap_policy(
model, layer_names: Iterable[str], **kwargs
) -> Optional[Callable]:
"""
Wrapper for transformer_auto_wrap_policy() from torch.distributed.fsdp.wrap
Args:
layer_names: a list of layer names
"""
assert (
len(layer_names) > 0
), "layer_names should be a nonempty list of layer names contained in the model"
layer_cls = get_layer_cls_from_names(model, layer_names)
return partial(
_layer_based_auto_wrap_policy,
transformer_layer_cls=layer_cls,
)
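# Minimal sketch (illustrative toy model): resolve a wrap policy by name from the registry
# and build it the same way the FSDP and activation-checkpointing hooks do.
def _example_layer_policy() -> Optional[Callable]:
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    return D2GO_WRAP_POLICY_REGISTRY.get("layer_based_auto_wrap_policy")(
        model, layer_names=["Linear"]
    )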
|
d2go-main
|
d2go/trainer/helper.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
d2go-main
|
d2go/trainer/lightning/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from typing import Dict
import pytorch_lightning as pl
from d2go.config import CfgNode, temp_defrost
from d2go.runner.lightning_task import GeneralizedRCNNTask
from d2go.utils.misc import dump_trained_model_configs
from detectron2.utils.events import EventStorage
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
FINAL_MODEL_CKPT = f"model_final{ModelCheckpoint.FILE_EXTENSION}"
def _do_train(
cfg: CfgNode, trainer: pl.Trainer, task: GeneralizedRCNNTask
) -> Dict[str, str]:
"""Runs the training loop with given trainer and task.
Args:
cfg: The normalized ConfigNode for this D2Go Task.
trainer: PyTorch Lightning trainer.
task: Lightning module instance.
Returns:
A map of model name to trained model config path.
"""
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
final_ckpt = os.path.join(cfg.OUTPUT_DIR, FINAL_MODEL_CKPT)
trainer.save_checkpoint(final_ckpt) # for validation monitor
trained_cfg = cfg.clone()
with temp_defrost(trained_cfg):
trained_cfg.MODEL.WEIGHTS = final_ckpt
model_configs = dump_trained_model_configs(
cfg.OUTPUT_DIR, {"model_final": trained_cfg}
)
return model_configs
def _do_test(trainer: pl.Trainer, task: GeneralizedRCNNTask):
"""Runs the evaluation with a pre-trained model.
Args:
cfg: The normalized ConfigNode for this D2Go Task.
trainer: PyTorch Lightning trainer.
task: Lightning module instance.
"""
with EventStorage() as storage:
task.storage = storage
trainer.test(task)
|
d2go-main
|
d2go/trainer/lightning/training_loop.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import networkx as nx
import numpy as np
import os
import tempfile
import torch
import torch.nn as nn
from networkx.algorithms.bipartite.matrix import from_biadjacency_matrix
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
from transformers import AutoTokenizer
import regex
import collections
from glob import glob
class CRISSAligner(object):
def __init__(self, path='criss/criss-3rd.pt',
args_path='criss/args.pt',
tokenizer='facebook/mbart-large-cc25', device='cpu', distortion=0,
matching_method='a'
):
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
self.device = device
args = torch.load(args_path)
task = tasks.setup_task(args)
models, _model_args = checkpoint_utils.load_model_ensemble(
path.split(':'),
arg_overrides=eval('{}'),
task=task
)
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
model = model.to(self.device)
self.model = EnsembleModel(models).to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
self.distortion = distortion
self.matching_method = matching_method
def get_embed(self, bpe_lists, langcodes=('en_XX', 'en_XX')):
vectors = list()
for i, bpe_list in enumerate(bpe_lists):
input_ids = self.tokenizer.convert_tokens_to_ids(bpe_list + ['</s>', langcodes[i]])
encoder_input = {
'src_tokens': torch.tensor(input_ids).view(1, -1).to(self.device),
'src_lengths': torch.tensor([len(input_ids)]).to(self.device)
}
encoder_outs = self.model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.cpu().squeeze(1).numpy().astype(np.float32)
vectors.append(np_encoder_outs[:-2, :])
return vectors
def get_word_aligns(self, src_sent, trg_sent, langcodes=None, fwd_dict=None, bwd_dict=None, debug=False):
l1_tokens = [self.tokenizer.tokenize(word) for word in src_sent]
l2_tokens = [self.tokenizer.tokenize(word) for word in trg_sent]
bpe_lists = [[bpe for w in sent for bpe in w] for sent in [l1_tokens, l2_tokens]]
l1_b2w_map = list()
for i, wlist in enumerate(l1_tokens):
l1_b2w_map += [i for _ in wlist]
l2_b2w_map = list()
for i, wlist in enumerate(l2_tokens):
l2_b2w_map += [i for _ in wlist]
vectors = self.get_embed(list(bpe_lists), langcodes)
sim = (cosine_similarity(vectors[0], vectors[1]) + 1.0) / 2.0
sim = self.apply_distortion(sim, self.distortion)
all_mats = dict()
fwd, bwd = self.get_alignment_matrix(sim)
if self.matching_method.find('a') != -1:
all_mats['inter'] = fwd * bwd
if self.matching_method.find('i') != -1:
all_mats['itermax'] = self.iter_max(sim)
if self.matching_method.find('m') != -1:
all_mats['mwmf'] = self.get_max_weight_match(sim)
if self.matching_method.find('f') != -1:
all_mats['fixed'] = fwd * bwd
aligns = {k: set() for k in all_mats}
for key in aligns:
for i in range(vectors[0].shape[0]):
for j in range(vectors[1].shape[0]):
if all_mats[key][i, j] > 1e-10:
aligns[key].add((l1_b2w_map[i], l2_b2w_map[j]))
if 'fixed' in aligns:
src_aligned = set([x[0] for x in aligns['fixed']])
trg_aligned = set([x[1] for x in aligns['fixed']])
candidate_alignment = list()
for i, sw in enumerate(src_sent):
sw = sw.lower()
if i not in src_aligned:
for j, tw in enumerate(trg_sent):
tw = tw.lower()
if tw in fwd_dict[sw]:
ri = i / len(src_sent)
rj = j / len(trg_sent)
if -0.2 < ri - rj < 0.2:
candidate_alignment.append((sw, tw, i, j, fwd_dict[sw][tw], 0))
for j, tw in enumerate(trg_sent):
tw = tw.lower()
if j not in trg_aligned:
for i, sw in enumerate(src_sent):
sw = sw.lower()
if sw in bwd_dict[tw]:
ri = i / len(src_sent)
rj = j / len(trg_sent)
if -0.2 < ri - rj < 0.2:
candidate_alignment.append((sw, tw, i, j, bwd_dict[tw][sw], 1))
candidate_alignment = sorted(candidate_alignment, key=lambda x: -x[-2])
for sw, tw, i, j, val, d in candidate_alignment:
if regex.match(r'\p{P}', sw) or regex.match(r'\p{P}', tw):
continue
if val < 0.05:
break
if d == 0:
if i in src_aligned:
continue
if (j not in trg_aligned) or ((i-1, j) in aligns['fixed']) or ((i+1, j) in aligns['fixed']):
aligns['fixed'].add((i, j))
src_aligned.add(i)
trg_aligned.add(j)
if debug:
print(sw, tw, i, j, val, d)
else:
if j in trg_aligned:
continue
if (i not in src_aligned) or ((i, j+1) in aligns['fixed']) or ((i, j-1) in aligns['fixed']):
aligns['fixed'].add((i, j))
src_aligned.add(i)
trg_aligned.add(j)
if debug:
print(sw, tw, i, j, val, d)
for ext in aligns:
aligns[ext] = sorted(aligns[ext])
return aligns
@staticmethod
def get_max_weight_match(sim):
if nx is None:
raise ValueError("networkx must be installed to use match algorithm.")
def permute(edge):
if edge[0] < sim.shape[0]:
return edge[0], edge[1] - sim.shape[0]
else:
return edge[1], edge[0] - sim.shape[0]
G = from_biadjacency_matrix(csr_matrix(sim))
matching = nx.max_weight_matching(G, maxcardinality=True)
matching = [permute(x) for x in matching]
matching = sorted(matching, key=lambda x: x[0])
res_matrix = np.zeros_like(sim)
for edge in matching:
res_matrix[edge[0], edge[1]] = 1
return res_matrix
@staticmethod
def iter_max(sim_matrix, max_count=2):
alpha_ratio = 0.9
m, n = sim_matrix.shape
forward = np.eye(n)[sim_matrix.argmax(axis=1)] # m x n
backward = np.eye(m)[sim_matrix.argmax(axis=0)] # n x m
inter = forward * backward.transpose()
if min(m, n) <= 2:
return inter
new_inter = np.zeros((m, n))
count = 1
while count < max_count:
mask_x = 1.0 - np.tile(inter.sum(1)[:, np.newaxis], (1, n)).clip(0.0, 1.0)
mask_y = 1.0 - np.tile(inter.sum(0)[np.newaxis, :], (m, 1)).clip(0.0, 1.0)
mask = ((alpha_ratio * mask_x) + (alpha_ratio * mask_y)).clip(0.0, 1.0)
mask_zeros = 1.0 - ((1.0 - mask_x) * (1.0 - mask_y))
if mask_x.sum() < 1.0 or mask_y.sum() < 1.0:
mask *= 0.0
mask_zeros *= 0.0
new_sim = sim_matrix * mask
fwd = np.eye(n)[new_sim.argmax(axis=1)] * mask_zeros
bac = np.eye(m)[new_sim.argmax(axis=0)].transpose() * mask_zeros
new_inter = fwd * bac
if np.array_equal(inter + new_inter, inter):
break
inter = inter + new_inter
count += 1
return inter
@staticmethod
def get_alignment_matrix(sim_matrix):
m, n = sim_matrix.shape
forward = np.eye(n)[sim_matrix.argmax(axis=1)] # m x n
backward = np.eye(m)[sim_matrix.argmax(axis=0)] # n x m
return forward, backward.transpose()
@staticmethod
def apply_distortion(sim_matrix, ratio=0.5):
shape = sim_matrix.shape
if (shape[0] < 2 or shape[1] < 2) or ratio == 0.0:
return sim_matrix
pos_x = np.array([[y / float(shape[1] - 1) for y in range(shape[1])] for x in range(shape[0])])
pos_y = np.array([[x / float(shape[0] - 1) for x in range(shape[0])] for y in range(shape[1])])
distortion_mask = 1.0 - ((pos_x - np.transpose(pos_y)) ** 2) * ratio
return np.multiply(sim_matrix, distortion_mask)
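# Minimal sketch (illustrative only): the static helpers above are pure NumPy and can be
# exercised on a toy similarity matrix without loading a CRISS checkpoint. The numbers
# below are made up.
def _demo_alignment_helpers():
    toy_sim = np.array([
        [0.9, 0.1, 0.2],
        [0.2, 0.8, 0.3],
    ])
    fwd, bwd = CRISSAligner.get_alignment_matrix(toy_sim)
    inter = fwd * bwd  # links where the row-wise and column-wise argmax agree
    distorted = CRISSAligner.apply_distortion(toy_sim, ratio=0.5)  # down-weight distant positions
    itermax = CRISSAligner.iter_max(toy_sim)
    return inter, distorted, itermax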
class Aligner(object):
def __init__(self, aligner_type, **kwargs):
self.aligner_type = aligner_type
if aligner_type == 'simalign':
from simalign import SentenceAligner
d = 'cuda' if torch.cuda.is_available() else 'cpu'
self.aligner = SentenceAligner('xlm-roberta-base', device=d, **kwargs)
elif aligner_type in ['fastalign', 'giza++']:
pass
elif aligner_type == 'criss-align':
self.aligner = CRISSAligner(**kwargs)
else:
raise Exception('Aligner type not supported.')
def align_sents(self, sent_pairs, train_file=None, **kwargs):
aligns = list()
if self.aligner_type in ['simalign', 'criss-align']:
for src, trg in tqdm(sent_pairs):
src = src.strip().split()
trg = trg.strip().split()
align_info = self.aligner.get_word_aligns(src, trg, **kwargs)
result = None
for key in align_info:
if result is None:
result = set(align_info[key])
else:
result = result.intersection(align_info[key])
aligns.append(' '.join(['-'.join([str(x) for x in item]) for item in sorted(result)]))
elif self.aligner_type == 'fastalign':
temp_dir = tempfile.TemporaryDirectory(prefix='fast-align')
with open(os.path.join(temp_dir.name, 'bitext.txt'), 'w') as fout:
for ss, ts in sent_pairs:
fout.write(ss + ' ||| ' + ts + '\n')
fout.close()
if train_file is not None:
assert os.path.exists(train_file)
os.system(f'cat {train_file} >> {temp_dir.name}/bitext.txt')
os.system(f'fast_align -d -o -v -i {temp_dir.name}/bitext.txt > {temp_dir.name}/fwd.align')
os.system(f'fast_align -d -o -v -r -i {temp_dir.name}/bitext.txt > {temp_dir.name}/bwd.align')
os.system(f'atools -i {temp_dir.name}/fwd.align -j {temp_dir.name}/bwd.align -c grow-diag-final-and > {temp_dir.name}/final.align')
aligns = [x.strip() for x in open(f'{temp_dir.name}/final.align').readlines()][:len(sent_pairs)]
elif self.aligner_type == 'giza++':
assert train_file is not None
giza_path = '/private/home/fhs/codebase/lexind/fairseq/2-word-align-final/giza-pp/GIZA++-v2/GIZA++'
temp_dir = tempfile.TemporaryDirectory(prefix='giza++')
d_src = collections.Counter()
d_trg = collections.Counter()
w2id_src = collections.defaultdict()
w2id_trg = collections.defaultdict()
for sent_pair in open(train_file):
ss, ts = regex.split(r'\|\|\|', sent_pair.lower())
for w in ss.strip().split():
d_src[w] += 1
for w in ts.strip().split():
d_trg[w] += 1
for ss, ts in sent_pairs:
ss = ss.lower()
ts = ts.lower()
for w in ss.strip().split():
d_src[w] += 1
for w in ts.strip().split():
d_trg[w] += 1
with open(os.path.join(temp_dir.name, 's.vcb'), 'w') as fout:
for i, w in enumerate(sorted(d_src.keys())):
print(i + 1, w, d_src[w], file=fout)
w2id_src[w] = i + 1
fout.close()
with open(os.path.join(temp_dir.name, 't.vcb'), 'w') as fout:
for i, w in enumerate(sorted(d_trg.keys())):
print(i + 1, w, d_trg[w], file=fout)
w2id_trg[w] = i + 1
fout.close()
with open(os.path.join(temp_dir.name, 'bitext.train'), 'w') as fout:
for sent_pair in open(train_file):
ss, ts = regex.split(r'\|\|\|', sent_pair.lower())
print(1, file=fout)
print(' '.join([str(w2id_src[x]) for x in ss.strip().split()]), file=fout)
print(' '.join([str(w2id_trg[x]) for x in ts.strip().split()]), file=fout)
fout.close()
with open(os.path.join(temp_dir.name, 'bitext.test'), 'w') as fout:
for ss, ts in sent_pairs:
ss = ss.lower()
ts = ts.lower()
print(1, file=fout)
print(' '.join([str(w2id_src[x]) for x in ss.strip().split()]), file=fout)
print(' '.join([str(w2id_trg[x]) for x in ts.strip().split()]), file=fout)
fout.close()
os.chdir(f'{temp_dir.name}')
os.system(f'{giza_path} -S {temp_dir.name}/s.vcb -T {temp_dir.name}/t.vcb -C {temp_dir.name}/bitext.train -tc {temp_dir.name}/bitext.test')
# read giza++ results
for i, line in enumerate(open(glob(f'{temp_dir.name}/*tst.A3*')[0])):
if i % 3 == 2:
align = list()
is_trg = False
is_null = False
src_idx = 0
for item in line.strip().split():
if item == '({':
is_trg = True
elif item == '})':
is_trg = False
elif is_trg:
if not is_null:
trg_idx = int(item)
align.append(f'{src_idx}-{trg_idx}')
elif item != 'NULL':
src_idx += 1
is_null = False
else:
is_null = True
aligns.append(' '.join(align))
temp_dir.cleanup()
return aligns
class CRISSWrapper(object):
def __init__(self, path='criss/criss-3rd.pt', args_path='criss/args.pt',
tokenizer='facebook/mbart-large-cc25', device='cpu'):
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
self.device = device
args = torch.load(args_path)
task = tasks.setup_task(args)
models, _model_args = checkpoint_utils.load_model_ensemble(
path.split(':'),
arg_overrides=eval('{}'),
task=task
)
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
model = model.to(self.device)
self.model = EnsembleModel(models).to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
def embed(self, words, langcode='en_XX'):
lbs, rbs = list(), list()
tokens, word_ids = list(), list()
for word in words:
word_tokens = self.tokenizer.tokenize(word)
lbs.append(len(tokens))
tokens.extend(word_tokens)
rbs.append(len(tokens))
tokens = [tokens + ['</s>', langcode]]
lengths = [len(x) for x in tokens]
max_length = max(lengths)
for i in range(len(tokens)):
word_ids.append(self.tokenizer.convert_tokens_to_ids(['<pad>'] * (max_length - len(tokens[i])) + tokens[i]))
encoder_input = {
'src_tokens': torch.tensor(word_ids).to(self.device),
'src_lengths': torch.tensor(lengths).to(self.device)
}
encoder_outs = self.model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.float().detach()
word_features = list()
for i, lb in enumerate(lbs):
rb = rbs[i]
word_features.append(np_encoder_outs[lb:rb].mean(0))
word_features = torch.cat(word_features, dim=0)
return word_features
class WordAligner(nn.Module):
def __init__(self, input_dim, hidden_dims, output_dim=1, feature_transform=3):
super(WordAligner, self).__init__()
layers = list()
hidden_dims = [input_dim] + hidden_dims
for i in range(1, len(hidden_dims)):
layers.append(nn.Linear(hidden_dims[i-1], hidden_dims[i]))
layers.append(nn.ReLU())
layers.append(nn.Linear(hidden_dims[-1], output_dim))
layers.append(nn.Sigmoid())
self.model = nn.Sequential(*layers)
self.bias = nn.Parameter(torch.ones(feature_transform))
self.feature_transform = feature_transform
def forward(self, x):
transformed_features = torch.cat([x[:, :-self.feature_transform], torch.log(x[:, -self.feature_transform:] + self.bias.abs())], dim=-1)
return self.model(transformed_features)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
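# Minimal sketch (illustrative only) of WordAligner's forward pass on random features.
# The 7-dimensional input (2 CRISS features followed by 5 count features) and
# hiddens=[8] mirror how train.py instantiates the model; the log transform with a
# learned bias is applied to the trailing 5 (count) features.
def _demo_word_aligner():
    aligner = WordAligner(input_dim=7, hidden_dims=[8], output_dim=3, feature_transform=5)
    feats = torch.rand(4, 7)  # 4 candidate word pairs, 7 features each
    scores = aligner(feats)
    return scores.shape  # torch.Size([4, 3])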
|
bitext-lexind-main
|
align/models.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from train import *
from data import AlignDataset
import collections
import copy
import numpy as np
from models import Aligner
def eval_align(gold, silver, adjust=0):
assert len(gold) == len(silver)
a_size = s_size = p_size = ap_inter = as_inter = 0
for i, g in enumerate(gold):
s = set([
tuple(map(lambda x: int(x), item.split('-')))
for item in filter(lambda x: x.find('p') == -1, g.split())
])
p = set([tuple(map(lambda x: int(x), regex.split('-|p', item))) for item in g.split()])
a = set([tuple(map(lambda x: int(x) + adjust, regex.split('-', item))) for item in silver[i].split()])
ap_inter += len(a.intersection(p))
as_inter += len(a.intersection(s))
a_size += len(a)
p_size += len(p)
s_size += len(s)
prec = ap_inter / a_size if a_size > 0 else 0
rec = as_inter / s_size if s_size > 0 else 0
return {
'prec': prec,
'rec': rec,
'f1': 2 * prec * rec / (prec + rec) if s_size > 0 and a_size > 0 else 0,
'aer': 1 - (as_inter + ap_inter) / (a_size + s_size)
}
def inference(simalign, probs, threshold):
n, m = probs.shape
ids = probs.view(-1).argsort(descending=True)
f = lambda x, m: (x.item()//m, x.item()%m)
src2trg = collections.defaultdict(set)
trg2src = collections.defaultdict(set)
results = set()
for pair in simalign.split():
x, y = pair.split('-')
x = int(x)
y = int(y)
src2trg[x].add(y)
trg2src[y].add(x)
results.add((x, y))
for idx in ids:
x, y = f(idx, m)
if probs[x, y] < threshold: # too low similarity
break
        if (x not in src2trg) and (y not in trg2src):  # neither word is aligned yet, keep the pair
src2trg[x].add(y)
trg2src[y].add(x)
results.add((x, y))
        elif (x in src2trg) and (y in trg2src):  # both words already have partners, skip
continue
        elif x in src2trg:  # x already has partners; y is only added if adjacent to them
if y == max(src2trg[x]) + 1 or y == min(src2trg[x]) - 1:
src2trg[x].add(y)
trg2src[y].add(x)
results.add((x, y))
else:
if x == max(trg2src[y]) + 1 or x == min(trg2src[y]) - 1:
src2trg[x].add(y)
trg2src[y].add(x)
results.add((x, y))
results = ' '.join([f'{x}-{y}' for x, y in sorted(results)])
return results
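# Minimal sketch (illustrative only) of inference() on a toy 2x3 probability matrix.
# Starting from the simalign link "0-0", the pair (1, 1) is added because both words are
# still unaligned and its probability clears the threshold, giving '0-0 1-1'.
def _demo_inference():
    toy_probs = torch.tensor([
        [0.9, 0.2, 0.1],
        [0.1, 0.8, 0.3],
    ])
    return inference('0-0', toy_probs, threshold=0.5)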
def test(configs, criss, dataset, simaligns, threshold=0.5):
setup_configs(configs)
os.system(f'mkdir -p {configs.save_path}')
torch.save(configs, configs.save_path + '/configs.pt')
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path,
configs.src_lang, configs.trg_lang, configs.reversed
)
aligner = WordAligner(5 + (2 if configs.use_criss else 0), configs.hiddens, 3, 5).to(configs.device)
model_path = configs.save_path+f'/model.pt'
results = list()
aligner.load_state_dict(torch.load(model_path))
for idx, batch in enumerate(tqdm(dataset.sent_pairs)):
ss, ts = batch
ss = ss.split()
ts = ts.split()
if criss is not None:
semb = criss.embed(ss, langcode=configs.src_lang)
temb = criss.embed(ts, langcode=configs.trg_lang)
cos_matrix = cos(semb.unsqueeze(1), temb.unsqueeze(0)).unsqueeze(-1).unsqueeze(-1)
ip_matrix = (semb.unsqueeze(1) * temb.unsqueeze(0)).sum(-1).unsqueeze(-1).unsqueeze(-1)
feat_matrix = torch.cat((cos_matrix, ip_matrix), dim=-1)
word_pairs = list()
criss_features = list()
for i, sw in enumerate(ss):
for j, tw in enumerate(ts):
word_pairs.append((sw, tw))
criss_features.append(feat_matrix[i, j])
scores = extract_scores(word_pairs, criss_features, aligner, info, configs).reshape(len(ss), len(ts), -1)
scores = scores.softmax(-1)
arrange = torch.arange(3).to(configs.device).view(1, 1, -1)
scores = (scores * arrange).sum(-1)
result = inference(simaligns[idx], scores, threshold)
results.append(result)
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-g', '--ground-truth', type=str, default='./data/align/', help='path to ground-truth')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-m', '--model-path', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.model_path,
'batch_size': 128,
'epochs': 100,
'device': args.device,
'hiddens': [8],
'use_criss': True,
'src_lang': args.source,
'trg_lang': args.target,
'threshold': 1.0
}
)
criss = CRISSWrapper(device=configs.device)
dataset = collections.defaultdict(None)
simaligner = Aligner(
'criss-align', distortion=0,
path='criss/criss-3rd.pt', args_path='criss/args.pt',
matching_method='a'
)
lp = (args.source, args.target)
dset = AlignDataset(args.ground_truth, f'{args.source.split("_")[0]}-{args.target.split("_")[0]}')
simaligns = simaligner.align_sents(dset.sent_pairs, langcodes=lp)
aligns = test(configs, criss, dset, simaligns, configs.threshold)
results = eval_align(dset.ground_truth, aligns, 1)
print(results)
from IPython import embed; embed(using=False)
|
bitext-lexind-main
|
align/test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import regex
from data import AlignDataset
from evaluate import evaluate
from models import Aligner
import collections
resdict = collections.defaultdict(None)
aligner = Aligner(
'criss-align', distortion=0,
path='criss/criss-3rd.pt',
args_path='criss/args.pt',
matching_method='a'
)
dset = AlignDataset('data/align/', 'de-en')
aligns = aligner.align_sents(dset.sent_pairs, langcodes=('de_DE', 'en_XX'))
res = evaluate(dset.ground_truth, aligns, 1)
print('de-en:', res)
|
bitext-lexind-main
|
align/eval_simalign_criss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from glob import glob
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from models import CRISSWrapper, WordAligner
from data import BitextAlignmentDataset
cos = torch.nn.CosineSimilarity(dim=-1)
def setup_configs(configs):
configs.stats_path = configs.save_path + '/stats.pt'
def collect_bitext_stats(bitext_path, align_path, save_path, src_lang, trg_lang, is_reversed=False):
stats_path = save_path + '/stats.pt'
freq_path = save_path + '/freqs.pt'
if os.path.exists(stats_path):
coocc, semi_matched_coocc, matched_coocc = torch.load(stats_path)
else:
coocc = collections.defaultdict(collections.Counter)
semi_matched_coocc = collections.defaultdict(collections.Counter)
matched_coocc = collections.defaultdict(collections.Counter)
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
os.system(f'cat {align_path} > {tmpdir.name}/aligns.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
aligns = open(f'{tmpdir.name}/aligns.txt').readlines()
tmpdir.cleanup()
assert len(bitext) == len(aligns)
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
align = [tuple(x if not is_reversed else reversed(x)) for x in json.loads(aligns[i])['inter']]
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
src_words = src_sent.lower().split()
trg_words = trg_sent.lower().split()
src_cnt = collections.Counter([x[0] for x in align])
trg_cnt = collections.Counter([x[1] for x in align])
for x, sw in enumerate(src_words):
for y, tw in enumerate(trg_words):
if (x, y) in align:
semi_matched_coocc[sw][tw] += 1
if src_cnt[x] == 1 and trg_cnt[y] == 1:
matched_coocc[sw][tw] += 1
coocc[sw][tw] += 1
torch.save((coocc, semi_matched_coocc, matched_coocc), stats_path)
if os.path.exists(freq_path):
freq_src, freq_trg = torch.load(freq_path)
else:
freq_src = collections.Counter()
freq_trg = collections.Counter()
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
tmpdir.cleanup()
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
for w in src_sent.split():
freq_src[w] += 1
for w in trg_sent.split():
freq_trg[w] += 1
torch.save((freq_src, freq_trg), freq_path)
return coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg
def extract_scores(batch, criss_features, aligner, info, configs):
coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg = info
all_scores = list()
for i in range(0, len(batch), configs.batch_size):
subbatch = batch[i:i+configs.batch_size]
src_words, trg_words = zip(*subbatch)
features = torch.tensor(
[
[
matched_coocc[x[0]][x[1]],
semi_matched_coocc[x[0]][x[1]],
coocc[x[0]][x[1]],
freq_src[x[0]],
freq_trg[x[1]]
] for x in subbatch
]
).float().to(configs.device).reshape(-1, 5)
if configs.use_criss:
subbatch_crissfeat = torch.cat(criss_features[i:i+configs.batch_size], dim=0)
features = torch.cat((subbatch_crissfeat, features), dim=-1).detach()
scores = aligner(features).squeeze(-1)
all_scores.append(scores)
return torch.cat(all_scores, dim=0)
def train(configs, logging_steps=50000):
setup_configs(configs)
os.system(f'mkdir -p {configs.save_path}')
torch.save(configs, configs.save_path + '/configs.pt')
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path,
configs.src_lang, configs.trg_lang, configs.reversed
)
if configs.use_criss:
criss = CRISSWrapper(device=configs.device)
else:
criss = None
dataset = BitextAlignmentDataset(configs.bitext_path, configs.align_path)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=dataset.collate_fn)
aligner = WordAligner(5 + (2 if configs.use_criss else 0), configs.hiddens, 3, 5).to(configs.device)
optimizer = torch.optim.Adam(aligner.parameters(), lr=.0005)
for epoch in range(configs.epochs):
model_cnt = 0
total_loss = total_cnt = 0
bar = tqdm(dataloader)
for idx, batch in enumerate(bar):
(ss, ts), edges = batch[0]
if criss is not None:
semb = criss.embed(ss, langcode=configs.src_lang)
temb = criss.embed(ts, langcode=configs.trg_lang)
cos_matrix = cos(semb.unsqueeze(1), temb.unsqueeze(0)).unsqueeze(-1).unsqueeze(-1)
ip_matrix = (semb.unsqueeze(1) * temb.unsqueeze(0)).sum(-1).unsqueeze(-1).unsqueeze(-1)
feat_matrix = torch.cat((cos_matrix, ip_matrix), dim=-1)
            # add contextualized CRISS embedding features (cosine similarity and inner product)
training_sets = collections.defaultdict(list)
criss_features = collections.defaultdict(list)
for i, sw in enumerate(ss):
for j, tw in enumerate(ts):
label = edges[i, j]
training_sets[label].append((sw, tw))
if criss is not None:
criss_features[label].append(feat_matrix[i, j])
max_len = max(len(training_sets[k]) for k in training_sets)
training_set = list()
criss_feats = list()
targets = list()
for key in training_sets:
training_set += training_sets[key] * (max_len // len(training_sets[key]))
criss_feats += criss_features[key] * (max_len // len(training_sets[key]))
targets += [key] * len(training_sets[key]) * (max_len // len(training_sets[key]))
targets = torch.tensor(targets).long().to(configs.device)
scores = extract_scores(training_set, criss_feats, aligner, info, configs)
optimizer.zero_grad()
loss = nn.CrossEntropyLoss()(scores, targets)
loss.backward()
optimizer.step()
total_loss += loss.item() * len(batch)
total_cnt += len(batch)
bar.set_description(f'loss={total_loss / total_cnt:.5f}')
if (idx + 1) % logging_steps == 0:
print(f'Epoch {epoch}, step {idx+1}, loss = {total_loss / total_cnt:.5f}', flush=True)
torch.save(aligner.state_dict(), configs.save_path + f'/model.pt')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-o', '--output', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.output,
'batch_size': 128,
'epochs': 100,
'device': args.device,
'hiddens': [8],
'use_criss': True,
'src_lang': args.source,
'trg_lang': args.target
}
)
train(configs)
|
bitext-lexind-main
|
align/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import regex
def evaluate(gold, silver, offset=0):
assert len(gold) == len(silver)
a_size = s_size = p_size = ap_inter = as_inter = 0
for i, g in enumerate(gold):
s = set([
tuple(map(lambda x: int(x), item.split('-')))
for item in filter(lambda x: x.find('p') == -1, g.split())
])
p = set([tuple(map(lambda x: int(x), regex.split('-|p', item))) for item in g.split()])
a = set([tuple(map(lambda x: int(x) + offset, regex.split('-', item))) for item in silver[i].split()])
ap_inter += len(a.intersection(p))
as_inter += len(a.intersection(s))
a_size += len(a)
p_size += len(p)
s_size += len(s)
prec = ap_inter / a_size if a_size > 0 else 0
rec = as_inter / s_size if s_size > 0 else 0
return {
'prec': prec,
'rec': rec,
'f1': 2 * prec * rec / (prec + rec) if s_size > 0 and a_size > 0 else 0,
'aer': 1 - (as_inter + ap_inter) / (a_size + s_size)
}
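# Minimal sketch (toy data): gold entries use '-' for sure links and 'p' for possible
# links, silver uses '-' only. Here A = {(0,0), (1,2)}, S = {(0,0)}, P = {(0,0), (1,2)},
# so prec = |A&P|/|A| = 2/2, rec = |A&S|/|S| = 1/1, and
# AER = 1 - (|A&S| + |A&P|) / (|A| + |S|) = 1 - 3/3 = 0.0.
def _demo_evaluate():
    gold = ['0-0 1p2']
    silver = ['0-0 1-2']
    return evaluate(gold, silver, offset=0)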
|
bitext-lexind-main
|
align/evaluate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from torch.utils.data import DataLoader, Dataset
import regex
import json
import numpy as np
import os
class BitextAlignmentDataset(Dataset):
def __init__(self, bitext_path, alignment_path):
super(BitextAlignmentDataset, self).__init__()
self.bitext_path = bitext_path
self.alignment_path = alignment_path
bitext = [regex.split(r'\|\|\|', x.strip()) for x in open(bitext_path)]
align = open(alignment_path).readlines()
self.bitext, self.edges = self.filter(bitext, align)
assert len(self.bitext) == len(self.edges)
@staticmethod
def filter(bitext, align):
real_bitext = list()
edges = list()
for i, a in enumerate(align):
try:
a = json.loads(a)
if len(bitext[i]) == 2:
bitext[i][0] = bitext[i][0].split()
bitext[i][1] = bitext[i][1].split()
real_bitext.append(bitext[i])
edge_info = np.zeros((len(bitext[i][0]), len(bitext[i][1])))
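                    # Edge labels: 2 = link in the 'inter' (intersection) alignment,
                    # 1 = link only in 'itermax', 0 = no link; train.py uses these as
                    # the three-way classification targets for WordAligner.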
for x, y in a['inter']:
edge_info[x, y] = 2
for x, y in a['itermax']:
if edge_info[x, y] == 0:
edge_info[x, y] = 1
edges.append(edge_info)
except:
continue
return real_bitext, edges
def __getitem__(self, index):
return self.bitext[index], self.edges[index]
def __len__(self):
return len(self.bitext)
@staticmethod
def collate_fn(batch):
return batch
class AlignDataset(object):
def __init__(self, path, langs, split='test'):
if langs == 'de-en':
src_sents = [x.strip() for x in open(os.path.join(path, langs, 'de'), encoding='iso-8859-1').readlines()][:-1]
trg_sents = [x.strip() for x in open(os.path.join(path, langs, 'en'), encoding='iso-8859-1').readlines()][:-1]
self.ground_truth = self.load_std_file(os.path.join(path, langs, 'alignmentDeEn.talp'))[:-1]
elif langs == 'ro-en' or langs == 'en-fr':
src_id2s = dict()
trg_id2s = dict()
for fpair in open(os.path.join(path, langs, split, f'FilePairs.{split}')):
sf, tf = fpair.strip().split()
for line in open(os.path.join(path, langs, split, sf), encoding='iso-8859-1'):
matching = regex.match(r'<s snum=([0-9]*)>(.*)</s>', line.strip())
assert matching is not None
idx = matching.group(1)
sent = matching.group(2).strip()
src_id2s[idx] = sent
for line in open(os.path.join(path, langs, split, tf), encoding='iso-8859-1'):
matching = regex.match(r'<s snum=([0-9]*)>(.*)</s>', line.strip())
assert matching is not None
idx = matching.group(1)
sent = matching.group(2).strip()
trg_id2s[idx] = sent
src_sents = [src_id2s[key] for key in sorted(src_id2s.keys())]
trg_sents = [trg_id2s[key] for key in sorted(trg_id2s.keys())]
snum2idx = dict([(key, i) for i, key in enumerate(sorted(trg_id2s.keys()))])
assert len(src_id2s) == len(trg_id2s)
ground_truth = [list() for _ in src_id2s]
raw_gt = open(os.path.join(path, langs, split, f'{split}.wa.nonullalign')).readlines()
for line in raw_gt:
sid, s, t, sure = line.strip().split()
idx = snum2idx[sid]
if sure == 'S':
align = '-'.join([s, t])
else:
assert sure == 'P'
align = 'p'.join([s, t])
ground_truth[idx].append(align)
for i, item in enumerate(ground_truth):
ground_truth[i] = ' '.join(item)
self.ground_truth = ground_truth
elif langs == 'en-hi':
src_id2s = dict()
trg_id2s = dict()
sf = f'{split}.e'
tf = f'{split}.h'
for line in open(os.path.join(path, langs, split, sf), encoding='us-ascii'):
matching = regex.match(r'<s snum=([0-9]*)>(.*)</s>', line.strip())
assert matching is not None
idx = matching.group(1)
sent = matching.group(2).strip()
src_id2s[idx] = sent
for line in open(os.path.join(path, langs, split, tf), encoding='utf-8'):
matching = regex.match(r'<s snum=([0-9]*)>(.*)</s>', line.strip())
assert matching is not None
idx = matching.group(1)
sent = matching.group(2).strip()
trg_id2s[idx] = sent
src_sents = [src_id2s[key] for key in sorted(src_id2s.keys())]
trg_sents = [trg_id2s[key] for key in sorted(trg_id2s.keys())]
snum2idx = dict([(key, i) for i, key in enumerate(sorted(trg_id2s.keys()))])
assert len(src_id2s) == len(trg_id2s)
ground_truth = [list() for _ in src_id2s]
raw_gt = open(os.path.join(path, langs, split, f'{split}.wa.nonullalign')).readlines()
for line in raw_gt:
sid, s, t = line.strip().split()
idx = snum2idx[sid]
align = '-'.join([s, t])
ground_truth[idx].append(align)
for i, item in enumerate(ground_truth):
ground_truth[i] = ' '.join(item)
self.ground_truth = ground_truth
else:
raise Exception('language pair not supported.')
self.sent_pairs = list(zip(src_sents, trg_sents))
assert len(self.sent_pairs) == len(self.ground_truth)
@staticmethod
def load_std_file(path):
return [x.strip() for x in open(path)]
|
bitext-lexind-main
|
align/data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from evaluate import evaluate
from models import CRISSWrapper, LexiconInducer
cos = nn.CosineSimilarity(dim=-1)
def setup_configs(configs):
configs.save_path = configs.save_path.format(src=configs.src_lang, trg=configs.trg_lang)
configs.stats_path = configs.save_path + '/stats.pt'
def collect_bitext_stats(bitext_path, align_path, save_path, src_lang, trg_lang, is_reversed=False):
stats_path = save_path + '/stats.pt'
freq_path = save_path + '/freqs.pt'
if os.path.exists(stats_path):
coocc, semi_matched_coocc, matched_coocc = torch.load(stats_path)
else:
coocc = collections.defaultdict(collections.Counter)
semi_matched_coocc = collections.defaultdict(collections.Counter)
matched_coocc = collections.defaultdict(collections.Counter)
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
os.system(f'cat {align_path} > {tmpdir.name}/aligns.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
aligns = open(f'{tmpdir.name}/aligns.txt').readlines()
tmpdir.cleanup()
assert len(bitext) == len(aligns)
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
align = [tuple(x if not is_reversed else reversed(x)) for x in json.loads(aligns[i])['inter']]
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
src_words = src_sent.lower().split()
trg_words = trg_sent.lower().split()
src_cnt = collections.Counter([x[0] for x in align])
trg_cnt = collections.Counter([x[1] for x in align])
for x, sw in enumerate(src_words):
for y, tw in enumerate(trg_words):
if (x, y) in align:
semi_matched_coocc[sw][tw] += 1
if src_cnt[x] == 1 and trg_cnt[y] == 1:
matched_coocc[sw][tw] += 1
coocc[sw][tw] += 1
torch.save((coocc, semi_matched_coocc, matched_coocc), stats_path)
if os.path.exists(freq_path):
freq_src, freq_trg = torch.load(freq_path)
else:
freq_src = collections.Counter()
freq_trg = collections.Counter()
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
tmpdir.cleanup()
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
for w in src_sent.split():
freq_src[w] += 1
for w in trg_sent.split():
freq_trg[w] += 1
torch.save((freq_src, freq_trg), freq_path)
return coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg
def load_lexicon(path):
lexicon = [regex.split(r'\t| ', x.strip()) for x in open(path)]
return set([tuple(x) for x in lexicon])
def extract_dataset(train_lexicon, test_lexicon, coocc, configs):
cooccs = [coocc]
test_set = set()
pos_training_set = set()
neg_training_set = set()
for tsw in set([x[0] for x in train_lexicon]):
for coocc in cooccs:
ssw = to_simplified(tsw) if configs.src_lang == 'zh_CN' else tsw
for stw in coocc[ssw]:
if stw == ssw:
added_self = True
ttw = to_traditional(stw) if configs.trg_lang == 'zh_CN' else stw
if (tsw, ttw) in train_lexicon:
pos_training_set.add((ssw, stw))
else:
neg_training_set.add((ssw, stw))
if (ssw, ssw) in train_lexicon:
pos_training_set.add((ssw, ssw))
else:
neg_training_set.add((ssw, ssw))
for tsw in set([x[0] for x in test_lexicon]):
for coocc in cooccs:
ssw = to_simplified(tsw) if configs.src_lang == 'zh_CN' else tsw
added_self = False
for stw in coocc[ssw]:
if stw == ssw:
added_self = True
test_set.add((ssw, stw))
test_set.add((ssw, ssw))
pos_training_set = list(pos_training_set)
neg_training_set = list(neg_training_set)
test_set = list(test_set)
return pos_training_set, neg_training_set, test_set
def extract_probs(batch, criss, lexicon_inducer, info, configs):
matched_coocc, semi_matched_coocc, coocc, freq_src, freq_trg = info
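    # NOTE: this unpack order differs from collect_bitext_stats(), which returns
    # (coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg); the first and third
    # count features are therefore swapped relative to their names here. Training and
    # inference use the same ordering, so the learned model remains self-consistent.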
all_probs = list()
for i in range(0, len(batch), configs.batch_size):
subbatch = batch[i:i+configs.batch_size]
src_words, trg_words = zip(*subbatch)
src_encodings = criss.word_embed(src_words, configs.src_lang).detach()
trg_encodings = criss.word_embed(trg_words, configs.trg_lang).detach()
cos_sim = cos(src_encodings, trg_encodings).reshape(-1, 1)
dot_prod = (src_encodings * trg_encodings).sum(-1).reshape(-1, 1)
features = torch.tensor(
[
[
matched_coocc[x[0]][x[1]],
semi_matched_coocc[x[0]][x[1]],
coocc[x[0]][x[1]],
freq_src[x[0]],
freq_trg[x[1]],
] for x in subbatch
]
).float().to(configs.device).reshape(-1, 5)
features = torch.cat([cos_sim, dot_prod, features], dim=-1)
probs = lexicon_inducer(features).squeeze(-1)
all_probs.append(probs)
return torch.cat(all_probs, dim=0)
def get_test_lexicon(
test_set, test_lexicon, criss, lexicon_inducer, info, configs, best_threshold, best_n_cand
):
induced_lexicon = list()
pred_test_lexicon = collections.defaultdict(collections.Counter)
probs = extract_probs(
test_set, criss, lexicon_inducer, info, configs
)
for i, (x, y) in enumerate(test_set):
pred_test_lexicon[x][y] = max(pred_test_lexicon[x][y], probs[i].item())
possible_predictions = list()
for tsw in set([x[0] for x in test_lexicon]):
ssw = to_simplified(tsw)
for stw in pred_test_lexicon[ssw]:
ttw = to_traditional(stw)
pos = 1 if (tsw, ttw) in test_lexicon else 0
possible_predictions.append([tsw, ttw, pred_test_lexicon[ssw][stw], pos])
possible_predictions = sorted(possible_predictions, key=lambda x:-x[-2])
word_cnt = collections.Counter()
correct_predictions = 0
for i, item in enumerate(possible_predictions):
if item[-2] < best_threshold:
prec = correct_predictions / (sum(word_cnt.values()) + 1) * 100.0
rec = correct_predictions / len(test_lexicon) * 100.0
f1 = 2 * prec * rec / (rec + prec)
print(f'Test F1: {f1:.2f}')
break
if word_cnt[item[0]] == best_n_cand:
continue
word_cnt[item[0]] += 1
if item[-1] == 1:
correct_predictions += 1
induced_lexicon.append(item[:2])
eval_result = evaluate(induced_lexicon, test_lexicon)
return induced_lexicon, eval_result
def get_optimal_parameters(
pos_training_set, neg_training_set, train_lexicon, criss,
lexicon_inducer, info, configs,
):
pred_train_lexicon = collections.defaultdict(collections.Counter)
probs = extract_probs(
pos_training_set + neg_training_set, criss, lexicon_inducer, info, configs
)
for i, (x, y) in enumerate(pos_training_set + neg_training_set):
pred_train_lexicon[x][y] = max(pred_train_lexicon[x][y], probs[i].item())
possible_predictions = list()
for tsw in set([x[0] for x in train_lexicon]):
ssw = to_simplified(tsw)
for stw in pred_train_lexicon[ssw]:
ttw = to_traditional(stw)
pos = 1 if (tsw, ttw) in train_lexicon else 0
possible_predictions.append([tsw, ttw, pred_train_lexicon[ssw][stw], pos])
possible_predictions = sorted(possible_predictions, key=lambda x:-x[-2])
best_f1 = -1e10
best_threshold = best_n_cand = 0
for n_cand in range(1, 6):
word_cnt = collections.Counter()
correct_predictions = 0
bar = tqdm(possible_predictions)
for i, item in enumerate(bar):
if word_cnt[item[0]] == n_cand:
continue
word_cnt[item[0]] += 1
if item[-1] == 1:
correct_predictions += 1
prec = correct_predictions / (sum(word_cnt.values()) + 1) * 100.0
rec = correct_predictions / len(train_lexicon) * 100.0
f1 = 2 * prec * rec / (rec + prec)
if f1 > best_f1:
best_f1 = f1
best_threshold = item[-2]
best_n_cand = n_cand
bar.set_description(
f'Best F1={f1:.1f}, Prec={prec:.1f}, Rec={rec:.1f}, NCand={n_cand}, Threshold={item[-2]}'
)
return best_threshold, best_n_cand
def train_test(configs, logging_steps=50000):
setup_configs(configs)
os.system(f'mkdir -p {configs.save_path}')
torch.save(configs, configs.save_path + '/configs.pt')
# prepare feature extractor
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path, configs.src_lang, configs.trg_lang, configs.reversed)
# dataset
train_lexicon = load_lexicon(configs.tuning_set)
sim_train_lexicon = {(to_simplified(x[0]), to_simplified(x[1])) for x in train_lexicon}
all_train_lexicon = train_lexicon.union(sim_train_lexicon)
test_lexicon = load_lexicon(configs.test_set)
pos_training_set, neg_training_set, test_set = extract_dataset(
train_lexicon, test_lexicon, info[2], configs
)
training_set_modifier = max(1, len(neg_training_set) // len(pos_training_set))
training_set = pos_training_set * training_set_modifier + neg_training_set
print(f'Positive training set is repeated {training_set_modifier} times due to data imbalance.')
# model and optimizers
criss = CRISSWrapper(device=configs.device)
lexicon_inducer = LexiconInducer(7, configs.hiddens, 1, 5).to(configs.device)
optimizer = torch.optim.Adam(lexicon_inducer.parameters(), lr=.0005)
# train model
for epoch in range(configs.epochs):
model_path = configs.save_path + f'/{epoch}.model.pt'
if os.path.exists(model_path):
lexicon_inducer.load_state_dict(torch.load(model_path))
continue
random.shuffle(training_set)
bar = tqdm(range(0, len(training_set), configs.batch_size))
total_loss = total_cnt = 0
for i, sid in enumerate(bar):
batch = training_set[sid:sid+configs.batch_size]
probs = extract_probs(batch, criss, lexicon_inducer, info, configs)
targets = torch.tensor(
[1 if tuple(x) in all_train_lexicon else 0 for x in batch]).float().to(configs.device)
optimizer.zero_grad()
loss = nn.BCELoss()(probs, targets)
loss.backward()
optimizer.step()
total_loss += loss.item() * len(batch)
total_cnt += len(batch)
bar.set_description(f'loss={total_loss / total_cnt:.5f}')
if (i + 1) % logging_steps == 0:
print(f'Epoch {epoch}, step {i+1}, loss = {total_loss / total_cnt:.5f}', flush=True)
torch.save(lexicon_inducer.state_dict(), configs.save_path + f'/{epoch}.{i+1}.model.pt')
print(f'Epoch {epoch}, loss = {total_loss / total_cnt:.5f}', flush=True)
torch.save(lexicon_inducer.state_dict(), configs.save_path + f'/model.pt')
best_threshold, best_n_cand = get_optimal_parameters(
pos_training_set, neg_training_set, train_lexicon, criss,
lexicon_inducer, info, configs,
)
induced_test_lexicon, test_eval = get_test_lexicon(
test_set, test_lexicon, criss, lexicon_inducer, info, configs, best_threshold, best_n_cand
)
with open(configs.save_path + '/induced.weaklysup.dict', 'w') as fout:
for item in induced_test_lexicon:
fout.write('\t'.join([str(x) for x in item]) + '\n')
fout.close()
return induced_test_lexicon, test_eval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-te', '--test', type=str, help='path to test lexicon')
parser.add_argument('-tr', '--train', type=str, help='path to training lexicon')
parser.add_argument('-o', '--output', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'test_set': args.test,
'tuning_set': args.train,
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.output,
'batch_size': 128,
'epochs': 50,
'device': args.device,
'hiddens': [8],
'src_lang': args.source,
'trg_lang': args.target
}
)
res = train_test(configs)
print(res[-1])
|
bitext-lexind-main
|
src/weakly_sup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from transformers import AutoTokenizer
import numpy as np
import torch
import torch.nn as nn
class CRISSWrapper(object):
def __init__(self, path='criss/criss-3rd.pt',
args_path='criss/args.pt',
tokenizer='facebook/mbart-large-cc25', device='cpu'):
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
self.device = device
args = torch.load(args_path)
task = tasks.setup_task(args)
models, _model_args = checkpoint_utils.load_model_ensemble(
path.split(':'),
arg_overrides=eval('{}'),
task=task
)
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
model = model.to(self.device)
self.model = EnsembleModel(models).to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
def word_embed(self, words, langcode='en_XX'):
tokens = list()
word_ids = list()
for word in words:
word_tokens = self.tokenizer.tokenize(word) + ['</s>', langcode]
tokens.append(word_tokens)
lengths = [len(x) for x in tokens]
max_length = max(lengths)
for i in range(len(tokens)):
word_ids.append(self.tokenizer.convert_tokens_to_ids(['<pad>'] * (max_length - len(tokens[i])) + tokens[i]))
encoder_input = {
'src_tokens': torch.tensor(word_ids).to(self.device),
'src_lengths': torch.tensor(lengths).to(self.device)
}
encoder_outs = self.model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.float().detach()
encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.float().detach()
encoder_mask = encoder_mask.transpose(0, 1).unsqueeze(2)
masked_encoder_outs = encoder_mask * np_encoder_outs
avg_pool = (masked_encoder_outs / encoder_mask.sum(dim=0)).sum(dim=0)
return avg_pool
class LexiconInducer(nn.Module):
def __init__(self, input_dim, hidden_dims, output_dim=1, feature_transform=3):
super(LexiconInducer, self).__init__()
layers = list()
hidden_dims = [input_dim] + hidden_dims
for i in range(1, len(hidden_dims)):
layers.append(nn.Linear(hidden_dims[i-1], hidden_dims[i]))
layers.append(nn.ReLU())
layers.append(nn.Linear(hidden_dims[-1], output_dim))
layers.append(nn.Sigmoid())
self.model = nn.Sequential(*layers)
self.bias = nn.Parameter(torch.ones(feature_transform))
self.feature_transform = feature_transform
def forward(self, x):
transformed_features = torch.cat([x[:, :-self.feature_transform], torch.log(x[:, -self.feature_transform:] + self.bias.abs())], dim=-1)
return self.model(transformed_features)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
|
bitext-lexind-main
|
src/models.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from evaluate import evaluate
from models import CRISSWrapper, LexiconInducer
cos = nn.CosineSimilarity(dim=-1)
def setup_configs(configs):
configs.save_path = configs.save_path.format(src=configs.src_lang, trg=configs.trg_lang)
configs.stats_path = configs.save_path + '/stats.pt'
def collect_bitext_stats(bitext_path, align_path, save_path, src_lang, trg_lang, is_reversed=False):
stats_path = save_path + '/stats.pt'
freq_path = save_path + '/freqs.pt'
if os.path.exists(stats_path):
coocc, semi_matched_coocc, matched_coocc = torch.load(stats_path)
else:
coocc = collections.defaultdict(collections.Counter)
semi_matched_coocc = collections.defaultdict(collections.Counter)
matched_coocc = collections.defaultdict(collections.Counter)
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
os.system(f'cat {align_path} > {tmpdir.name}/aligns.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
aligns = open(f'{tmpdir.name}/aligns.txt').readlines()
tmpdir.cleanup()
assert len(bitext) == len(aligns)
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
align = [tuple(x if not is_reversed else reversed(x)) for x in json.loads(aligns[i])['inter']] # only focus on inter based alignment
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
src_words = src_sent.lower().split()
trg_words = trg_sent.lower().split()
src_cnt = collections.Counter([x[0] for x in align])
trg_cnt = collections.Counter([x[1] for x in align])
for x, sw in enumerate(src_words):
for y, tw in enumerate(trg_words):
if (x, y) in align:
semi_matched_coocc[sw][tw] += 1
if src_cnt[x] == 1 and trg_cnt[y] == 1:
matched_coocc[sw][tw] += 1
coocc[sw][tw] += 1
torch.save((coocc, semi_matched_coocc, matched_coocc), stats_path)
if os.path.exists(freq_path):
freq_src, freq_trg = torch.load(freq_path)
else:
freq_src = collections.Counter()
freq_trg = collections.Counter()
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
tmpdir.cleanup()
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
for w in src_sent.split():
freq_src[w] += 1
for w in trg_sent.split():
freq_trg[w] += 1
torch.save((freq_src, freq_trg), freq_path)
return coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg
def load_lexicon(path):
lexicon = [regex.split(r'\t| ', x.strip()) for x in open(path)]
return set([tuple(x) for x in lexicon])
def get_test_lexicon(test_lexicon, info):
induced_lexicon = list()
coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg = info
for tsw in tqdm(set([x[0] for x in test_lexicon])):
ssw = to_simplified(tsw)
candidates = list()
for stw in matched_coocc[ssw]:
ttw = to_traditional(stw)
candidates.append([tsw, ttw, matched_coocc[ssw][stw] / (coocc[ssw][stw] + 20)])
if len(candidates) == 0:
continue
candidates = sorted(candidates, key=lambda x:-x[-1])
induced_lexicon.append(candidates[0][:2])
eval_result = evaluate(induced_lexicon, test_lexicon)
return induced_lexicon, eval_result
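# Minimal sketch (toy counts) of the smoothed ranking score used above: a candidate seen
# 30 times with 25 one-to-one matched alignments scores 25 / (30 + 20) = 0.5, while one
# seen 5 times with 5 matches scores only 5 / (5 + 20) = 0.2, so the +20 constant favours
# candidates backed by more total evidence.
def _demo_unsup_score(matched=25, total=30):
    return matched / (total + 20)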
def test(configs, logging_steps=50000):
setup_configs(configs)
# prepare feature extractor
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path, configs.src_lang, configs.trg_lang, configs.reversed
)
# dataset
test_lexicon = load_lexicon(configs.test_set)
induced_test_lexicon, test_eval = get_test_lexicon(test_lexicon, info)
with open(configs.save_path + '/induced.fullyunsup.dict', 'w') as fout:
for item in induced_test_lexicon:
fout.write('\t'.join([str(x) for x in item]) + '\n')
fout.close()
return induced_test_lexicon, test_eval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-te', '--test', type=str, help='path to test lexicon')
parser.add_argument('-o', '--output', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'test_set': args.test,
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.output,
'batch_size': 128,
'epochs': 50,
'device': args.device,
'hiddens': [8]
}
)
res = test(configs)
print(res[-1])
|
bitext-lexind-main
|
src/fully_unsup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
def evaluate(pr_pairs, gt_pairs):
gt_set = set([tuple(x) for x in gt_pairs])
pr_set = set([tuple(x) for x in pr_pairs])
prec = sum([1 if x in gt_set else 0 for x in pr_set]) \
/ float(len(pr_set)) if len(pr_set) > 0 else 0
rec = sum([1 if x in pr_set else 0 for x in gt_set]) \
/ float(len(gt_set)) if len(gt_set) > 0 else 0
gt_src_words = set([x[0] for x in gt_pairs])
pr_src_words = set([x[0] for x in pr_pairs])
oov_number = sum([1 if x not in pr_src_words else 0 for x in gt_src_words])
oov_rate = oov_number / float(len(gt_src_words))
eval_result = {
'oov_number': oov_number,
'oov_rate': oov_rate,
'precision': prec,
'recall': rec,
'f1': 2.0 * prec * rec / (prec + rec) if prec > 0 or rec > 0 else 0.0
}
return eval_result
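# Minimal sketch (toy data): one of the two predicted pairs is in the gold lexicon and the
# gold source word 'mouse' gets no prediction, so precision = 1/2, recall = 1/3 and
# oov_rate = 1/3.
def _demo_lexicon_evaluate():
    gold = [('dog', 'hund'), ('cat', 'katze'), ('mouse', 'maus')]
    pred = [('dog', 'hund'), ('cat', 'kater')]
    return evaluate(pred, gold)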
|
bitext-lexind-main
|
src/evaluate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
SRC_ROOT = Path(os.path.dirname(os.path.realpath(__file__)))
PRO_ROOT = SRC_ROOT.parent
if __name__ == '__main__':
pass
|
anli-main
|
src/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
from torch.optim import Adam
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import XLNetTokenizer, XLNetForSequenceClassification
# from transformers import XLNetTokenizer
# from modeling.dummy_modeling_xlnet import XLNetForSequenceClassification
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import AlbertTokenizer, AlbertForSequenceClassification
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from transformers import BartTokenizer, BartForSequenceClassification
from transformers import ElectraTokenizer, ElectraForSequenceClassification
from torch.utils.data import Dataset, DataLoader, DistributedSampler, RandomSampler, SequentialSampler
import config
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from flint.data_utils.batchbuilder import BaseBatchBuilder, move_to_device
from flint.data_utils.fields import RawFlintField, LabelFlintField, ArrayIndexFlintField
from modeling.res_encoder import ResEncoder, EmptyScheduler, BagOfWords
from utils import common, list_dict_data_tool, save_tool
import os
import torch.multiprocessing as mp
import torch.distributed as dist
import torch.nn as nn
import numpy as np
import random
import torch
from tqdm import tqdm
import math
import copy
import pprint
pp = pprint.PrettyPrinter(indent=2)
# from fairseq.data.data_utils import collate_tokens
MODEL_CLASSES = {
"lstm-resencoder": {
"model_name": "bert-large-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
},
"bag-of-words": {
"model_name": "bert-large-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
},
"bert-base": {
"model_name": "bert-base-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
},
"bert-large": {
"model_name": "bert-large-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
"internal_model_name": "bert",
'insight_supported': True,
},
"xlnet-base": {
"model_name": "xlnet-base-cased",
"tokenizer": XLNetTokenizer,
"sequence_classification": XLNetForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 4,
"padding_att_value": 0,
"left_pad": True,
"internal_model_name": ["transformer", "word_embedding"],
},
"xlnet-large": {
"model_name": "xlnet-large-cased",
"tokenizer": XLNetTokenizer,
"sequence_classification": XLNetForSequenceClassification,
"padding_segement_value": 4,
"padding_att_value": 0,
"left_pad": True,
"internal_model_name": ["transformer", "word_embedding"],
'insight_supported': True,
},
"roberta-base": {
"model_name": "roberta-base",
"tokenizer": RobertaTokenizer,
"sequence_classification": RobertaForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "roberta",
'insight_supported': True,
},
"roberta-large": {
"model_name": "roberta-large",
"tokenizer": RobertaTokenizer,
"sequence_classification": RobertaForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "roberta",
'insight_supported': True,
},
"albert-xxlarge": {
"model_name": "albert-xxlarge-v2",
"tokenizer": AlbertTokenizer,
"sequence_classification": AlbertForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
"internal_model_name": "albert",
'insight_supported': True,
},
"distilbert": {
"model_name": "distilbert-base-cased",
"tokenizer": DistilBertTokenizer,
"sequence_classification": DistilBertForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
},
"bart-large": {
"model_name": "facebook/bart-large",
"tokenizer": BartTokenizer,
"sequence_classification": BartForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": ["model", "encoder", "embed_tokens"],
'insight_supported': True,
},
"electra-base": {
"model_name": "google/electra-base-discriminator",
"tokenizer": ElectraTokenizer,
"sequence_classification": ElectraForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "electra",
'insight_supported': True,
},
"electra-large": {
"model_name": "google/electra-large-discriminator",
"tokenizer": ElectraTokenizer,
"sequence_classification": ElectraForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "electra",
'insight_supported': True,
}
}
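# Editor note on the less obvious keys above (interpretation based on how they are used below):
#   padding_segement_value / padding_att_value -> pad values for 'token_type_ids' / 'attention_mask'
#       (consumed as pad_idx in the batching_schema built inside train()).
#   left_pad -> pad sequences on the left instead of the right (XLNet-style models).
#   internal_model_name -> attribute path from the classification model to its input embedding
#       layer, used by the Captum helpers in src/nli/inspection_tools.py.
#   insight_supported -> whether LayerIntegratedGradients inspection is enabled for this model.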
registered_path = {
'snli_train': config.PRO_ROOT / "data/build/snli/train.jsonl",
'snli_dev': config.PRO_ROOT / "data/build/snli/dev.jsonl",
'snli_test': config.PRO_ROOT / "data/build/snli/test.jsonl",
'mnli_train': config.PRO_ROOT / "data/build/mnli/train.jsonl",
'mnli_m_dev': config.PRO_ROOT / "data/build/mnli/m_dev.jsonl",
'mnli_mm_dev': config.PRO_ROOT / "data/build/mnli/mm_dev.jsonl",
'fever_train': config.PRO_ROOT / "data/build/fever_nli/train.jsonl",
'fever_dev': config.PRO_ROOT / "data/build/fever_nli/dev.jsonl",
'fever_test': config.PRO_ROOT / "data/build/fever_nli/test.jsonl",
'anli_r1_train': config.PRO_ROOT / "data/build/anli/r1/train.jsonl",
'anli_r1_dev': config.PRO_ROOT / "data/build/anli/r1/dev.jsonl",
'anli_r1_test': config.PRO_ROOT / "data/build/anli/r1/test.jsonl",
'anli_r2_train': config.PRO_ROOT / "data/build/anli/r2/train.jsonl",
'anli_r2_dev': config.PRO_ROOT / "data/build/anli/r2/dev.jsonl",
'anli_r2_test': config.PRO_ROOT / "data/build/anli/r2/test.jsonl",
'anli_r3_train': config.PRO_ROOT / "data/build/anli/r3/train.jsonl",
'anli_r3_dev': config.PRO_ROOT / "data/build/anli/r3/dev.jsonl",
'anli_r3_test': config.PRO_ROOT / "data/build/anli/r3/test.jsonl",
}
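# Editor note: --train_data / --eval_data take a comma-separated list of "name:path" entries.
# If the name matches a key in registered_path, the registered jsonl is loaded and the path part
# is effectively a placeholder; otherwise the part after the colon is loaded as a jsonl file.
# Hypothetical example:
#   --train_data snli_train:none,mnli_train:none --train_weights 1,1 --eval_data anli_r1_dev:none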
nli_label2index = {
'e': 0,
'n': 1,
'c': 2,
    'h': -1,  # 'h' = hidden label (no gold answer available); mapped to -1
}
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
class NLIDataset(Dataset):
def __init__(self, data_list, transform) -> None:
super().__init__()
self.d_list = data_list
self.len = len(self.d_list)
self.transform = transform
def __getitem__(self, index: int):
return self.transform(self.d_list[index])
# you should write schema for each of the input elements
def __len__(self) -> int:
return self.len
class NLITransform(object):
def __init__(self, model_name, tokenizer, max_length=None):
self.model_name = model_name
self.tokenizer = tokenizer
self.max_length = max_length
def __call__(self, sample):
processed_sample = dict()
processed_sample['uid'] = sample['uid']
processed_sample['gold_label'] = sample['label']
processed_sample['y'] = nli_label2index[sample['label']]
# premise: str = sample['premise']
premise: str = sample['context'] if 'context' in sample else sample['premise']
hypothesis: str = sample['hypothesis']
if premise.strip() == '':
premise = 'empty'
if hypothesis.strip() == '':
hypothesis = 'empty'
tokenized_input_seq_pair = self.tokenizer.encode_plus(premise, hypothesis,
max_length=self.max_length,
return_token_type_ids=True, truncation=True)
processed_sample.update(tokenized_input_seq_pair)
return processed_sample
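# Editor note: besides 'uid', 'gold_label' and 'y', the transform adds the fields returned by
# encode_plus with return_token_type_ids=True, i.e. 'input_ids', 'token_type_ids' and
# 'attention_mask' -- exactly the keys padded by the batching_schema defined in train().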
def build_eval_dataset_loader_and_sampler(d_list, data_transformer, batching_schema, batch_size_per_gpu_eval):
d_dataset = NLIDataset(d_list, data_transformer)
d_sampler = SequentialSampler(d_dataset)
d_dataloader = DataLoader(dataset=d_dataset,
batch_size=batch_size_per_gpu_eval,
shuffle=False, #
num_workers=0,
pin_memory=True,
sampler=d_sampler,
collate_fn=BaseBatchBuilder(batching_schema)) #
return d_dataset, d_sampler, d_dataloader
def sample_data_list(d_list, ratio):
if ratio <= 0:
raise ValueError("Invalid training weight ratio. Please change --train_weights.")
upper_int = int(math.ceil(ratio))
if upper_int == 1:
return d_list # if ratio is 1 then we just return the data list
else:
sampled_d_list = []
for _ in range(upper_int):
sampled_d_list.extend(copy.deepcopy(d_list))
if np.isclose(ratio, upper_int):
return sampled_d_list
else:
sampled_length = int(ratio * len(d_list))
random.shuffle(sampled_d_list)
return sampled_d_list[:sampled_length]
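# Editor note: sample_data_list scales a dataset by its weight, e.g. for a 1000-example list:
#   ratio 1.0 -> the original list unchanged
#   ratio 3.0 -> three concatenated copies (3000 examples)
#   ratio 2.5 -> three copies are concatenated, shuffled, then truncated to int(2.5 * 1000) = 2500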
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--cpu", action="store_true", help="If set, we only use CPU.")
parser.add_argument("--single_gpu", action="store_true", help="If set, we only use single GPU.")
parser.add_argument("--fp16", action="store_true", help="If set, we will use fp16.")
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
# environment arguments
parser.add_argument('-s', '--seed', default=1, type=int, metavar='N',
help='manual random seed')
parser.add_argument('-n', '--num_nodes', default=1, type=int, metavar='N',
help='number of nodes')
parser.add_argument('-g', '--gpus_per_node', default=1, type=int,
help='number of gpus per node')
parser.add_argument('-nr', '--node_rank', default=0, type=int,
help='ranking within the nodes')
# experiments specific arguments
parser.add_argument('--debug_mode',
action='store_true',
dest='debug_mode',
                        help='whether this is debug mode or normal')
parser.add_argument(
"--model_class_name",
type=str,
help="Set the model class of the experiment.",
)
parser.add_argument(
"--experiment_name",
type=str,
help="Set the name of the experiment. [model_name]/[data]/[task]/[other]",
)
parser.add_argument(
"--save_prediction",
action='store_true',
dest='save_prediction',
help='Do we want to save prediction')
parser.add_argument('--epochs', default=2, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument(
"--per_gpu_train_batch_size", default=16, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=64, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument("--max_length", default=160, type=int, help="Max length of the sequences.")
parser.add_argument("--warmup_steps", default=-1, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--learning_rate", default=1e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument(
"--eval_frequency", default=1000, type=int, help="set the evaluation frequency, evaluate every X global step.",
)
parser.add_argument("--train_data",
type=str,
help="The training data used in the experiments.")
parser.add_argument("--train_weights",
type=str,
help="The training data weights used in the experiments.")
parser.add_argument("--eval_data",
type=str,
help="The training data used in the experiments.")
args = parser.parse_args()
if args.cpu:
args.world_size = 1
train(-1, args)
elif args.single_gpu:
args.world_size = 1
train(0, args)
else: # distributed multiGPU training
#########################################################
args.world_size = args.gpus_per_node * args.num_nodes #
# os.environ['MASTER_ADDR'] = '152.2.142.184' # This is the IP address for nlp5
# maybe we will automatically retrieve the IP later.
        os.environ['MASTER_PORT'] = '8888'  # must be a valid TCP port (<= 65535); '88888' is out of range
mp.spawn(train, nprocs=args.gpus_per_node, args=(args,)) # spawn how many process in this node
# remember train is called as train(i, args).
#########################################################
def train(local_rank, args):
# debug = False
# print("GPU:", gpu)
# world_size = args.world_size
args.global_rank = args.node_rank * args.gpus_per_node + local_rank
args.local_rank = local_rank
# args.warmup_steps = 20
debug_count = 1000
num_epoch = args.epochs
actual_train_batch_size = args.world_size * args.per_gpu_train_batch_size * args.gradient_accumulation_steps
args.actual_train_batch_size = actual_train_batch_size
set_seed(args.seed)
num_labels = 3 # we are doing NLI so we set num_labels = 3, for other task we can change this value.
max_length = args.max_length
model_class_item = MODEL_CLASSES[args.model_class_name]
model_class_name = args.model_class_name
model_name = model_class_item['model_name']
do_lower_case = model_class_item['do_lower_case'] if 'do_lower_case' in model_class_item else False
tokenizer = model_class_item['tokenizer'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
do_lower_case=do_lower_case)
if model_class_name in ['lstm-resencoder']:
hg_model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(
config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
embedding = hg_model.bert.embeddings.word_embeddings
model = ResEncoder(v_size=embedding.weight.size(0), embd_dim=embedding.weight.size(1))
model.Embd.weight = embedding.weight
elif model_class_name in ['bag-of-words']:
hg_model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(
config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
embedding = hg_model.bert.embeddings.word_embeddings
model = BagOfWords(v_size=embedding.weight.size(0), embd_dim=embedding.weight.size(1))
model.Embd.weight = embedding.weight
else:
model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
padding_token_value = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
padding_segement_value = model_class_item["padding_segement_value"]
padding_att_value = model_class_item["padding_att_value"]
left_pad = model_class_item['left_pad'] if 'left_pad' in model_class_item else False
batch_size_per_gpu_train = args.per_gpu_train_batch_size
batch_size_per_gpu_eval = args.per_gpu_eval_batch_size
if not args.cpu and not args.single_gpu:
dist.init_process_group(
backend='nccl',
init_method='env://',
world_size=args.world_size,
rank=args.global_rank
)
train_data_str = args.train_data
train_data_weights_str = args.train_weights
eval_data_str = args.eval_data
train_data_name = []
train_data_path = []
train_data_list = []
train_data_weights = []
eval_data_name = []
eval_data_path = []
eval_data_list = []
train_data_named_path = train_data_str.split(',')
weights_str = train_data_weights_str.split(',') if train_data_weights_str is not None else None
eval_data_named_path = eval_data_str.split(',')
for named_path in train_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
        path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
train_data_name.append(name)
train_data_path.append(path)
train_data_list.append(d_list)
if weights_str is not None:
for weights in weights_str:
train_data_weights.append(float(weights))
else:
for i in range(len(train_data_list)):
train_data_weights.append(1)
for named_path in eval_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
        path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
eval_data_name.append(name)
eval_data_path.append(path)
eval_data_list.append(d_list)
assert len(train_data_weights) == len(train_data_list)
batching_schema = {
'uid': RawFlintField(),
'y': LabelFlintField(),
'input_ids': ArrayIndexFlintField(pad_idx=padding_token_value, left_pad=left_pad),
'token_type_ids': ArrayIndexFlintField(pad_idx=padding_segement_value, left_pad=left_pad),
'attention_mask': ArrayIndexFlintField(pad_idx=padding_att_value, left_pad=left_pad),
}
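    # Editor note (assumption about flint internals): BaseBatchBuilder presumably pads each listed
    # field to the longest sequence in the batch using the pad_idx / left_pad settings above, while
    # 'uid' is passed through untouched and 'y' is collected as the label tensor.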
data_transformer = NLITransform(model_name, tokenizer, max_length)
# data_transformer = NLITransform(model_name, tokenizer, max_length, with_element=True)
eval_data_loaders = []
for eval_d_list in eval_data_list:
d_dataset, d_sampler, d_dataloader = build_eval_dataset_loader_and_sampler(eval_d_list, data_transformer,
batching_schema,
batch_size_per_gpu_eval)
eval_data_loaders.append(d_dataloader)
# Estimate the training size:
training_list = []
for i in range(len(train_data_list)):
print("Build Training Data ...")
train_d_list = train_data_list[i]
train_d_name = train_data_name[i]
train_d_weight = train_data_weights[i]
cur_train_list = sample_data_list(train_d_list, train_d_weight) # change later # we can apply different sample strategy here.
print(f"Data Name:{train_d_name}; Weight: {train_d_weight}; "
f"Original Size: {len(train_d_list)}; Sampled Size: {len(cur_train_list)}")
training_list.extend(cur_train_list)
estimated_training_size = len(training_list)
print("Estimated training size:", estimated_training_size)
# Estimate the training size ends:
# t_total = estimated_training_size // args.gradient_accumulation_steps * num_epoch
t_total = estimated_training_size * num_epoch // args.actual_train_batch_size
if args.warmup_steps <= 0: # set the warmup steps to 0.1 * total step if the given warmup step is -1.
args.warmup_steps = int(t_total * 0.1)
if not args.cpu:
torch.cuda.set_device(args.local_rank)
model.cuda(args.local_rank)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
if model_class_name not in ['lstm-resencoder']:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
else:
        optimizer = torch.optim.Adam(optimizer_grouped_parameters)  # use torch.optim.Adam explicitly; the bare name Adam is not defined by the imports shown
scheduler = EmptyScheduler()
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if not args.cpu and not args.single_gpu:
model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
output_device=local_rank, find_unused_parameters=True)
args_dict = dict(vars(args))
file_path_prefix = '.'
if args.global_rank in [-1, 0]:
print("Total Steps:", t_total)
args.total_step = t_total
print("Warmup Steps:", args.warmup_steps)
print("Actual Training Batch Size:", actual_train_batch_size)
print("Arguments", pp.pprint(args))
# Let build the logger and log everything before the start of the first training epoch.
if args.global_rank in [-1, 0]: # only do logging if we use cpu or global_rank=0
if not args.debug_mode:
file_path_prefix, date = save_tool.gen_file_prefix(f"{args.experiment_name}")
# # # Create Log File
# Save the source code.
script_name = os.path.basename(__file__)
with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
out_f.write(it.read())
out_f.flush()
# Save option file
common.save_json(args_dict, os.path.join(file_path_prefix, "args.json"))
checkpoints_path = Path(file_path_prefix) / "checkpoints"
if not checkpoints_path.exists():
checkpoints_path.mkdir()
prediction_path = Path(file_path_prefix) / "predictions"
if not prediction_path.exists():
prediction_path.mkdir()
global_step = 0
# print(f"Global Rank:{args.global_rank} ### ", 'Init!')
for epoch in tqdm(range(num_epoch), desc="Epoch", disable=args.global_rank not in [-1, 0]):
# Let's build up training dataset for this epoch
training_list = []
for i in range(len(train_data_list)):
print("Build Training Data ...")
train_d_list = train_data_list[i]
train_d_name = train_data_name[i]
train_d_weight = train_data_weights[i]
cur_train_list = sample_data_list(train_d_list, train_d_weight) # change later # we can apply different sample strategy here.
print(f"Data Name:{train_d_name}; Weight: {train_d_weight}; "
f"Original Size: {len(train_d_list)}; Sampled Size: {len(cur_train_list)}")
training_list.extend(cur_train_list)
random.shuffle(training_list)
train_dataset = NLIDataset(training_list, data_transformer)
train_sampler = SequentialSampler(train_dataset)
if not args.cpu and not args.single_gpu:
print("Use distributed sampler.")
train_sampler = DistributedSampler(train_dataset, args.world_size, args.global_rank,
shuffle=True)
train_dataloader = DataLoader(dataset=train_dataset,
batch_size=batch_size_per_gpu_train,
shuffle=False, #
num_workers=0,
pin_memory=True,
sampler=train_sampler,
collate_fn=BaseBatchBuilder(batching_schema)) #
# training build finished.
print(debug_node_info(args), "epoch: ", epoch)
if not args.cpu and not args.single_gpu:
train_sampler.set_epoch(epoch) # setup the epoch to ensure random sampling at each epoch
for forward_step, batch in enumerate(tqdm(train_dataloader, desc="Iteration",
disable=args.global_rank not in [-1, 0]), 0):
model.train()
batch = move_to_device(batch, local_rank)
# print(batch['input_ids'], batch['y'])
if args.model_class_name in ["distilbert", "bart-large", "lstm-resencoder", "bag-of-words"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['y'])
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=batch['y'])
loss, logits = outputs[:2]
# print(debug_node_info(args), loss, logits, batch['uid'])
# print(debug_node_info(args), loss, batch['uid'])
# Accumulated loss
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
# if this forward step need model updates
# handle fp16
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
            # Gradient clipping (only applied when max_grad_norm > 0)
if (forward_step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.global_rank in [-1, 0] and args.eval_frequency > 0 and global_step % args.eval_frequency == 0:
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# saving checkpoints
current_checkpoint_filename = \
f'e({epoch})|i({global_step})'
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
current_checkpoint_filename += \
f'|{cur_eval_data_name}#({round(r_dict[cur_eval_data_name]["acc"], 4)})'
if not args.debug_mode:
# save model:
model_output_dir = checkpoints_path / current_checkpoint_filename
if not model_output_dir.exists():
model_output_dir.mkdir()
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), str(model_output_dir / "model.pt"))
torch.save(optimizer.state_dict(), str(model_output_dir / "optimizer.pt"))
torch.save(scheduler.state_dict(), str(model_output_dir / "scheduler.pt"))
# save prediction:
if not args.debug_mode and args.save_prediction:
cur_results_path = prediction_path / current_checkpoint_filename
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
# End of epoch evaluation.
if args.global_rank in [-1, 0]:
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# saving checkpoints
current_checkpoint_filename = \
f'e({epoch})|i({global_step})'
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
current_checkpoint_filename += \
f'|{cur_eval_data_name}#({round(r_dict[cur_eval_data_name]["acc"], 4)})'
if not args.debug_mode:
# save model:
model_output_dir = checkpoints_path / current_checkpoint_filename
if not model_output_dir.exists():
model_output_dir.mkdir()
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), str(model_output_dir / "model.pt"))
torch.save(optimizer.state_dict(), str(model_output_dir / "optimizer.pt"))
torch.save(scheduler.state_dict(), str(model_output_dir / "scheduler.pt"))
# save prediction:
if not args.debug_mode and args.save_prediction:
cur_results_path = prediction_path / current_checkpoint_filename
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
id2label = {
0: 'e',
1: 'n',
2: 'c',
-1: '-',
}
def count_acc(gt_list, pred_list):
assert len(gt_list) == len(pred_list)
gt_dict = list_dict_data_tool.list_to_dict(gt_list, 'uid')
pred_list = list_dict_data_tool.list_to_dict(pred_list, 'uid')
total_count = 0
hit = 0
for key, value in pred_list.items():
if gt_dict[key]['label'] == value['predicted_label']:
hit += 1
total_count += 1
return hit, total_count
def evaluation_dataset(args, eval_dataloader, eval_list, model, r_dict, eval_name):
# r_dict = dict()
pred_output_list = eval_model(model, eval_dataloader, args.global_rank, args)
predictions = pred_output_list
hit, total = count_acc(eval_list, pred_output_list)
print(debug_node_info(args), f"{eval_name} Acc:", hit, total, hit / total)
r_dict[f'{eval_name}'] = {
'acc': hit / total,
'correct_count': hit,
'total_count': total,
'predictions': predictions,
}
def eval_model(model, dev_dataloader, device_num, args):
model.eval()
uid_list = []
y_list = []
pred_list = []
logits_list = []
with torch.no_grad():
for i, batch in enumerate(dev_dataloader, 0):
batch = move_to_device(batch, device_num)
if args.model_class_name in ["distilbert", "bart-large", 'lstm-resencoder', "bag-of-words"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['y'])
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=batch['y'])
loss, logits = outputs[:2]
uid_list.extend(list(batch['uid']))
y_list.extend(batch['y'].tolist())
pred_list.extend(torch.max(logits, 1)[1].view(logits.size(0)).tolist())
logits_list.extend(logits.tolist())
assert len(pred_list) == len(logits_list)
result_items_list = []
for i in range(len(uid_list)):
r_item = dict()
r_item['uid'] = uid_list[i]
r_item['logits'] = logits_list[i]
r_item['predicted_label'] = id2label[pred_list[i]]
result_items_list.append(r_item)
return result_items_list
def debug_node_info(args):
names = ['global_rank', 'local_rank', 'node_rank']
values = []
for name in names:
if name in args:
values.append(getattr(args, name))
else:
return "Pro:No node info "
return "Pro:" + '|'.join([f"{name}:{value}" for name, value in zip(names, values)]) + "||Print:"
if __name__ == '__main__':
main()
|
anli-main
|
src/nli/training_extra.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from captum.attr import LayerIntegratedGradients
logger = logging.getLogger(__name__)
def summarize_attributions(attributions):
"""
Summarises the attribution across multiple runs
"""
attributions = attributions.sum(dim=-1).squeeze(0)
attributions = attributions / torch.norm(attributions)
return attributions
def get_model_prediction(input_ids, attention_mask, token_type_ids, model, model_class_item, with_gradient=False):
model.eval()
if not with_gradient:
with torch.no_grad():
if model_class_item['model_class_name'] in ["distilbert", "bart-large"]:
outputs = model(input_ids,
attention_mask=attention_mask,
labels=None)
else:
outputs = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=None)
else:
if model_class_item['model_class_name'] in ["distilbert", "bart-large"]:
outputs = model(input_ids,
attention_mask=attention_mask,
labels=None)
else:
outputs = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=None)
return outputs[0]
def get_lig_object(model, model_class_item):
insight_supported = model_class_item['insight_supported'] if 'insight_supported' in model_class_item else False
internal_model_name = model_class_item['internal_model_name']
lig = None # default is None.
if not insight_supported:
logger.warning(f"Inspection for model '{model_class_item['model_class_name']}' is not supported.")
return lig
if isinstance(internal_model_name, list):
current_layer = model
for layer_n in internal_model_name:
current_layer = current_layer.__getattr__(layer_n)
# print(current_layer)
lig = LayerIntegratedGradients(get_model_prediction, current_layer)
else:
lig = LayerIntegratedGradients(get_model_prediction,
model.__getattr__(internal_model_name).embeddings.word_embeddings)
return lig
def get_tokenized_input_tokens(tokenizer, token_ids):
raw_words_list = tokenizer.convert_ids_to_tokens(token_ids)
string_tokens = [tokenizer.convert_tokens_to_string(word) for word in raw_words_list]
# still need some cleanup, remove space within tokens
output_tokens = []
for t in string_tokens:
output_tokens.append(t.replace(" ", ""))
return output_tokens
def cleanup_tokenization_special_tokens(tokens, importance, tokenizer):
filtered_tokens = []
filtered_importance = []
for t, i in zip(tokens, importance):
if t in tokenizer.all_special_tokens:
continue
else:
filtered_tokens.append(t)
filtered_importance.append(i)
return filtered_tokens, filtered_importance
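# --- Hedged usage sketch (editor addition, not part of the original file) ---
# One plausible way to combine the helpers above to obtain per-token importances for a single
# tokenized example. Note that model_class_item is expected to also carry a 'model_class_name'
# key (read by get_model_prediction / get_lig_object) on top of the MODEL_CLASSES fields.
def example_token_attribution(model, model_class_item, tokenizer,
                              input_ids, attention_mask, token_type_ids, target_label):
    lig = get_lig_object(model, model_class_item)
    if lig is None:  # inspection not supported for this model class
        return None, None
    # additional_forward_args mirrors get_model_prediction's signature after input_ids;
    # with_gradient=True so Captum can backpropagate through the embedding layer.
    attributions = lig.attribute(inputs=input_ids,
                                 additional_forward_args=(attention_mask, token_type_ids,
                                                          model, model_class_item, True),
                                 target=target_label)
    scores = summarize_attributions(attributions)
    tokens = get_tokenized_input_tokens(tokenizer, input_ids[0].tolist())
    return cleanup_tokenization_special_tokens(tokens, scores.tolist(), tokenizer)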
|
anli-main
|
src/nli/inspection_tools.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
import config
from flint.data_utils.fields import RawFlintField, LabelFlintField, ArrayIndexFlintField
from utils import common, list_dict_data_tool, save_tool
from nli.training import MODEL_CLASSES, registered_path, build_eval_dataset_loader_and_sampler, NLITransform, \
NLIDataset, count_acc, evaluation_dataset, eval_model
import torch
import pprint
pp = pprint.PrettyPrinter(indent=2)
def evaluation():
parser = argparse.ArgumentParser()
parser.add_argument("--cpu", action="store_true", help="If set, we only use CPU.")
parser.add_argument(
"--model_class_name",
type=str,
help="Set the model class of the experiment.",
required=True
)
parser.add_argument(
"--model_checkpoint_path",
type=str,
        help='Path to the model checkpoint to load for evaluation.', required=True)
parser.add_argument(
"--output_prediction_path",
type=str,
default=None,
help='Set the path to save the prediction.')
parser.add_argument(
"--per_gpu_eval_batch_size", default=16, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument("--max_length", default=156, type=int, help="Max length of the sequences.")
parser.add_argument("--eval_data",
type=str,
help="The training data used in the experiments.")
args = parser.parse_args()
if args.cpu:
args.global_rank = -1
else:
args.global_rank = 0
model_checkpoint_path = args.model_checkpoint_path
num_labels = 3
# we are doing NLI so we set num_labels = 3, for other task we can change this value.
max_length = args.max_length
model_class_item = MODEL_CLASSES[args.model_class_name]
model_name = model_class_item['model_name']
do_lower_case = model_class_item['do_lower_case'] if 'do_lower_case' in model_class_item else False
tokenizer = model_class_item['tokenizer'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
do_lower_case=do_lower_case)
model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
    model.load_state_dict(torch.load(model_checkpoint_path, map_location=torch.device('cpu')))  # load to CPU first; moved to GPU below when --cpu is not set
padding_token_value = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
padding_segement_value = model_class_item["padding_segement_value"]
padding_att_value = model_class_item["padding_att_value"]
left_pad = model_class_item['left_pad'] if 'left_pad' in model_class_item else False
batch_size_per_gpu_eval = args.per_gpu_eval_batch_size
eval_data_str = args.eval_data
eval_data_name = []
eval_data_path = []
eval_data_list = []
eval_data_named_path = eval_data_str.split(',')
for named_path in eval_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
        path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
eval_data_name.append(name)
eval_data_path.append(path)
eval_data_list.append(d_list)
batching_schema = {
'uid': RawFlintField(),
'y': LabelFlintField(),
'input_ids': ArrayIndexFlintField(pad_idx=padding_token_value, left_pad=left_pad),
'token_type_ids': ArrayIndexFlintField(pad_idx=padding_segement_value, left_pad=left_pad),
'attention_mask': ArrayIndexFlintField(pad_idx=padding_att_value, left_pad=left_pad),
}
data_transformer = NLITransform(model_name, tokenizer, max_length)
eval_data_loaders = []
for eval_d_list in eval_data_list:
d_dataset, d_sampler, d_dataloader = build_eval_dataset_loader_and_sampler(eval_d_list, data_transformer,
batching_schema,
batch_size_per_gpu_eval)
eval_data_loaders.append(d_dataloader)
if not args.cpu:
torch.cuda.set_device(0)
model.cuda(0)
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# save prediction:
if args.output_prediction_path is not None:
cur_results_path = Path(args.output_prediction_path)
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
if __name__ == '__main__':
evaluation()
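# Illustrative invocation (editor addition; checkpoint path is hypothetical):
#   python src/nli/evaluation.py \
#       --model_class_name roberta-large \
#       --model_checkpoint_path saved_models/<run>/checkpoints/<ckpt>/model.pt \
#       --eval_data anli_r1_dev:none,anli_r1_test:none \
#       --output_prediction_path ./predictions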
|
anli-main
|
src/nli/evaluation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
|
anli-main
|
src/nli/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
import uuid
import numpy as np
import config
from flint.data_utils.batchbuilder import move_to_device
from flint.data_utils.fields import RawFlintField, LabelFlintField, ArrayIndexFlintField
from utils import common, list_dict_data_tool, save_tool
from nli.training import MODEL_CLASSES, registered_path, build_eval_dataset_loader_and_sampler, NLITransform, \
NLIDataset, count_acc, evaluation_dataset, eval_model
import torch
import pprint
pp = pprint.PrettyPrinter(indent=2)
id2label = {
0: 'e',
1: 'n',
2: 'c',
-1: '-',
}
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(np.asarray(x) - np.max(x))
return e_x / e_x.sum()
def eval_model(model, dev_dataloader, device_num, args):
model.eval()
uid_list = []
y_list = []
pred_list = []
logits_list = []
with torch.no_grad():
for i, batch in enumerate(dev_dataloader, 0):
batch = move_to_device(batch, device_num)
if args.model_class_name in ["distilbert", "bart-large"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=None)
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=None)
# print(outputs)
logits = outputs[0]
uid_list.extend(list(batch['uid']))
y_list.extend(batch['y'].tolist())
pred_list.extend(torch.max(logits, 1)[1].view(logits.size(0)).tolist())
logits_list.extend(logits.tolist())
assert len(pred_list) == len(logits_list)
result_items_list = []
for i in range(len(uid_list)):
r_item = dict()
r_item['uid'] = uid_list[i]
r_item['logits'] = logits_list[i]
r_item['probability'] = softmax(r_item['logits'])
r_item['predicted_label'] = id2label[pred_list[i]]
result_items_list.append(r_item)
return result_items_list
def inference(model_class_name, model_checkpoint_path, max_length, premise, hypothesis, cpu=True):
parser = argparse.ArgumentParser()
args = parser.parse_args()
# CPU for now
if cpu:
args.global_rank = -1
else:
args.global_rank = 0
model_checkpoint_path = model_checkpoint_path
args.model_class_name = model_class_name
num_labels = 3
# we are doing NLI so we set num_labels = 3, for other task we can change this value.
max_length = max_length
model_class_item = MODEL_CLASSES[model_class_name]
model_name = model_class_item['model_name']
do_lower_case = model_class_item['do_lower_case'] if 'do_lower_case' in model_class_item else False
tokenizer = model_class_item['tokenizer'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
do_lower_case=do_lower_case)
model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
    model.load_state_dict(torch.load(model_checkpoint_path, map_location=torch.device('cpu')))  # load to CPU first; moved to GPU below if cpu=False
padding_token_value = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
padding_segement_value = model_class_item["padding_segement_value"]
padding_att_value = model_class_item["padding_att_value"]
left_pad = model_class_item['left_pad'] if 'left_pad' in model_class_item else False
batch_size_per_gpu_eval = 16
eval_data_list = [{
'uid': str(uuid.uuid4()),
'premise': premise,
'hypothesis': hypothesis,
'label': 'h' # hidden
}]
batching_schema = {
'uid': RawFlintField(),
'y': LabelFlintField(),
'input_ids': ArrayIndexFlintField(pad_idx=padding_token_value, left_pad=left_pad),
'token_type_ids': ArrayIndexFlintField(pad_idx=padding_segement_value, left_pad=left_pad),
'attention_mask': ArrayIndexFlintField(pad_idx=padding_att_value, left_pad=left_pad),
}
data_transformer = NLITransform(model_name, tokenizer, max_length)
d_dataset, d_sampler, d_dataloader = build_eval_dataset_loader_and_sampler(eval_data_list, data_transformer,
batching_schema,
batch_size_per_gpu_eval)
if not cpu:
torch.cuda.set_device(0)
model.cuda(0)
pred_output_list = eval_model(model, d_dataloader, args.global_rank, args)
# r_dict = dict()
# Eval loop:
# print(pred_output_list)
return pred_output_list[0]
if __name__ == '__main__':
# model_class_name = "roberta-large"
# model_checkpoint_path = config.PRO_ROOT / "saved_models/06-29-22:16:24_roberta-large|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(0)|i(24000)|snli_dev#(0.9252)|mnli_m_dev#(0.899)|mnli_mm_dev#(0.9002)|anli_r1_dev#(0.74)|anli_r1_test#(0.742)|anli_r2_dev#(0.506)|anli_r2_test#(0.498)|anli_r3_dev#(0.4667)|anli_r3_test#(0.455)/model.pt"
# model_class_name = "xlnet-large"
# model_checkpoint_path = config.PRO_ROOT / "saved_models/06-29-23:04:33_xlnet-large|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(1)|i(30000)|snli_dev#(0.9274)|mnli_m_dev#(0.8981)|mnli_mm_dev#(0.8947)|anli_r1_dev#(0.735)|anli_r1_test#(0.701)|anli_r2_dev#(0.521)|anli_r2_test#(0.514)|anli_r3_dev#(0.5075)|anli_r3_test#(0.4975)/model.pt"
model_class_name = "albert-xxlarge"
model_checkpoint_path = config.PRO_ROOT / "saved_models/06-29-23:09:03_albert-xxlarge|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(0)|i(16000)|snli_dev#(0.9246)|mnli_m_dev#(0.8948)|mnli_mm_dev#(0.8932)|anli_r1_dev#(0.733)|anli_r1_test#(0.711)|anli_r2_dev#(0.571)|anli_r2_test#(0.57)|anli_r3_dev#(0.5817)|anli_r3_test#(0.5375)/model.pt"
#
# model_class_name = "bart-large"
# model_checkpoint_path = config.PRO_ROOT / "saved_models/06-30-08:23:44_bart-large|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(1)|i(40000)|snli_dev#(0.9298)|mnli_m_dev#(0.8941)|mnli_mm_dev#(0.8973)|anli_r1_dev#(0.736)|anli_r1_test#(0.72)|anli_r2_dev#(0.533)|anli_r2_test#(0.514)|anli_r3_dev#(0.5058)|anli_r3_test#(0.5042)/model.pt"
#
# model_class_name = "electra-large"
# model_checkpoint_path = config.PRO_ROOT / "saved_models/08-02-08:58:05_electra-large|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(0)|i(12000)|snli_dev#(0.9168)|mnli_m_dev#(0.8597)|mnli_mm_dev#(0.8661)|anli_r1_dev#(0.672)|anli_r1_test#(0.678)|anli_r2_dev#(0.536)|anli_r2_test#(0.522)|anli_r3_dev#(0.55)|anli_r3_test#(0.5217)/model.pt"
max_length = 184
premise = "Two women are embracing while holding to go packages."
hypothesis = "The men are fighting outside a deli."
pred_output = inference(model_class_name, model_checkpoint_path, max_length, premise, hypothesis, cpu=True)
print(pred_output)
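# Editor note: inference() returns a single prediction dict with keys 'uid', 'logits',
# 'probability' (softmax over the three NLI classes) and 'predicted_label' ('e'/'n'/'c'),
# as assembled in eval_model() above.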
|
anli-main
|
src/nli/inference_debug.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import XLNetTokenizer, XLNetForSequenceClassification
# from transformers import XLNetTokenizer
# from modeling.dummy_modeling_xlnet import XLNetForSequenceClassification
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import AlbertTokenizer, AlbertForSequenceClassification
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from transformers import BartTokenizer, BartForSequenceClassification
from transformers import ElectraTokenizer, ElectraForSequenceClassification
from torch.utils.data import Dataset, DataLoader, DistributedSampler, RandomSampler, SequentialSampler
import config
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from flint.data_utils.batchbuilder import BaseBatchBuilder, move_to_device
from flint.data_utils.fields import RawFlintField, LabelFlintField, ArrayIndexFlintField
from utils import common, list_dict_data_tool, save_tool
import os
import torch.multiprocessing as mp
import torch.distributed as dist
import torch.nn as nn
import numpy as np
import random
import torch
from tqdm import tqdm
import math
import copy
import pprint
pp = pprint.PrettyPrinter(indent=2)
# from fairseq.data.data_utils import collate_tokens
MODEL_CLASSES = {
"bert-base": {
"model_name": "bert-base-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
},
"bert-large": {
"model_name": "bert-large-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
"internal_model_name": "bert",
'insight_supported': True,
},
"xlnet-base": {
"model_name": "xlnet-base-cased",
"tokenizer": XLNetTokenizer,
"sequence_classification": XLNetForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 4,
"padding_att_value": 0,
"left_pad": True,
"internal_model_name": ["transformer", "word_embedding"],
},
"xlnet-large": {
"model_name": "xlnet-large-cased",
"tokenizer": XLNetTokenizer,
"sequence_classification": XLNetForSequenceClassification,
"padding_segement_value": 4,
"padding_att_value": 0,
"left_pad": True,
"internal_model_name": ["transformer", "word_embedding"],
'insight_supported': True,
},
"roberta-base": {
"model_name": "roberta-base",
"tokenizer": RobertaTokenizer,
"sequence_classification": RobertaForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "roberta",
'insight_supported': True,
},
"roberta-large": {
"model_name": "roberta-large",
"tokenizer": RobertaTokenizer,
"sequence_classification": RobertaForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "roberta",
'insight_supported': True,
},
"albert-xxlarge": {
"model_name": "albert-xxlarge-v2",
"tokenizer": AlbertTokenizer,
"sequence_classification": AlbertForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
"internal_model_name": "albert",
'insight_supported': True,
},
"distilbert": {
"model_name": "distilbert-base-cased",
"tokenizer": DistilBertTokenizer,
"sequence_classification": DistilBertForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
},
"bart-large": {
"model_name": "facebook/bart-large",
"tokenizer": BartTokenizer,
"sequence_classification": BartForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": ["model", "encoder", "embed_tokens"],
'insight_supported': True,
},
"electra-base": {
"model_name": "google/electra-base-discriminator",
"tokenizer": ElectraTokenizer,
"sequence_classification": ElectraForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "electra",
'insight_supported': True,
},
"electra-large": {
"model_name": "google/electra-large-discriminator",
"tokenizer": ElectraTokenizer,
"sequence_classification": ElectraForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "electra",
'insight_supported': True,
}
}
registered_path = {
'snli_train': config.PRO_ROOT / "data/build/snli/train.jsonl",
'snli_dev': config.PRO_ROOT / "data/build/snli/dev.jsonl",
'snli_test': config.PRO_ROOT / "data/build/snli/test.jsonl",
'mnli_train': config.PRO_ROOT / "data/build/mnli/train.jsonl",
'mnli_m_dev': config.PRO_ROOT / "data/build/mnli/m_dev.jsonl",
'mnli_mm_dev': config.PRO_ROOT / "data/build/mnli/mm_dev.jsonl",
'fever_train': config.PRO_ROOT / "data/build/fever_nli/train.jsonl",
'fever_dev': config.PRO_ROOT / "data/build/fever_nli/dev.jsonl",
'fever_test': config.PRO_ROOT / "data/build/fever_nli/test.jsonl",
'anli_r1_train': config.PRO_ROOT / "data/build/anli/r1/train.jsonl",
'anli_r1_dev': config.PRO_ROOT / "data/build/anli/r1/dev.jsonl",
'anli_r1_test': config.PRO_ROOT / "data/build/anli/r1/test.jsonl",
'anli_r2_train': config.PRO_ROOT / "data/build/anli/r2/train.jsonl",
'anli_r2_dev': config.PRO_ROOT / "data/build/anli/r2/dev.jsonl",
'anli_r2_test': config.PRO_ROOT / "data/build/anli/r2/test.jsonl",
'anli_r3_train': config.PRO_ROOT / "data/build/anli/r3/train.jsonl",
'anli_r3_dev': config.PRO_ROOT / "data/build/anli/r3/dev.jsonl",
'anli_r3_test': config.PRO_ROOT / "data/build/anli/r3/test.jsonl",
}
nli_label2index = {
'e': 0,
'n': 1,
'c': 2,
'h': -1,
}
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
class NLIDataset(Dataset):
def __init__(self, data_list, transform) -> None:
super().__init__()
self.d_list = data_list
self.len = len(self.d_list)
self.transform = transform
def __getitem__(self, index: int):
return self.transform(self.d_list[index])
# you should write schema for each of the input elements
def __len__(self) -> int:
return self.len
class NLITransform(object):
def __init__(self, model_name, tokenizer, max_length=None):
self.model_name = model_name
self.tokenizer = tokenizer
self.max_length = max_length
def __call__(self, sample):
processed_sample = dict()
processed_sample['uid'] = sample['uid']
processed_sample['gold_label'] = sample['label']
processed_sample['y'] = nli_label2index[sample['label']]
# premise: str = sample['premise']
premise: str = sample['context'] if 'context' in sample else sample['premise']
hypothesis: str = sample['hypothesis']
if premise.strip() == '':
premise = 'empty'
if hypothesis.strip() == '':
hypothesis = 'empty'
tokenized_input_seq_pair = self.tokenizer.encode_plus(premise, hypothesis,
max_length=self.max_length,
return_token_type_ids=True, truncation=True)
processed_sample.update(tokenized_input_seq_pair)
return processed_sample
def build_eval_dataset_loader_and_sampler(d_list, data_transformer, batching_schema, batch_size_per_gpu_eval):
d_dataset = NLIDataset(d_list, data_transformer)
d_sampler = SequentialSampler(d_dataset)
d_dataloader = DataLoader(dataset=d_dataset,
batch_size=batch_size_per_gpu_eval,
shuffle=False, #
num_workers=0,
pin_memory=True,
sampler=d_sampler,
collate_fn=BaseBatchBuilder(batching_schema)) #
return d_dataset, d_sampler, d_dataloader
def sample_data_list(d_list, ratio):
if ratio <= 0:
raise ValueError("Invalid training weight ratio. Please change --train_weights.")
upper_int = int(math.ceil(ratio))
if upper_int == 1:
return d_list # if ratio is 1 then we just return the data list
else:
sampled_d_list = []
for _ in range(upper_int):
sampled_d_list.extend(copy.deepcopy(d_list))
if np.isclose(ratio, upper_int):
return sampled_d_list
else:
sampled_length = int(ratio * len(d_list))
random.shuffle(sampled_d_list)
return sampled_d_list[:sampled_length]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--cpu", action="store_true", help="If set, we only use CPU.")
parser.add_argument("--single_gpu", action="store_true", help="If set, we only use single GPU.")
parser.add_argument("--fp16", action="store_true", help="If set, we will use fp16.")
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
# environment arguments
parser.add_argument('-s', '--seed', default=1, type=int, metavar='N',
help='manual random seed')
parser.add_argument('-n', '--num_nodes', default=1, type=int, metavar='N',
help='number of nodes')
parser.add_argument('-g', '--gpus_per_node', default=1, type=int,
help='number of gpus per node')
parser.add_argument('-nr', '--node_rank', default=0, type=int,
help='ranking within the nodes')
# experiments specific arguments
parser.add_argument('--debug_mode',
action='store_true',
dest='debug_mode',
                        help='whether this is debug mode or normal')
parser.add_argument(
"--model_class_name",
type=str,
help="Set the model class of the experiment.",
)
parser.add_argument(
"--experiment_name",
type=str,
help="Set the name of the experiment. [model_name]/[data]/[task]/[other]",
)
parser.add_argument(
"--save_prediction",
action='store_true',
dest='save_prediction',
help='Do we want to save prediction')
parser.add_argument(
"--resume_path",
type=str,
default=None,
help="If we want to resume model training, we need to set the resume path to restore state dicts.",
)
parser.add_argument(
"--global_iteration",
type=int,
default=0,
help="This argument is only used if we resume model training.",
)
parser.add_argument('--epochs', default=2, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--total_step', default=-1, type=int, metavar='N',
                        help='Number of update steps; by default it is computed from the total data size. '
                             'If set to a positive value, the epoch count is effectively unbounded and '
                             'training stops once this many steps have been taken.')
parser.add_argument('--sampler_seed', default=-1, type=int, metavar='N',
help='The seed the controls the data sampling order.')
parser.add_argument(
"--per_gpu_train_batch_size", default=16, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=64, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument("--max_length", default=160, type=int, help="Max length of the sequences.")
parser.add_argument("--warmup_steps", default=-1, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--learning_rate", default=1e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument(
"--eval_frequency", default=1000, type=int, help="set the evaluation frequency, evaluate every X global step.",
)
parser.add_argument("--train_data",
type=str,
help="The training data used in the experiments.")
parser.add_argument("--train_weights",
type=str,
help="The training data weights used in the experiments.")
parser.add_argument("--eval_data",
type=str,
help="The training data used in the experiments.")
args = parser.parse_args()
if args.cpu:
args.world_size = 1
train(-1, args)
elif args.single_gpu:
args.world_size = 1
train(0, args)
else: # distributed multiGPU training
#########################################################
args.world_size = args.gpus_per_node * args.num_nodes #
# os.environ['MASTER_ADDR'] = '152.2.142.184' # This is the IP address for nlp5
# maybe we will automatically retrieve the IP later.
        os.environ['MASTER_PORT'] = '8888'  # must be a valid TCP port (<= 65535); '88888' is out of range
mp.spawn(train, nprocs=args.gpus_per_node, args=(args,)) # spawn how many process in this node
# remember train is called as train(i, args).
#########################################################
def train(local_rank, args):
# debug = False
# print("GPU:", gpu)
# world_size = args.world_size
args.global_rank = args.node_rank * args.gpus_per_node + local_rank
args.local_rank = local_rank
# args.warmup_steps = 20
debug_count = 1000
if args.total_step > 0:
num_epoch = 10000 # if we set total step, num_epoch will be forever.
else:
num_epoch = args.epochs
actual_train_batch_size = args.world_size * args.per_gpu_train_batch_size * args.gradient_accumulation_steps
args.actual_train_batch_size = actual_train_batch_size
set_seed(args.seed)
num_labels = 3 # we are doing NLI so we set num_labels = 3, for other task we can change this value.
max_length = args.max_length
model_class_item = MODEL_CLASSES[args.model_class_name]
model_name = model_class_item['model_name']
do_lower_case = model_class_item['do_lower_case'] if 'do_lower_case' in model_class_item else False
tokenizer = model_class_item['tokenizer'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
do_lower_case=do_lower_case)
model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
padding_token_value = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
padding_segement_value = model_class_item["padding_segement_value"]
padding_att_value = model_class_item["padding_att_value"]
left_pad = model_class_item['left_pad'] if 'left_pad' in model_class_item else False
batch_size_per_gpu_train = args.per_gpu_train_batch_size
batch_size_per_gpu_eval = args.per_gpu_eval_batch_size
if not args.cpu and not args.single_gpu:
dist.init_process_group(
backend='nccl',
init_method='env://',
world_size=args.world_size,
rank=args.global_rank
)
train_data_str = args.train_data
train_data_weights_str = args.train_weights
eval_data_str = args.eval_data
train_data_name = []
train_data_path = []
train_data_list = []
train_data_weights = []
eval_data_name = []
eval_data_path = []
eval_data_list = []
train_data_named_path = train_data_str.split(',')
weights_str = train_data_weights_str.split(',') if train_data_weights_str is not None else None
eval_data_named_path = eval_data_str.split(',')
for named_path in train_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
train_data_name.append(name)
train_data_path.append(path)
train_data_list.append(d_list)
if weights_str is not None:
for weights in weights_str:
train_data_weights.append(float(weights))
else:
for i in range(len(train_data_list)):
train_data_weights.append(1)
for named_path in eval_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
eval_data_name.append(name)
eval_data_path.append(path)
eval_data_list.append(d_list)
assert len(train_data_weights) == len(train_data_list)
batching_schema = {
'uid': RawFlintField(),
'y': LabelFlintField(),
'input_ids': ArrayIndexFlintField(pad_idx=padding_token_value, left_pad=left_pad),
'token_type_ids': ArrayIndexFlintField(pad_idx=padding_segement_value, left_pad=left_pad),
'attention_mask': ArrayIndexFlintField(pad_idx=padding_att_value, left_pad=left_pad),
}
data_transformer = NLITransform(model_name, tokenizer, max_length)
# data_transformer = NLITransform(model_name, tokenizer, max_length, with_element=True)
eval_data_loaders = []
for eval_d_list in eval_data_list:
d_dataset, d_sampler, d_dataloader = build_eval_dataset_loader_and_sampler(eval_d_list, data_transformer,
batching_schema,
batch_size_per_gpu_eval)
eval_data_loaders.append(d_dataloader)
# Estimate the training size:
training_list = []
for i in range(len(train_data_list)):
print("Build Training Data ...")
train_d_list = train_data_list[i]
train_d_name = train_data_name[i]
train_d_weight = train_data_weights[i]
cur_train_list = sample_data_list(train_d_list, train_d_weight) # change later # we can apply different sample strategy here.
print(f"Data Name:{train_d_name}; Weight: {train_d_weight}; "
f"Original Size: {len(train_d_list)}; Sampled Size: {len(cur_train_list)}")
training_list.extend(cur_train_list)
estimated_training_size = len(training_list)
print("Estimated training size:", estimated_training_size)
# Estimate the training size ends:
# t_total = estimated_training_size // args.gradient_accumulation_steps * num_epoch
# t_total = estimated_training_size * num_epoch // args.actual_train_batch_size
if args.total_step <= 0:
t_total = estimated_training_size * num_epoch // args.actual_train_batch_size
else:
t_total = args.total_step
if args.warmup_steps <= 0: # set the warmup steps to 0.1 * total step if the given warmup step is -1.
args.warmup_steps = int(t_total * 0.1)
if not args.cpu:
torch.cuda.set_device(args.local_rank)
model.cuda(args.local_rank)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
global_step = 0
if args.resume_path:
print("Resume Training")
global_step = args.global_iteration
print("Resume Global Step: ", global_step)
model.load_state_dict(torch.load(str(Path(args.resume_path) / "model.pt"), map_location=torch.device('cpu')))
optimizer.load_state_dict(torch.load(str(Path(args.resume_path) / "optimizer.pt"), map_location=torch.device('cpu')))
scheduler.load_state_dict(torch.load(str(Path(args.resume_path) / "scheduler.pt"), map_location=torch.device('cpu')))
print("State Resumed")
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if not args.cpu and not args.single_gpu:
model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
output_device=local_rank, find_unused_parameters=True)
args_dict = dict(vars(args))
file_path_prefix = '.'
if args.global_rank in [-1, 0]:
print("Total Steps:", t_total)
args.total_step = t_total
print("Warmup Steps:", args.warmup_steps)
print("Actual Training Batch Size:", actual_train_batch_size)
print("Arguments", pp.pprint(args))
is_finished = False
# Let's build the logger and log everything before the start of the first training epoch.
if args.global_rank in [-1, 0]: # only do logging if we use cpu or global_rank=0
resume_prefix = ""
# if args.resume_path:
# resume_prefix = "resumed_"
if not args.debug_mode:
file_path_prefix, date = save_tool.gen_file_prefix(f"{args.experiment_name}")
# Create the log directory contents and save a copy of the source code.
script_name = os.path.basename(__file__)
with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
out_f.write(it.read())
out_f.flush()
# Save option file
common.save_json(args_dict, os.path.join(file_path_prefix, "args.json"))
checkpoints_path = Path(file_path_prefix) / "checkpoints"
if not checkpoints_path.exists():
checkpoints_path.mkdir()
prediction_path = Path(file_path_prefix) / "predictions"
if not prediction_path.exists():
prediction_path.mkdir()
# if this is a resumed run, save the resume path.
if args.resume_path:
with open(os.path.join(file_path_prefix, "resume_log.txt"), 'w') as out_f:
out_f.write(str(args.resume_path))
out_f.flush()
# print(f"Global Rank:{args.global_rank} ### ", 'Init!')
for epoch in tqdm(range(num_epoch), desc="Epoch", disable=args.global_rank not in [-1, 0]):
# Let's build up training dataset for this epoch
training_list = []
for i in range(len(train_data_list)):
print("Build Training Data ...")
train_d_list = train_data_list[i]
train_d_name = train_data_name[i]
train_d_weight = train_data_weights[i]
cur_train_list = sample_data_list(train_d_list, train_d_weight) # change later # we can apply different sample strategy here.
print(f"Data Name:{train_d_name}; Weight: {train_d_weight}; "
f"Original Size: {len(train_d_list)}; Sampled Size: {len(cur_train_list)}")
training_list.extend(cur_train_list)
random.shuffle(training_list)
train_dataset = NLIDataset(training_list, data_transformer)
train_sampler = SequentialSampler(train_dataset)
if not args.cpu and not args.single_gpu:
print("Use distributed sampler.")
train_sampler = DistributedSampler(train_dataset, args.world_size, args.global_rank,
shuffle=True)
train_dataloader = DataLoader(dataset=train_dataset,
batch_size=batch_size_per_gpu_train,
shuffle=False, #
num_workers=0,
pin_memory=True,
sampler=train_sampler,
collate_fn=BaseBatchBuilder(batching_schema)) #
# training build finished.
print(debug_node_info(args), "epoch: ", epoch)
if not args.cpu and not args.single_gpu:
if args.sampler_seed == -1:
train_sampler.set_epoch(epoch) # setup the epoch to ensure random sampling at each epoch
else:
train_sampler.set_epoch(epoch + args.sampler_seed)
for forward_step, batch in enumerate(tqdm(train_dataloader, desc="Iteration",
disable=args.global_rank not in [-1, 0]), 0):
model.train()
batch = move_to_device(batch, local_rank)
# print(batch['input_ids'], batch['y'])
if args.model_class_name in ["distilbert", "bart-large"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['y'])
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=batch['y'])
loss, logits = outputs[:2]
# print(debug_node_info(args), loss, logits, batch['uid'])
# print(debug_node_info(args), loss, batch['uid'])
# Accumulated loss
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
# if this forward step needs a model update
# handle fp16
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Gradient clipping: only applied when max_grad_norm > 0
if (forward_step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
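# Added note: with gradient_accumulation_steps = k, the loss is divided by k (above) and the
# optimizer/scheduler only step once every k forward passes, so one global step corresponds to
# an effective batch of roughly k * batch_size_per_gpu_train * (number of GPUs) examples.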
if args.global_rank in [-1, 0] and args.eval_frequency > 0 and global_step % args.eval_frequency == 0:
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# saving checkpoints
current_checkpoint_filename = \
f'e({epoch})|i({global_step})'
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
current_checkpoint_filename += \
f'|{cur_eval_data_name}#({round(r_dict[cur_eval_data_name]["acc"], 4)})'
if not args.debug_mode:
# save model:
model_output_dir = checkpoints_path / current_checkpoint_filename
if not model_output_dir.exists():
model_output_dir.mkdir()
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), str(model_output_dir / "model.pt"))
torch.save(optimizer.state_dict(), str(model_output_dir / "optimizer.pt"))
torch.save(scheduler.state_dict(), str(model_output_dir / "scheduler.pt"))
# save prediction:
if not args.debug_mode and args.save_prediction:
cur_results_path = prediction_path / current_checkpoint_filename
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
if args.total_step > 0 and global_step == t_total:
# if total_step is set and global_step has reached t_total, stop training.
is_finished = True
break
# End of epoch evaluation.
if args.global_rank in [-1, 0] and args.total_step <= 0:
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# saving checkpoints
current_checkpoint_filename = \
f'e({epoch})|i({global_step})'
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
current_checkpoint_filename += \
f'|{cur_eval_data_name}#({round(r_dict[cur_eval_data_name]["acc"], 4)})'
if not args.debug_mode:
# save model:
model_output_dir = checkpoints_path / current_checkpoint_filename
if not model_output_dir.exists():
model_output_dir.mkdir()
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), str(model_output_dir / "model.pt"))
torch.save(optimizer.state_dict(), str(model_output_dir / "optimizer.pt"))
torch.save(scheduler.state_dict(), str(model_output_dir / "scheduler.pt"))
# save prediction:
if not args.debug_mode and args.save_prediction:
cur_results_path = prediction_path / current_checkpoint_filename
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
if is_finished:
break
id2label = {
0: 'e',
1: 'n',
2: 'c',
-1: '-',
}
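# Added note: 'e', 'n' and 'c' are the standard NLI labels (entailment / neutral / contradiction);
# -1 -> '-' presumably marks examples without a usable gold label.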
def count_acc(gt_list, pred_list):
assert len(gt_list) == len(pred_list)
gt_dict = list_dict_data_tool.list_to_dict(gt_list, 'uid')
pred_list = list_dict_data_tool.list_to_dict(pred_list, 'uid')
total_count = 0
hit = 0
for key, value in pred_list.items():
if gt_dict[key]['label'] == value['predicted_label']:
hit += 1
total_count += 1
return hit, total_count
def evaluation_dataset(args, eval_dataloader, eval_list, model, r_dict, eval_name):
# r_dict = dict()
pred_output_list = eval_model(model, eval_dataloader, args.global_rank, args)
predictions = pred_output_list
hit, total = count_acc(eval_list, pred_output_list)
print(debug_node_info(args), f"{eval_name} Acc:", hit, total, hit / total)
r_dict[f'{eval_name}'] = {
'acc': hit / total,
'correct_count': hit,
'total_count': total,
'predictions': predictions,
}
def eval_model(model, dev_dataloader, device_num, args):
model.eval()
uid_list = []
y_list = []
pred_list = []
logits_list = []
with torch.no_grad():
for i, batch in enumerate(dev_dataloader, 0):
batch = move_to_device(batch, device_num)
if args.model_class_name in ["distilbert", "bart-large"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['y'])
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=batch['y'])
loss, logits = outputs[:2]
uid_list.extend(list(batch['uid']))
y_list.extend(batch['y'].tolist())
pred_list.extend(torch.max(logits, 1)[1].view(logits.size(0)).tolist())
logits_list.extend(logits.tolist())
assert len(pred_list) == len(logits_list)
result_items_list = []
for i in range(len(uid_list)):
r_item = dict()
r_item['uid'] = uid_list[i]
r_item['logits'] = logits_list[i]
r_item['predicted_label'] = id2label[pred_list[i]]
result_items_list.append(r_item)
return result_items_list
def debug_node_info(args):
names = ['global_rank', 'local_rank', 'node_rank']
values = []
for name in names:
if name in args:
values.append(getattr(args, name))
else:
return "Pro:No node info "
return "Pro:" + '|'.join([f"{name}:{value}" for name, value in zip(names, values)]) + "||Print:"
if __name__ == '__main__':
main()
|
anli-main
|
src/nli/training.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
import config
from datetime import datetime
from utils import common
class ScoreLogger(object):
def __init__(self, init_tracking_dict) -> None:
super().__init__()
self.logging_item_list = []
self.score_tracker = dict()
self.score_tracker.update(init_tracking_dict)
def incorporate_results(self, score_dict, save_key, item=None) -> bool:
assert len(score_dict.keys()) == len(self.score_tracker.keys())
for fieldname in score_dict.keys():
assert fieldname in self.score_tracker
valid_improvement = False
for fieldname, value in score_dict.items():
if score_dict[fieldname] >= self.score_tracker[fieldname]:
self.score_tracker[fieldname] = score_dict[fieldname]
valid_improvement = True
self.logging_item_list.append({'k': save_key, 'v': item})
return valid_improvement
def logging_to_file(self, filename):
if Path(filename).is_file():
old_logging_list = common.load_json(filename)
current_saved_key = set()
for item in self.logging_item_list:
current_saved_key.add(item['k'])
for item in old_logging_list:
if item['k'] not in current_saved_key:
raise ValueError("Previous logged item can not be found!")
common.save_json(self.logging_item_list, filename, indent=2, sort_keys=True)
def gen_file_prefix(model_name, directory_name='saved_models', date=None):
date_now = datetime.now().strftime("%m-%d-%H:%M:%S") if not date else date
file_path = os.path.join(config.PRO_ROOT / directory_name / '_'.join((date_now, model_name)))
if not os.path.exists(file_path):
os.makedirs(file_path)
return file_path, date_now
def get_cur_time_str():
date_now = datetime.now().strftime("%m-%d[%H:%M:%S]")
return date_now
if __name__ == "__main__":
# print(gen_file_prefix("this_is_my_model."))
# print(get_cur_time_str())
score_logger = ScoreLogger({'a_score': -1, 'b_score': -1})
print(score_logger.incorporate_results({'a_score': 2, 'b_score': -1}, 'key-1', {'a_score': 2, 'b_score': -1}))
print(score_logger.incorporate_results({'a_score': 2, 'b_score': 3}, 'key-2', {'a_score': 2, 'b_score': 3}))
print(score_logger.incorporate_results({'a_score': 2, 'b_score': 4}, 'key-2', {'a_score': 2, 'b_score': 4}))
print(score_logger.incorporate_results({'a_score': 1, 'b_score': 2}, 'key-2', {'a_score': 1, 'b_score': 2}))
print(score_logger.score_tracker)
score_logger.logging_to_file('for_testing.json')
|
anli-main
|
src/utils/save_tool.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
|
anli-main
|
src/utils/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import uuid
def list_to_dict(d_list, key_fields): # '_id' or 'pid'
d_dict = dict()
for item in d_list:
assert key_fields in item
d_dict[item[key_fields]] = item
return d_dict
def dict_to_list(d_dict):
d_list = []
for key, value in d_dict.items():
d_list.append(value)
return d_list
def append_item_from_dict_to_list(d_list, d_dict, key_fieldname, append_fieldnames):
if not isinstance(append_fieldnames, list):
append_fieldnames = [append_fieldnames]
for item in d_list:
key = item[key_fieldname]
if key in d_dict:
for append_fieldname in append_fieldnames:
item[append_fieldname] = d_dict[key][append_fieldname]
else:
print(f"Potential Error: {key} not in scored_dict. Maybe bc all forward items are empty.")
for append_fieldname in append_fieldnames:
item[append_fieldname] = []
return d_list
def append_item_from_dict_to_list_hotpot_style(d_list, d_dict, key_fieldname, append_fieldnames):
if not isinstance(append_fieldnames, list):
append_fieldnames = [append_fieldnames]
for item in d_list:
key = item[key_fieldname]
for append_fieldname in append_fieldnames:
if key in d_dict[append_fieldname]:
item[append_fieldname] = d_dict[append_fieldname][key]
else:
print(f"Potential Error: {key} not in scored_dict. Maybe bc all forward items are empty.")
# for append_fieldname in append_fieldnames:
item[append_fieldname] = []
return d_list
def append_subfield_from_list_to_dict(subf_list, d_dict, o_key_field_name, subfield_key_name,
subfield_name='merged_field', check=False):
# Oftentimes we need to split one data point into multiple items before feeding them to the
# neural network, and after we obtain the results we need to map them back to the original
# data point using some keys. This method is used for that purpose.
# The method can be invoked multiple times (in practice, usually once per batch).
"""
:param subf_list: The forward list.
:param d_dict: The dict that contain keys mapping to original data point.
:param o_key_field_name: The fieldname of original data point key. 'pid'
:param subfield_key_name: The fieldname of the sub item. 'fid'
:param subfield_name: The merge field name. 'merged_field'
:param check:
:return:
"""
for key in d_dict.keys():
d_dict[key][subfield_name] = dict()
for item in subf_list:
assert o_key_field_name in item
assert subfield_key_name in item
map_id = item[o_key_field_name]
sub_filed_id = item[subfield_key_name]
assert map_id in d_dict
# if subfield_name not in d_dict[map_id]:
# d_dict[map_id][subfield_name] = dict()
if sub_filed_id not in d_dict[map_id][subfield_name]:
if check:
assert item[o_key_field_name] == map_id
d_dict[map_id][subfield_name][sub_filed_id] = item
else:
print("Duplicate forward item with key:", sub_filed_id)
return d_dict
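# Illustrative sketch (added comment, hypothetical keys): after the call, each original entry
# has the form
#   d_dict['pid-1'] = {..., 'merged_field': {'fid-a': {...}, 'fid-b': {...}}}
# i.e. every forward item is grouped back under its originating data point, keyed by its sub-item id.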
if __name__ == '__main__':
oitems = []
for i in range(3):
oitems.append({'_id': i})
fitems = []
for item in oitems:
oid = item['_id']
for i in range(int(oid) + 1):
fid = str(uuid.uuid4())
fitems.append({
'oid': oid,
'fid': fid,
})
o_dict = list_to_dict(oitems, '_id')
append_subfield_from_list_to_dict(fitems, o_dict, 'oid', 'fid', check=True)
print(fitems)
print(o_dict)
|
anli-main
|
src/utils/list_dict_data_tool.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import json
from json import JSONEncoder
from tqdm import tqdm
import config
registered_jsonabl_classes = {}
# Some Jsonable classes, for easy json serialization.
def register_class(cls):
global registered_jsonabl_classes
if cls not in registered_jsonabl_classes:
registered_jsonabl_classes.update({cls.__name__: cls})
class JsonableObj(object):
pass
class JsonableObjectEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, JsonableObj):
d = {'_jcls_': type(o).__name__}
d.update(vars(o))
return d
else:
return super().default(o)
def unserialize_JsonableObject(d):
global registered_jsonabl_classes
classname = d.pop('_jcls_', None)
if classname:
cls = registered_jsonabl_classes[classname]
obj = cls.__new__(cls) # Make instance without calling __init__
for key, value in d.items():
setattr(obj, key, value)
return obj
else:
return d
def json_dumps(item):
return json.dumps(item, cls=JsonableObjectEncoder)
def json_loads(item_str):
return json.loads(item_str, object_hook=unserialize_JsonableObject)
# Json Serializable object finished.
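# Usage sketch (added comment, not part of the original module): a class opts into this JSON
# round-trip by subclassing JsonableObj and registering itself, e.g.
#   class ExampleItem(JsonableObj):            # hypothetical class, for illustration only
#       def __init__(self, uid, score):
#           self.uid, self.score = uid, score
#   register_class(ExampleItem)
#   s = json_dumps(ExampleItem('u1', 0.9))     # -> '{"_jcls_": "ExampleItem", "uid": "u1", "score": 0.9}'
#   obj = json_loads(s)                        # -> ExampleItem instance rebuilt via __new__ + setattr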
def save_jsonl(d_list, filename):
print("Save to Jsonl:", filename)
with open(filename, encoding='utf-8', mode='w') as out_f:
for item in d_list:
out_f.write(json.dumps(item, cls=JsonableObjectEncoder) + '\n')
def load_jsonl(filename, debug_num=None):
d_list = []
with open(filename, encoding='utf-8', mode='r') as in_f:
print("Load Jsonl:", filename)
for line in tqdm(in_f):
item = json.loads(line.strip(), object_hook=unserialize_JsonableObject)
d_list.append(item)
if debug_num is not None and 0 < debug_num == len(d_list):
break
return d_list
def load_json(filename, **kwargs):
with open(filename, encoding='utf-8', mode='r') as in_f:
return json.load(in_f, object_hook=unserialize_JsonableObject, **kwargs)
def save_json(obj, filename, **kwargs):
with open(filename, encoding='utf-8', mode='w') as out_f:
json.dump(obj, out_f, cls=JsonableObjectEncoder, **kwargs)
out_f.close()
|
anli-main
|
src/utils/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
|
anli-main
|
src/modeling/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
""" PyTorch Dummy XLNet model.
"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from transformers.activations import gelu_new, swish
from transformers.configuration_xlnet import XLNetConfig
from transformers.file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
logger = logging.getLogger(__name__)
_TOKENIZER_FOR_DOC = "XLNetTokenizer"
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"xlnet-base-cased",
"xlnet-large-cased",
# See all XLNet models at https://huggingface.co/models?filter=xlnet
]
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
""" A map of modules from TF to PyTorch.
I use a map to keep the PyTorch model as
identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
if hasattr(model, "lm_loss"):
# We will load also the output bias
tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
# We will load also the sequence summary
tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
if (
hasattr(model, "logits_proj")
and config.finetuning_task is not None
and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
):
tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias
# Now load the rest of the transformer
model = model.transformer
# Embeddings and output
tf_to_pt_map.update(
{
"model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
"model/transformer/mask_emb/mask_emb": model.mask_emb,
}
)
# Transformer blocks
for i, b in enumerate(model.layer):
layer_str = "model/transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.rel_attn.o,
layer_str + "rel_attn/q/kernel": b.rel_attn.q,
layer_str + "rel_attn/k/kernel": b.rel_attn.k,
layer_str + "rel_attn/r/kernel": b.rel_attn.r,
layer_str + "rel_attn/v/kernel": b.rel_attn.v,
layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
r_s_list = []
seg_embed_list = []
for b in model.layer:
r_r_list.append(b.rel_attn.r_r_bias)
r_w_list.append(b.rel_attn.r_w_bias)
r_s_list.append(b.rel_attn.r_s_bias)
seg_embed_list.append(b.rel_attn.seg_embed)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
r_s_list = [model.r_s_bias]
seg_embed_list = [model.seg_embed]
tf_to_pt_map.update(
{
"model/transformer/r_r_bias": r_r_list,
"model/transformer/r_w_bias": r_w_list,
"model/transformer/r_s_bias": r_s_list,
"model/transformer/seg_embed": seg_embed_list,
}
)
return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
for name, pointer in tf_to_pt_map.items():
logger.info("Importing {}".format(name))
if name not in tf_weights:
logger.info("{} not in tf pre-trained weights, skipping".format(name))
continue
array = tf_weights[name]
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
# which are not required when using the pretrained model
if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
logger.info("Transposing")
array = np.transpose(array)
if isinstance(pointer, list):
# Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + "/Adam", None)
tf_weights.pop(name + "/Adam_1", None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
return model
ACT2FN = {"gelu": gelu_new, "relu": torch.nn.functional.relu, "swish": swish}
XLNetLayerNorm = nn.LayerNorm
class XLNetRelativeAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.d_model % config.n_head != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.d_model, config.n_head)
)
self.n_head = config.n_head
self.d_head = config.d_head
self.d_model = config.d_model
self.scale = 1 / (config.d_head ** 0.5)
self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.dropout)
def prune_heads(self, heads):
raise NotImplementedError
@staticmethod
def rel_shift(x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = x.shape
x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
x = x[1:, ...]
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
# x = x[:, 0:klen, :, :]
x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
return x
@staticmethod
def rel_shift_bnij(x, klen=-1):
x_size = x.shape
x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
x = x[:, :, 1:, :]
x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
# Note: the tensor-slice form was faster in my testing than torch.index_select
# However, tracing doesn't like the nature of the slice, and if klen changes
# during the run then it'll fail, whereas index_select will be fine.
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
# x = x[:, :, :, :klen]
return x
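# Added note on the relative-shift trick above: the score tensor [bsz, n_head, qlen, pos_len]
# is viewed as [bsz, n_head, pos_len, qlen], its first slice along the position axis is dropped,
# and it is viewed back as [bsz, n_head, qlen, pos_len - 1]; this shears the rows so that each
# query's relative position 0 lines up, after which index_select keeps only the first klen columns.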
def rel_attn_core(
self,
q_head,
k_head_h,
v_head_h,
k_head_r,
seg_mat=None,
attn_mask=None,
head_mask=None,
output_attentions=False,
):
"""Core relative positional attention operations."""
# content based attention score
ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
# position based attention score
bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
# segment based attention score
if seg_mat is None:
ef = 0
else:
ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
# merge attention scores and perform masking
attn_score = (ac + bd + ef) * self.scale
if attn_mask is not None:
# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
if attn_mask.dtype == torch.float16:
attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
else:
attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
# attention probability
attn_prob = F.softmax(attn_score, dim=3)
attn_prob = self.dropout(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
# attention output
attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
if output_attentions:
return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
return attn_vec
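# Added shape note for the einsums above: with the sequence-first layout used in this file,
# q_head is [qlen, bsz, n_head, d_head] ("ibnd") and k_head_h / v_head_h are
# [klen, bsz, n_head, d_head] ("jbnd"), so ac and bd come out as [bsz, n_head, qlen, klen]
# ("bnij"); "bnij,jbnd->ibnd" then maps attn_vec back to [qlen, bsz, n_head, d_head].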
def post_attention(self, h, attn_vec, residual=True):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
attn_out = self.dropout(attn_out)
if residual:
attn_out = attn_out + h
output = self.layer_norm(attn_out)
return output
def forward(
self,
h,
g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=None,
target_mapping=None,
head_mask=None,
output_attentions=False,
):
if g is not None:
# Two-stream attention with relative positional encoding.
# content based attention score
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content-based key head
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
# content-based value head
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# position-based key head
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# h-stream
# content-stream query head
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
# core attention ops
attn_vec_h = self.rel_attn_core(
q_head_h,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_h,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_h, attn_prob_h = attn_vec_h
# post processing
output_h = self.post_attention(h, attn_vec_h)
# g-stream
# query-stream query head
q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
# core attention ops
if target_mapping is not None:
q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
attn_vec_g = self.rel_attn_core(
q_head_g,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_g,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
else:
attn_vec_g = self.rel_attn_core(
q_head_g,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_g,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
# post processing
output_g = self.post_attention(g, attn_vec_g)
if output_attentions:
attn_prob = attn_prob_h, attn_prob_g
else:
# Multi-head attention with relative positional encoding
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content heads
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# positional heads
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# core attention ops
attn_vec = self.rel_attn_core(
q_head_h,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_h,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec, attn_prob = attn_vec
# post processing
output_h = self.post_attention(h, attn_vec)
output_g = None
outputs = (output_h, output_g)
if output_attentions:
outputs = outputs + (attn_prob,)
return outputs
class XLNetFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.layer_1 = nn.Linear(config.d_model, config.d_inner)
self.layer_2 = nn.Linear(config.d_inner, config.d_model)
self.dropout = nn.Dropout(config.dropout)
if isinstance(config.ff_activation, str):
self.activation_function = ACT2FN[config.ff_activation]
else:
self.activation_function = config.ff_activation
def forward(self, inp):
output = inp
output = self.layer_1(output)
output = self.activation_function(output)
output = self.dropout(output)
output = self.layer_2(output)
output = self.dropout(output)
output = self.layer_norm(output + inp)
return output
class XLNetLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.rel_attn = XLNetRelativeAttention(config)
self.ff = XLNetFeedForward(config)
self.dropout = nn.Dropout(config.dropout)
def forward(
self,
output_h,
output_g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=None,
target_mapping=None,
head_mask=None,
output_attentions=False,
):
outputs = self.rel_attn(
output_h,
output_g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=mems,
target_mapping=target_mapping,
head_mask=head_mask,
output_attentions=output_attentions,
)
output_h, output_g = outputs[:2]
if output_g is not None:
output_g = self.ff(output_g)
output_h = self.ff(output_h)
outputs = (output_h, output_g) + outputs[2:]  # Add attentions again if they are there
return outputs
class XLNetPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = XLNetConfig
load_tf_weights = load_tf_weights_in_xlnet
base_model_prefix = "transformer"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, XLNetLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, XLNetRelativeAttention):
for param in [
module.q,
module.k,
module.v,
module.o,
module.r,
module.r_r_bias,
module.r_s_bias,
module.r_w_bias,
module.seg_embed,
]:
param.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, XLNetModel):
module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
XLNET_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
`use_cache` has to be set to `True` to make use of `mems`.
perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
If ``perm_mask[k, i, j] = 0``, i attends to j in batch k;
if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
If None, each token attends to all the others (full bidirectional attention).
Only used during pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the output tokens to use.
If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token.
Only used during pretraining for partial prediction or for sequential decoding (generation).
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token. The classifier token should be represented by a ``2``.
`What are token type IDs? <../glossary.html#token-type-ids>`_
input_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
Kept for compatibility with the original code base.
You can only use one of `input_mask` and `attention_mask`.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
use_cache (:obj:`bool`):
If `use_cache` is True, `mems` are returned and can be used to speed up decoding (see `mems`). Defaults to `True`.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
"""
@add_start_docstrings(
"The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mem_len = config.mem_len
self.reuse_len = config.reuse_len
self.d_model = config.d_model
self.same_length = config.same_length
self.attn_type = config.attn_type
self.bi_data = config.bi_data
self.clamp_len = config.clamp_len
self.n_layer = config.n_layer
self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
self.dropout = nn.Dropout(config.dropout)
self.init_weights()
def get_input_embeddings(self):
return self.word_embedding
def set_input_embeddings(self, new_embeddings):
self.word_embedding = new_embeddings
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def create_mask(self, qlen, mlen):
"""
Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
Args:
qlen: Sequence length
mlen: Mask length
::
same_length=False: same_length=True:
<mlen > < qlen > <mlen > < qlen >
^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
[0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]
"""
attn_mask = torch.ones([qlen, qlen])
mask_up = torch.triu(attn_mask, diagonal=1)
attn_mask_pad = torch.zeros([qlen, mlen])
ret = torch.cat([attn_mask_pad, mask_up], dim=1)
if self.same_length:
mask_lo = torch.tril(attn_mask, diagonal=-1)
ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
ret = ret.to(self.device)
return ret
def cache_mem(self, curr_out, prev_mem):
# cache hidden states into memory.
if self.reuse_len is not None and self.reuse_len > 0:
curr_out = curr_out[: self.reuse_len]
if prev_mem is None:
new_mem = curr_out[-self.mem_len :]
else:
new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len :]
return new_mem.detach()
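# Added note: cache_mem keeps at most the last mem_len hidden states per layer (optionally only
# the first reuse_len positions of the current output), detached so the cached memory never
# receives gradients.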
@staticmethod
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
pos_emb = pos_emb[:, None, :]
if bsz is not None:
pos_emb = pos_emb.expand(-1, bsz, -1)
return pos_emb
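# Added note: this builds the usual sinusoidal table; for a position sequence of length L and
# inv_freq of length d_model/2, sinusoid_inp is [L, d_model/2], the sin/cos concatenation gives
# [L, d_model], and the final tensor is [L, 1, d_model] (or [L, bsz, d_model] when bsz is given).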
def relative_positional_encoding(self, qlen, klen, bsz=None):
# create relative positional encoding.
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
if self.attn_type == "bi":
# beg, end = klen - 1, -qlen
beg, end = klen, -qlen
elif self.attn_type == "uni":
# beg, end = klen - 1, -1
beg, end = klen, -1
else:
raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))
if self.bi_data:
fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
if bsz is not None:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
else:
fwd_pos_seq = torch.arange(beg, end, -1.0)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
pos_emb = pos_emb.to(self.device)
return pos_emb
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
output_attentions=None,
output_hidden_states=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
# but we want a unified interface in the library with the batch size on the first dimension
# so we move here the first dimension (batch) to the end
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
qlen, bsz = input_ids.shape[0], input_ids.shape[1]
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
klen = mlen + qlen
dtype_float = self.dtype
device = self.device
# Attention mask
# causal attention mask
if self.attn_type == "uni":
attn_mask = self.create_mask(qlen, mlen)
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == "bi":
attn_mask = None
else:
raise ValueError("Unsupported attention type: {}".format(self.attn_type))
# data mask: input mask & perm mask
assert input_mask is None or attention_mask is None, (
"You can only use one of input_mask (uses 1 for padding) "
"or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
)
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - attention_mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
# all mems can be attended to
if mlen > 0:
mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
data_mask = torch.cat([mems_mask, data_mask], dim=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = (attn_mask > 0).to(dtype_float)
if attn_mask is not None:
non_tgt_mask = -torch.eye(qlen).to(attn_mask)
if mlen > 0:
non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
else:
non_tgt_mask = None
# Word embeddings and prepare h & g hidden states
if inputs_embeds is not None:
word_emb_k = inputs_embeds
else:
# Important: at this point input_ids has already been transposed to [len, bsz] at the top of
# forward(); transpose it back to [bsz, len] before the embedding lookup.
input_ids = input_ids.transpose(0, 1)
word_emb_k = self.word_embedding(input_ids)
# transpose the embeddings back to the [len, bsz, d_model] layout used by the rest of the model
word_emb_k = word_emb_k.transpose(0, 1)
output_h = self.dropout(word_emb_k)
if target_mapping is not None:
word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
# else: # We removed the inp_q input which was same as target mapping
# inp_q_ext = inp_q[:, :, None]
# word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
output_g = self.dropout(word_emb_q)
else:
output_g = None
# Segment embedding
if token_type_ids is not None:
# Convert `token_type_ids` to one-hot `seg_mat`
if mlen > 0:
mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
else:
cat_ids = token_type_ids
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
else:
seg_mat = None
# Positional encoding
pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
pos_emb = self.dropout(pos_emb)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.n_layer
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = []
hidden_states = []
for i, layer_module in enumerate(self.layer):
if self.mem_len is not None and self.mem_len > 0 and use_cache is True:
# cache new mems
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(
output_h,
output_g,
attn_mask_h=non_tgt_mask,
attn_mask_g=attn_mask,
r=pos_emb,
seg_mat=seg_mat,
mems=mems[i],
target_mapping=target_mapping,
head_mask=head_mask[i],
output_attentions=output_attentions,
)
output_h, output_g = outputs[:2]
if output_attentions:
attentions.append(outputs[2])
# Add last hidden state
if output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h)
# Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
outputs = (output.permute(1, 0, 2).contiguous(),)
if self.mem_len is not None and self.mem_len > 0 and use_cache is True:
outputs = outputs + (new_mems,)
if output_hidden_states:
if output_g is not None:
hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
else:
hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
outputs = outputs + (hidden_states,)
if output_attentions:
if target_mapping is not None:
# when target_mapping is provided, there are 2-tuple of attentions
attentions = tuple(
tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
)
else:
attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
outputs = outputs + (attentions,)
return outputs # outputs, (new_mems), (hidden_states), (attentions)
@add_start_docstrings(
"""XLNet Model with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
XLNET_START_DOCSTRING,
)
class XLNetLMHeadModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.attn_type = config.attn_type
self.same_length = config.same_length
self.transformer = XLNetModel(config)
self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
self.init_weights()
def get_output_embeddings(self):
return self.lm_loss
def prepare_inputs_for_generation(self, input_ids, past, **kwargs):
# Add dummy token at the end (no attention on this one)
effective_batch_size = input_ids.shape[0]
dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
# Build permutation mask so that previous tokens don't see last token
sequence_length = input_ids.shape[1]
perm_mask = torch.zeros(
(effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
)
perm_mask[:, :, -1] = 1.0
# We'll only predict the last token
target_mapping = torch.zeros(
(effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
)
target_mapping[0, 0, -1] = 1.0
inputs = {
"input_ids": input_ids,
"perm_mask": perm_mask,
"target_mapping": target_mapping,
"use_cache": kwargs["use_cache"],
}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = past
return inputs
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`, defaults to :obj:`None`):
Labels for masked language modeling.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
The labels should correspond to the masked input words that should be predicted and depends on `target_mapping`. Note in order to perform standard auto-regressive language modeling a `<mask>` token has to be added to the `input_ids` (see `prepare_inputs_for_generation` fn and examples below)
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored, the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetLMHeadModel
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
# We show how to setup inputs to predict a next token using a bi-directional context.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
# XLNetLMHeadModel can be used in the same way to train with standard auto-regressive language modeling.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
assert labels.shape[0] == 1, 'only one word will be predicted'
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
loss, next_token_logits = outputs[:2] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
logits = self.lm_loss(transformer_outputs[0])
outputs = (logits,) + transformer_outputs[1:] # Keep mems, hidden states, attentions if they are in it
if labels is not None:
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
outputs = (logits,) + transformer_outputs[1:] # Keep mems, hidden states, attentions if they are in it
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
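# --- Illustrative usage sketch (the "xlnet-base-cased" checkpoint and the XLNetTokenizer
# import are assumptions made only for this example; substitute whatever tokenizer and
# checkpoint you actually use). It shows how the sequence classification head above is
# typically driven: a batch of token ids plus one class index per example.
def _example_xlnet_sequence_classification():
    import torch
    from transformers import XLNetTokenizer  # assumed to be installed
    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    model = XLNetForSequenceClassification.from_pretrained("xlnet-base-cased")
    input_ids = torch.tensor([tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)])
    labels = torch.tensor([1])  # class index for this single example
    loss, logits = model(input_ids, labels=labels)[:2]  # outputs are (loss, logits, mems, ...)
    return loss, logits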
@add_start_docstrings(
"""XLNet Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[1:] # Keep mems, hidden states, attentions if they are in it
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
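# --- Minimal sketch (illustration only; the checkpoint name, random ids and labels are
# placeholders for this example). Token classification expects one label per token, so
# `labels` has shape (batch_size, sequence_length) and the logits come back as
# (batch_size, sequence_length, num_labels).
def _example_xlnet_token_classification():
    import torch
    model = XLNetForTokenClassification.from_pretrained("xlnet-base-cased", num_labels=5)
    input_ids = torch.randint(0, model.config.vocab_size, (1, 8))
    attention_mask = torch.ones_like(input_ids)
    labels = torch.randint(0, 5, (1, 8))
    loss, logits = model(input_ids, attention_mask=attention_mask, labels=labels)[:2]
    assert logits.shape == (1, 8, 5)
    return loss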
@add_start_docstrings(
"""XLNet Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, 1)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
token_type_ids=None,
input_mask=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
transformer_outputs = self.transformer(
flat_input_ids,
token_type_ids=flat_token_type_ids,
input_mask=flat_input_mask,
attention_mask=flat_attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + transformer_outputs[
1:
] # Keep mems, hidden states, attentions if they are in it
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
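# --- Shape-focused sketch (illustration only; the checkpoint name and random ids are
# placeholders). The multiple-choice head flattens (batch, num_choices, seq_len) into
# (batch * num_choices, seq_len) before the transformer, then folds the per-choice scores
# back into (batch, num_choices) for a cross-entropy over choices.
def _example_xlnet_multiple_choice():
    import torch
    model = XLNetForMultipleChoice.from_pretrained("xlnet-base-cased")
    input_ids = torch.randint(0, model.config.vocab_size, (2, 4, 8))  # 2 questions, 4 choices each
    labels = torch.tensor([0, 3])  # index of the correct choice per question
    loss, reshaped_logits = model(input_ids, labels=labels)[:2]
    assert reshaped_logits.shape == (2, 4)
    return loss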
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, splitting adds a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)
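# --- Minimal sketch (illustration only; the checkpoint name, ids and positions are
# placeholders). The "simple" QA head predicts one start and one end index per example;
# the two cross-entropy losses are averaged, and out-of-range positions are clamped as above.
def _example_xlnet_qa_simple():
    import torch
    model = XLNetForQuestionAnsweringSimple.from_pretrained("xlnet-base-cased")
    input_ids = torch.randint(0, model.config.vocab_size, (1, 12))
    start_positions = torch.tensor([3])
    end_positions = torch.tensor([5])
    outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
    total_loss, start_logits, end_logits = outputs[:3]
    return total_loss, start_logits, end_logits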
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.transformer = XLNetModel(config)
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
start_positions=None,
end_positions=None,
is_impossible=None,
cls_index=None,
p_mask=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
1.0 means the token should be masked, 0.0 means the token is not masked.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Example::
>>> from transformers import XLNetTokenizer, XLNetForQuestionAnswering
>>> import torch
>>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
>>> model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased')
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs[0]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
hidden_states = transformer_outputs[0]
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if they are in it
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum(
"blh,bl->bh", hidden_states, start_log_probs
) # get the representation of START as weighted sum of hidden states
cls_logits = self.answer_class(
hidden_states, start_states=start_states, cls_index=cls_index
) # Shape (batch size,): one single `cls_logits` for each sample
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
|
anli-main
|
src/modeling/dummy_modeling_xlnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch import optim
from torch.autograd import Variable
from torch.nn import MSELoss, CrossEntropyLoss
import flint.torch_util as torch_util
from tqdm import tqdm
import os
from datetime import datetime
class EmptyScheduler(object):
def __init__(self):
self._state_dict = dict()
def step(self):
pass
def state_dict(self):
return self._state_dict
class ResEncoder(nn.Module):
def __init__(self, h_size=[1024, 1024, 1024], v_size=10, embd_dim=300, mlp_d=1024,
dropout_r=0.1, k=3, n_layers=1, num_labels=3):
super(ResEncoder, self).__init__()
self.Embd = nn.Embedding(v_size, embd_dim)
self.num_labels = num_labels
self.lstm = nn.LSTM(input_size=embd_dim, hidden_size=h_size[0],
num_layers=1, bidirectional=True)
self.lstm_1 = nn.LSTM(input_size=(embd_dim + h_size[0] * 2), hidden_size=h_size[1],
num_layers=1, bidirectional=True)
self.lstm_2 = nn.LSTM(input_size=(embd_dim + h_size[0] * 2), hidden_size=h_size[2],
num_layers=1, bidirectional=True)
self.h_size = h_size
self.k = k
# self.mlp_1 = nn.Linear(h_size[2] * 2 * 4, mlp_d)
self.mlp_1 = nn.Linear(h_size[2] * 2, mlp_d)
self.mlp_2 = nn.Linear(mlp_d, mlp_d)
self.sm = nn.Linear(mlp_d, self.num_labels)
if n_layers == 1:
self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
self.sm])
elif n_layers == 2:
self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
self.mlp_2, nn.ReLU(), nn.Dropout(dropout_r),
self.sm])
else:
print("Error num layers")
def init_embedding(self, embedding):
self.Embd.weight = embedding.weight
def forward(self, input_ids, attention_mask, labels=None):
# if self.max_l:
# l1 = l1.clamp(max=self.max_l)
# l2 = l2.clamp(max=self.max_l)
# if s1.size(0) > self.max_l:
# s1 = s1[:self.max_l, :]
# if s2.size(0) > self.max_l:
# s2 = s2[:self.max_l, :]
batch_l_1 = torch.sum(attention_mask, dim=1)
# p_s1 = self.Embd(s1)
embedding_1 = self.Embd(input_ids)
s1_layer1_out = torch_util.auto_rnn(self.lstm, embedding_1, batch_l_1)
# s2_layer1_out = torch_util.auto_rnn_bilstm(self.lstm, p_s2, l2)
# Length truncate
# len1 = s1_layer1_out.size(0)
# len2 = s2_layer1_out.size(0)
# p_s1 = p_s1[:len1, :, :]
# p_s2 = p_s2[:len2, :, :]
# Using high way
s1_layer2_in = torch.cat([embedding_1, s1_layer1_out], dim=2)
# s2_layer2_in = torch.cat([p_s2, s2_layer1_out], dim=2)
s1_layer2_out = torch_util.auto_rnn(self.lstm_1, s1_layer2_in, batch_l_1)
# s2_layer2_out = torch_util.auto_rnn_bilstm(self.lstm_1, s2_layer2_in, l2)
s1_layer3_in = torch.cat([embedding_1, s1_layer1_out + s1_layer2_out], dim=2)
# s2_layer3_in = torch.cat([p_s2, s2_layer1_out + s2_layer2_out], dim=2)
s1_layer3_out = torch_util.auto_rnn(self.lstm_2, s1_layer3_in, batch_l_1)
# s2_layer3_out = torch_util.auto_rnn_bilstm(self.lstm_2, s2_layer3_in, l2)
s1_layer3_maxout = torch_util.max_along_time(s1_layer3_out, batch_l_1)
# s2_layer3_maxout = torch_util.max_along_time(s2_layer3_out, l2)
# Only use the last layer
# features = torch.cat([s1_layer3_maxout, s2_layer3_maxout,
# torch.abs(s1_layer3_maxout - s2_layer3_maxout),
# s1_layer3_maxout * s2_layer3_maxout],
# dim=1)
features = torch.cat([s1_layer3_maxout],
dim=1)
logits = self.classifier(features)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return (loss, logits)
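# --- Usage sketch (illustration only; the vocabulary size, lengths and labels below are
# made up for the demo). ResEncoder is a 3-layer residual BiLSTM sentence encoder with
# max-pooling over time and an MLP classifier; it takes padded token ids plus an attention
# mask whose row sums give the true lengths.
def _example_res_encoder():
    model = ResEncoder(v_size=100, embd_dim=16, h_size=[32, 32, 32], mlp_d=64, num_labels=3)
    input_ids = torch.randint(0, 100, (2, 7))
    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 0, 0],
                                   [1, 1, 1, 0, 0, 0, 0]])  # lengths 5 and 3
    labels = torch.tensor([0, 2])
    loss, logits = model(input_ids, attention_mask, labels=labels)
    return loss, logits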
class BagOfWords(nn.Module):
def __init__(self, v_size=10, embd_dim=300, mlp_d=1024,
dropout_r=0.1, n_layers=1, num_labels=3):
super(BagOfWords, self).__init__()
self.Embd = nn.Embedding(v_size, embd_dim)
self.num_labels = num_labels
# self.mlp_1 = nn.Linear(h_size[2] * 2 * 4, mlp_d)
self.mlp_1 = nn.Linear(embd_dim, mlp_d)
self.mlp_2 = nn.Linear(mlp_d, mlp_d)
self.sm = nn.Linear(mlp_d, self.num_labels)
if n_layers == 1:
self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
self.sm])
elif n_layers == 2:
self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
self.mlp_2, nn.ReLU(), nn.Dropout(dropout_r),
self.sm])
else:
print("Error num layers")
def init_embedding(self, embedding):
self.Embd.weight = embedding.weight
def forward(self, input_ids, attention_mask, labels=None):
# if self.max_l:
# l1 = l1.clamp(max=self.max_l)
# l2 = l2.clamp(max=self.max_l)
# if s1.size(0) > self.max_l:
# s1 = s1[:self.max_l, :]
# if s2.size(0) > self.max_l:
# s2 = s2[:self.max_l, :]
batch_l_1 = torch.sum(attention_mask, dim=1)
# p_s1 = self.Embd(s1)
embedding_1 = self.Embd(input_ids)
s1_layer3_maxout = torch_util.avg_along_time(embedding_1, batch_l_1)
# s2_layer3_maxout = torch_util.max_along_time(s2_layer3_out, l2)
# Only use the last layer
# features = torch.cat([s1_layer3_maxout, s2_layer3_maxout,
# torch.abs(s1_layer3_maxout - s2_layer3_maxout),
# s1_layer3_maxout * s2_layer3_maxout],
# dim=1)
features = torch.cat([s1_layer3_maxout],
dim=1)
logits = self.classifier(features)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return (loss, logits)
|
anli-main
|
src/modeling/res_encoder.py
|
anli-main
|
src/hg_api/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
def evaluate(tokenizer, model, premise, hypothesis):
max_length = 256
tokenized_input_seq_pair = tokenizer.encode_plus(premise, hypothesis,
max_length=max_length,
return_token_type_ids=True, truncation=True)
input_ids = torch.Tensor(tokenized_input_seq_pair['input_ids']).long().unsqueeze(0)
# remember bart doesn't have 'token_type_ids', remove the line below if you are using bart.
token_type_ids = torch.Tensor(tokenized_input_seq_pair['token_type_ids']).long().unsqueeze(0)
attention_mask = torch.Tensor(tokenized_input_seq_pair['attention_mask']).long().unsqueeze(0)
outputs = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=None)
# Note:
# "id2label": {
# "0": "entailment",
# "1": "neutral",
# "2": "contradiction"
# },
predicted_probability = torch.softmax(outputs[0], dim=1)[0].tolist() # batch_size only one
#print("Premise:", premise)
#print("Hypothesis:", hypothesis)
print("Prediction:")
print("Entailment:", predicted_probability[0])
print("Neutral:", predicted_probability[1])
print("Contradiction:", predicted_probability[2])
print("="*20)
if __name__ == '__main__':
print("Loading model...")
# hg_model_hub_name = "ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/albert-xxlarge-v2-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/bart-large-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli"
hg_model_hub_name = "ynie/xlnet-large-cased-snli_mnli_fever_anli_R1_R2_R3-nli"
tokenizer = AutoTokenizer.from_pretrained(hg_model_hub_name)
model = AutoModelForSequenceClassification.from_pretrained(hg_model_hub_name)
print("Model loaded!")
while True:
premise = input("Premise> ")
hypothesis = input("Hypothesis> ")
evaluate(tokenizer, model, premise, hypothesis)
|
anli-main
|
src/hg_api/interactive.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import json
def get_prediction(tokenizer, model, premise, hypothesis, max_length=256):
tokenized_input_seq_pair = tokenizer.encode_plus(premise, hypothesis,
max_length=max_length,
return_token_type_ids=True, truncation=True)
input_ids = torch.Tensor(tokenized_input_seq_pair['input_ids']).long().unsqueeze(0)
token_type_ids = torch.Tensor(tokenized_input_seq_pair['token_type_ids']).long().unsqueeze(0)
attention_mask = torch.Tensor(tokenized_input_seq_pair['attention_mask']).long().unsqueeze(0)
outputs = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=None)
predicted_probability = torch.softmax(outputs[0], dim=1)[0] # batch_size only one
predicted_index = torch.argmax(predicted_probability)
predicted_probability = predicted_probability.tolist()
return predicted_probability, predicted_index
if __name__ == '__main__':
premise = "Two women are embracing while holding to go packages."
hypothesis = "The men are fighting outside a deli."
hg_model_hub_name = "ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/albert-xxlarge-v2-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/bart-large-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/xlnet-large-cased-snli_mnli_fever_anli_R1_R2_R3-nli"
tokenizer = AutoTokenizer.from_pretrained(hg_model_hub_name)
model = AutoModelForSequenceClassification.from_pretrained(hg_model_hub_name)
snli_dev = []
SNLI_DEV_FILE_PATH = "../../data/snli_1.0/snli_1.0_dev.jsonl" # you can change this to other path.
with open(SNLI_DEV_FILE_PATH, mode='r', encoding='utf-8') as in_f:
for line in in_f:
if line:
cur_item = json.loads(line)
if cur_item['gold_label'] != '-':
snli_dev.append(cur_item)
total = 0
correct = 0
label_mapping = {
0: 'entailment',
1: 'neutral',
2: 'contradiction',
}
print("Start evaluating...") # this might take a while.
for item in snli_dev:
_, pred_index = get_prediction(tokenizer, model, item['sentence1'], item['sentence2'])
if label_mapping[int(pred_index)] == item['gold_label']:
correct += 1
total += 1
if total % 200 == 0 and total != 0:
print(f"{total} finished.")
print("Total / Correct / Accuracy:", f"{total} / {correct} / {correct / total}")
|
anli-main
|
src/hg_api/interactive_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
|
anli-main
|
src/dataset_tools/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import config
from dataset_tools.format_convert import sm_nli2std_format, fever_nli2std_format, a_nli2std_format
from utils import common
# ANLI_VERSION = 1.0
def build_snli(path: Path):
snli_data_root_path = (path / "snli")
if not snli_data_root_path.exists():
snli_data_root_path.mkdir()
o_train = common.load_jsonl(config.PRO_ROOT / "data/snli_1.0/snli_1.0_train.jsonl")
o_dev = common.load_jsonl(config.PRO_ROOT / "data/snli_1.0/snli_1.0_dev.jsonl")
o_test = common.load_jsonl(config.PRO_ROOT / "data/snli_1.0/snli_1.0_test.jsonl")
d_trian = sm_nli2std_format(o_train)
d_dev = sm_nli2std_format(o_dev)
d_test = sm_nli2std_format(o_test)
print("SNLI examples without gold label have been filtered.")
print("SNLI Train size:", len(d_trian))
print("SNLI Dev size:", len(d_dev))
print("SNLI Test size:", len(d_test))
common.save_jsonl(d_trian, snli_data_root_path / 'train.jsonl')
common.save_jsonl(d_dev, snli_data_root_path / 'dev.jsonl')
common.save_jsonl(d_test, snli_data_root_path / 'test.jsonl')
def build_mnli(path: Path):
data_root_path = (path / "mnli")
if not data_root_path.exists():
data_root_path.mkdir()
o_train = common.load_jsonl(config.PRO_ROOT / "data/multinli_1.0/multinli_1.0_train.jsonl")
o_mm_dev = common.load_jsonl(config.PRO_ROOT / "data/multinli_1.0/multinli_1.0_dev_mismatched.jsonl")
o_m_dev = common.load_jsonl(config.PRO_ROOT / "data/multinli_1.0/multinli_1.0_dev_matched.jsonl")
d_trian = sm_nli2std_format(o_train)
d_mm_dev = sm_nli2std_format(o_mm_dev)
d_m_test = sm_nli2std_format(o_m_dev)
print("MNLI examples without gold label have been filtered.")
print("MNLI Train size:", len(d_trian))
print("MNLI MisMatched Dev size:", len(d_mm_dev))
print("MNLI Matched dev size:", len(d_m_test))
common.save_jsonl(d_trian, data_root_path / 'train.jsonl')
common.save_jsonl(d_mm_dev, data_root_path / 'mm_dev.jsonl')
common.save_jsonl(d_m_test, data_root_path / 'm_dev.jsonl')
def build_fever_nli(path: Path):
data_root_path = (path / "fever_nli")
if not data_root_path.exists():
data_root_path.mkdir()
o_train = common.load_jsonl(config.PRO_ROOT / "data/nli_fever/train_fitems.jsonl")
o_dev = common.load_jsonl(config.PRO_ROOT / "data/nli_fever/dev_fitems.jsonl")
o_test = common.load_jsonl(config.PRO_ROOT / "data/nli_fever/test_fitems.jsonl")
d_trian = fever_nli2std_format(o_train)
d_dev = fever_nli2std_format(o_dev)
d_test = fever_nli2std_format(o_test)
print("FEVER-NLI Train size:", len(d_trian))
print("FEVER-NLI Dev size:", len(d_dev))
print("FEVER-NLI Test size:", len(d_test))
common.save_jsonl(d_trian, data_root_path / 'train.jsonl')
common.save_jsonl(d_dev, data_root_path / 'dev.jsonl')
common.save_jsonl(d_test, data_root_path / 'test.jsonl')
def build_anli(path: Path, round=1, version='1.0'):
data_root_path = (path / "anli")
if not data_root_path.exists():
data_root_path.mkdir()
round_tag = str(round)
o_train = common.load_jsonl(config.PRO_ROOT / f"data/anli_v{version}/R{round_tag}/train.jsonl")
o_dev = common.load_jsonl(config.PRO_ROOT / f"data/anli_v{version}/R{round_tag}/dev.jsonl")
o_test = common.load_jsonl(config.PRO_ROOT / f"data/anli_v{version}/R{round_tag}/test.jsonl")
d_trian = a_nli2std_format(o_train)
d_dev = a_nli2std_format(o_dev)
d_test = a_nli2std_format(o_test)
print(f"ANLI (R{round_tag}) Train size:", len(d_trian))
print(f"ANLI (R{round_tag}) Dev size:", len(d_dev))
print(f"ANLI (R{round_tag}) Test size:", len(d_test))
if not (data_root_path / f"r{round_tag}").exists():
(data_root_path / f"r{round_tag}").mkdir()
common.save_jsonl(d_trian, data_root_path / f"r{round_tag}" / 'train.jsonl')
common.save_jsonl(d_dev, data_root_path / f"r{round_tag}" / 'dev.jsonl')
common.save_jsonl(d_test, data_root_path / f"r{round_tag}" / 'test.jsonl')
def build_data():
processed_data_root = config.PRO_ROOT / "data" / "build"
if not processed_data_root.exists():
processed_data_root.mkdir()
build_snli(processed_data_root)
build_mnli(processed_data_root)
build_fever_nli(processed_data_root)
for round in [1, 2, 3]:
build_anli(processed_data_root, round)
print("NLI data built!")
if __name__ == '__main__':
build_data()
|
anli-main
|
src/dataset_tools/build_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
from utils import common
from typing import List, Dict
from tqdm import tqdm
from collections import defaultdict
import config
from pathlib import Path
smnli_label2std_label = defaultdict(lambda: "o") # "o" stands for any other label that is invalid.
smnli_label2std_label.update({
"entailment": "e",
"neutral": "n",
"contradiction": "c",
"hidden": "h",
})
fever_label2std_label = defaultdict(lambda: "o")
fever_label2std_label.update({
'SUPPORTS': "e",
'NOT ENOUGH INFO': "n",
'REFUTES': "c",
'hidden': "h",
})
anli_label2std_label = defaultdict(lambda: "o")
anli_label2std_label.update({
'e': "e",
'n': "n",
'c': "c",
'hidden': "h",
})
# standard output format: {uid, premise, hypothesis, label, extra_dataset_related_field.}
def sm_nli2std_format(d_list, filter_invalid=True):
p_list: List[Dict] = []
for item in d_list:
formatted_item: Dict = dict()
formatted_item['uid']: str = item["pairID"]
formatted_item['premise']: str = item["sentence1"]
formatted_item['hypothesis']: str = item["sentence2"]
formatted_item['label']: str = smnli_label2std_label[item["gold_label"]]
if filter_invalid and formatted_item['label'] == 'o':
continue # Skip example with invalid label.
p_list.append(formatted_item)
return p_list
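# --- Tiny worked example (illustration only; the field values are invented). A raw
# SNLI/MNLI item keyed by pairID/sentence1/sentence2/gold_label is mapped to the standard
# {uid, premise, hypothesis, label} record, with labels collapsed to e/n/c.
def _example_sm_nli2std_format():
    demo_item = {"pairID": "42#1", "sentence1": "A dog runs.",
                 "sentence2": "An animal moves.", "gold_label": "entailment"}
    out = sm_nli2std_format([demo_item])
    assert out == [{"uid": "42#1", "premise": "A dog runs.",
                    "hypothesis": "An animal moves.", "label": "e"}]
    return out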
def fever_nli2std_format(d_list, filter_invalid=True):
p_list: List[Dict] = []
for item in d_list:
formatted_item: Dict = dict()
formatted_item['uid']: str = item["fid"]
formatted_item['premise']: str = item["context"]
formatted_item['hypothesis']: str = item["query"]
formatted_item['label']: str = fever_label2std_label[item["label"]]
if filter_invalid and formatted_item['label'] == 'o':
continue # Skip example with invalid label.
p_list.append(formatted_item)
return p_list
def a_nli2std_format(d_list, filter_invalid=True):
p_list: List[Dict] = []
for item in d_list:
formatted_item: Dict = dict()
formatted_item['uid']: str = item["uid"]
formatted_item['premise']: str = item["context"]
formatted_item['hypothesis']: str = item["hypothesis"]
formatted_item['label']: str = anli_label2std_label[item["label"]]
formatted_item['reason']: str = item["reason"]
if filter_invalid and formatted_item['label'] == 'o':
continue # Skip example with invalid label.
p_list.append(formatted_item)
return p_list
if __name__ == '__main__':
pass
|
anli-main
|
src/dataset_tools/format_convert.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import functools
# def get_length_and_mask(seq):
# len_mask = (seq != 0).long()
# len_t = get_lengths_from_binary_sequence_mask(len_mask)
# return len_mask, len_t
def length_truncate(seq, max_l, is_elmo=False):
def _truncate(seq):
if seq.size(1) > max_l:
return seq[:, :max_l, ...]
else:
return seq
if not is_elmo:
return _truncate(seq)
else:
s1_elmo_embd = dict()
s1_elmo_embd['mask'] = _truncate(seq['mask'])
s1_elmo_embd['elmo_representations'] = []
for e_rep in seq['elmo_representations']:
s1_elmo_embd['elmo_representations'].append(_truncate(e_rep))
return s1_elmo_embd
def pad_1d(seq, pad_l):
"""
The seq is a sequence having shape [T, ..]. Note: The seq contains only one instance. This is not batched.
:param seq: Input sequence with shape [T, ...]
:param pad_l: The required pad_length.
:return: Output sequence will have shape [Pad_L, ...]
"""
l = seq.size(0)
if l >= pad_l:
return seq[:pad_l, ] # Truncate the length if the length is bigger than required padded_length.
else:
pad_seq = Variable(seq.data.new(pad_l - l, *seq.size()[1:]).zero_()) # Requires_grad is False
return torch.cat([seq, pad_seq], dim=0)
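# --- Quick sketch (illustration only) of pad_1d on a single, unbatched sequence. A
# length-3 vector padded to length 5 gains two trailing zeros; padding to a shorter
# length truncates instead.
def _example_pad_1d():
    x = torch.tensor([1, 2, 3])
    padded = pad_1d(x, 5)     # -> tensor([1, 2, 3, 0, 0])
    truncated = pad_1d(x, 2)  # -> tensor([1, 2])
    assert padded.tolist() == [1, 2, 3, 0, 0] and truncated.tolist() == [1, 2]
    return padded, truncated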
def get_state_shape(rnn: nn.RNN, batch_size, bidirectional=False):
"""
Return the state shape of a given RNN. This is helpful when you want to create a init state for RNN.
Example:
c0 = h0 = Variable(src_seq_p.data.new(*get_state_shape([your rnn], 3, bidirectional)).zero_())
:param rnn: nn.LSTM, nn.GRU or subclass of nn.RNN
:param batch_size:
:param bidirectional:
:return:
"""
if bidirectional:
return rnn.num_layers * 2, batch_size, rnn.hidden_size
else:
return rnn.num_layers, batch_size, rnn.hidden_size
def pack_list_sequence(inputs, l, max_l=None, batch_first=True):
"""
Pack a batch of Tensor into one Tensor with max_length.
:param inputs:
:param l:
:param max_l: The max_length of the packed sequence.
:param batch_first:
:return:
"""
batch_list = []
max_l = max(list(l)) if not max_l else max_l
batch_size = len(inputs)
for b_i in range(batch_size):
batch_list.append(pad_1d(inputs[b_i], max_l))
pack_batch_list = torch.stack(batch_list, dim=1) if not batch_first \
else torch.stack(batch_list, dim=0)
return pack_batch_list
def pack_for_rnn_seq(inputs, lengths, batch_first=True, states=None):
"""
:param states: [rnn.num_layers, batch_size, rnn.hidden_size]
:param inputs: Shape of the input should be [B, T, D] if batch_first else [T, B, D].
:param lengths: [B]
:param batch_first:
:return:
"""
if not batch_first:
_, sorted_indices = lengths.sort()
'''
Reverse to decreasing order
'''
r_index = reversed(list(sorted_indices))
s_inputs_list = []
lengths_list = []
reverse_indices = np.zeros(lengths.size(0), dtype=np.int64)
for j, i in enumerate(r_index):
s_inputs_list.append(inputs[:, i, :].unsqueeze(1))
lengths_list.append(lengths[i])
reverse_indices[i] = j
reverse_indices = list(reverse_indices)
s_inputs = torch.cat(s_inputs_list, 1)
packed_seq = nn.utils.rnn.pack_padded_sequence(s_inputs, lengths_list)
return packed_seq, reverse_indices
else:
_, sorted_indices = lengths.sort()
'''
Reverse to decreasing order
'''
r_index = reversed(list(sorted_indices))
s_inputs_list = []
lengths_list = []
reverse_indices = np.zeros(lengths.size(0), dtype=np.int64)
if states is None:
states = ()
elif not isinstance(states, tuple):
states = (states,) # rnn.num_layers, batch_size, rnn.hidden_size
states_lists = tuple([] for _ in states)
for j, i in enumerate(r_index):
s_inputs_list.append(inputs[i, :, :])
lengths_list.append(lengths[i])
reverse_indices[i] = j
for state_list, state in zip(states_lists, states):
state_list.append(state[:, i, :].unsqueeze(1))
reverse_indices = list(reverse_indices)
s_inputs = torch.stack(s_inputs_list, dim=0)
packed_seq = nn.utils.rnn.pack_padded_sequence(s_inputs, lengths_list, batch_first=batch_first)
r_states = tuple(torch.cat(state_list, dim=1) for state_list in states_lists)
if len(r_states) == 1:
r_states = r_states[0]
return packed_seq, reverse_indices, r_states
def unpack_from_rnn_seq(packed_seq, reverse_indices, batch_first=True):
unpacked_seq, _ = nn.utils.rnn.pad_packed_sequence(packed_seq, batch_first=batch_first)
s_inputs_list = []
if not batch_first:
for i in reverse_indices:
s_inputs_list.append(unpacked_seq[:, i, :].unsqueeze(1))
return torch.cat(s_inputs_list, 1)
else:
for i in reverse_indices:
s_inputs_list.append(unpacked_seq[i, :, :].unsqueeze(0))
return torch.cat(s_inputs_list, 0)
def reverse_indice_for_state(states, reverse_indices):
"""
:param states: [rnn.num_layers, batch_size, rnn.hidden_size]
:param reverse_indices: [batch_size]
:return:
"""
if states is None:
states = ()
elif not isinstance(states, tuple):
states = (states,) # rnn.num_layers, batch_size, rnn.hidden_size
states_lists = tuple([] for _ in states)
for i in reverse_indices:
for state_list, state in zip(states_lists, states):
state_list.append(state[:, i, :].unsqueeze(1))
r_states = tuple(torch.cat(state_list, dim=1) for state_list in states_lists)
if len(r_states) == 1:
r_states = r_states[0]
return r_states
def auto_rnn(rnn: nn.RNN, seqs, lengths, batch_first=True, init_state=None, output_last_states=False):
batch_size = seqs.size(0) if batch_first else seqs.size(1)
state_shape = get_state_shape(rnn, batch_size, rnn.bidirectional)
# if init_state is None:
# h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())
# else:
# h0 = init_state[0] # rnn.num_layers, batch_size, rnn.hidden_size
# c0 = init_state[1]
packed_pinputs, r_index, init_state = pack_for_rnn_seq(seqs, lengths, batch_first, init_state)
if len(init_state) == 0:
h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())
init_state = (h0, c0)
output, last_state = rnn(packed_pinputs, init_state)
output = unpack_from_rnn_seq(output, r_index, batch_first)
if not output_last_states:
return output
else:
last_state = reverse_indice_for_state(last_state, r_index)
return output, last_state
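# --- Usage sketch (illustration only; sizes are arbitrary) for auto_rnn. It hides the
# sort/pack/unpack dance: hand it a padded [B, T, D] batch plus the true lengths and it
# returns padded outputs restored to the original batch order.
def _example_auto_rnn():
    lstm = nn.LSTM(input_size=8, hidden_size=6, num_layers=1,
                   bidirectional=True, batch_first=True)
    seqs = torch.randn(2, 5, 8)          # padded batch: B=2, T=5, D=8
    lengths = torch.tensor([5, 3])       # true lengths per example
    out = auto_rnn(lstm, seqs, lengths)  # -> [2, 5, 12] (2 * hidden_size)
    assert out.shape == (2, 5, 12)
    return out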
def pack_sequence_for_linear(inputs, lengths, batch_first=True):
"""
:param inputs: [B, T, D] if batch_first
:param lengths: [B]
:param batch_first:
:return:
"""
batch_list = []
if batch_first:
for i, l in enumerate(lengths):
# print(inputs[i, :l].size())
batch_list.append(inputs[i, :l])
packed_sequence = torch.cat(batch_list, 0)
# if chuck:
# return list(torch.chunk(packed_sequence, chuck, dim=0))
# else:
return packed_sequence
else:
raise NotImplementedError()
def chucked_forward(inputs, net, chuck=None):
if not chuck:
return net(inputs)
else:
output_list = [net(chuck) for chuck in torch.chunk(inputs, chuck, dim=0)]
return torch.cat(output_list, dim=0)
def unpack_sequence_for_linear(inputs, lengths, batch_first=True):
batch_list = []
max_l = max(lengths)
if not isinstance(inputs, list):
inputs = [inputs]
inputs = torch.cat(inputs)
if batch_first:
start = 0
for l in lengths:
end = start + l
batch_list.append(pad_1d(inputs[start:end], max_l))
start = end
return torch.stack(batch_list)
else:
raise NotImplementedError()
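# --- Round-trip sketch (illustration only) for pack/unpack_sequence_for_linear. Packing
# drops the padding positions so a linear layer only sees real tokens (sum(lengths) rows);
# unpacking restores the zero-padded [B, T, D] layout.
def _example_pack_unpack_for_linear():
    inputs = torch.randn(2, 4, 3)   # [B=2, T=4, D=3]
    lengths = [4, 2]
    packed = pack_sequence_for_linear(inputs, lengths)      # -> [6, 3]
    assert packed.shape == (6, 3)
    restored = unpack_sequence_for_linear(packed, lengths)  # -> [2, 4, 3]
    assert restored.shape == (2, 4, 3)
    return packed, restored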
def seq2seq_cross_entropy(logits, label, l, chuck=None, sos_truncate=True):
"""
:param logits: [exB, V] : exB = sum(l)
:param label: [B] : a batch of Label
:param l: [B] : a batch of LongTensor indicating the lengths of each inputs
:param chuck: Number of chuck to process
:return: A loss value
"""
packed_label = pack_sequence_for_linear(label, l)
cross_entropy_loss = functools.partial(F.cross_entropy, reduction='sum')
total = sum(l)
assert total == logits.size(0) or packed_label.size(0) == logits.size(0), \
"logits length mismatch with label length."
if chuck:
logits_losses = 0
for x, y in zip(torch.chunk(logits, chuck, dim=0), torch.chunk(packed_label, chuck, dim=0)):
logits_losses += cross_entropy_loss(x, y)
return logits_losses * (1 / total)
else:
return cross_entropy_loss(logits, packed_label) * (1 / total)
def max_along_time(inputs, lengths, list_in=False):
"""
:param inputs: [B, T, D]
:param lengths: [B]
:return: [B * D] max_along_time
:param list_in:
"""
ls = list(lengths)
if not list_in:
b_seq_max_list = []
for i, l in enumerate(ls):
seq_i = inputs[i, :l, :]
seq_i_max, _ = seq_i.max(dim=0)
seq_i_max = seq_i_max.squeeze()
b_seq_max_list.append(seq_i_max)
return torch.stack(b_seq_max_list)
else:
b_seq_max_list = []
for i, l in enumerate(ls):
seq_i = inputs[i]
seq_i_max, _ = seq_i.max(dim=0)
seq_i_max = seq_i_max.squeeze()
b_seq_max_list.append(seq_i_max)
return torch.stack(b_seq_max_list)
def avg_along_time(inputs, lengths, list_in=False):
"""
:param inputs: [B, T, D]
:param lengths: [B]
:return: [B * D] max_along_time
:param list_in:
"""
ls = list(lengths)
if not list_in:
b_seq_avg_list = []
for i, l in enumerate(ls):
seq_i = inputs[i, :l, :]
seq_i_avg = seq_i.mean(dim=0)
seq_i_avg = seq_i_avg.squeeze()
b_seq_avg_list.append(seq_i_avg)
return torch.stack(b_seq_avg_list)
else:
b_seq_avg_list = []
for i, l in enumerate(ls):
seq_i = inputs[i]
seq_i_avg = seq_i.mean(dim=0)
seq_i_avg = seq_i_avg.squeeze()
b_seq_avg_list.append(seq_i_avg)
return torch.stack(b_seq_avg_list)
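# --- Sanity sketch (illustration only) for the *_along_time poolers. Both ignore
# positions beyond each example's length: with length 2, only the first two timesteps
# contribute to the max / mean.
def _example_along_time_pooling():
    x = torch.tensor([[[1.0], [3.0], [100.0]]])  # [B=1, T=3, D=1]; last step is padding
    lengths = torch.tensor([2])
    assert max_along_time(x, lengths).item() == 3.0
    assert avg_along_time(x, lengths).item() == 2.0
    return True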
# def length_truncate(inputs, lengths, max_len):
# """
# :param inputs: [B, T]
# :param lengths: [B]
# :param max_len: int
# :return: [B, T]
# """
# max_l = max(1, max_len)
# max_s1_l = min(max(lengths), max_l)
# lengths = lengths.clamp(min=1, max=max_s1_l)
# if inputs.size(1) > max_s1_l:
# inputs = inputs[:, :max_s1_l]
#
# return inputs, lengths, max_s1_l
def get_reverse_indices(indices, lengths):
r_indices = indices.data.new(indices.size()).fill_(0)
batch_size = indices.size(0)
for i in range(int(batch_size)):
b_ind = indices[i]
b_l = lengths[i]
for k, ind in enumerate(b_ind):
if k >= b_l:
break
r_indices[i, int(ind)] = k
return r_indices
def index_ordering(inputs, lengths, indices, pad_value=0):
"""
:param inputs: [B, T, ~]
:param lengths: [B]
:param indices: [B, T]
:return:
"""
batch_size = inputs.size(0)
ordered_out_list = []
for i in range(int(batch_size)):
b_input = inputs[i]
b_l = lengths[i]
b_ind = indices[i]
b_out = b_input[b_ind]
if b_out.size(0) > b_l:
b_out[b_l:] = pad_value
ordered_out_list.append(b_out)
outs = torch.stack(ordered_out_list, dim=0)
return outs
def start_and_end_token_handling(inputs, lengths, sos_index=1, eos_index=2, pad_index=0,
op=None):
"""
:param inputs: [B, T]
:param lengths: [B]
:param sos_index:
:param eos_index:
:param pad_index:
:return:
"""
batch_size = inputs.size(0)
if not op:
return inputs, lengths
elif op == 'rm_start':
inputs = torch.cat([inputs[:, 1:], Variable(inputs.data.new(batch_size, 1).zero_())], dim=1)
return inputs, lengths - 1
elif op == 'rm_end':
for i in range(batch_size):
pass
# Potential problems!?
# inputs[i, lengths[i] - 1] = pad_index
return inputs, lengths - 1
elif op == 'rm_both':
for i in range(batch_size):
pass
# Potential problems!?
# inputs[i, lengths[i] - 1] = pad_index
inputs = torch.cat([inputs[:, 1:], Variable(inputs.data.new(batch_size, 1).zero_())], dim=1)
return inputs, lengths - 2
def seq2seq_att(mems, lengths, state, att_net=None):
"""
:param mems: [B, T, D_mem] This are the memories.
I call this variable memory because attention works like reading something and then
aligning it with your memories.
This memory here is usually the input hidden state of the encoder.
:param lengths: [B]
:param state: [B, D_state]
I call this variable state because it's the state I perceive at this time step.
:param att_net: This is the attention network that will be used to calculate the alignment score between
state and memories.
input of the att_net is mems and state with shape:
mems: [exB, D_mem]
state: [exB, D_state]
return of the att_net is [exB, 1]
So any function that map a vector to a scalar could work.
:return: [B, D_result]
"""
d_state = state.size(1)
if not att_net:
return state
else:
batch_list_mems = []
batch_list_state = []
for i, l in enumerate(lengths):
b_mems = mems[i, :l] # [T, D_mem]
batch_list_mems.append(b_mems)
b_state = state[i].expand(b_mems.size(0), d_state) # [T, D_state]
batch_list_state.append(b_state)
packed_sequence_mems = torch.cat(batch_list_mems, 0) # [sum(l), D_mem]
packed_sequence_state = torch.cat(batch_list_state, 0) # [sum(l), D_state]
align_score = att_net(packed_sequence_mems, packed_sequence_state) # [sum(l), 1]
# The score grouped as [(a1, a2, a3), (a1, a2), (a1, a2, a3, a4)].
# aligned_seq = packed_sequence_mems * align_score
start = 0
result_list = []
for i, l in enumerate(lengths):
end = start + l
b_mems = packed_sequence_mems[start:end, :] # [l, D_mems]
b_score = align_score[start:end, :] # [l, 1]
softed_b_score = F.softmax(b_score, dim=0)  # [l, 1], softmax over the l memory positions
weighted_sum = torch.sum(b_mems * softed_b_score, dim=0, keepdim=False) # [D_mems]
result_list.append(weighted_sum)
start = end
result = torch.stack(result_list, dim=0)
return result
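# A minimal attention scorer compatible with seq2seq_att above (a sketch only, not part
# of the original code; the class name and sizes are hypothetical, and it assumes
# torch.nn is available as nn):
#
#   class BilinearScorer(nn.Module):
#       def __init__(self, d_mem, d_state):
#           super().__init__()
#           self.proj = nn.Linear(d_mem, d_state, bias=False)
#
#       def forward(self, mems, state):   # mems: [exB, D_mem], state: [exB, D_state]
#           return (self.proj(mems) * state).sum(dim=1, keepdim=True)   # [exB, 1]
#
#   # context = seq2seq_att(mems, lengths, state, att_net=BilinearScorer(8, 8))  # [B, D_mem]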
# Test something
|
anli-main
|
src/flint/torch_util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
|
anli-main
|
src/flint/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
class FlintField(object):
@classmethod
def batching(cls, batched_data):
raise NotImplementedError()
class RawFlintField(FlintField):
@classmethod
def batching(cls, batched_data):
return batched_data
class LabelFlintField(FlintField):
def batching(self, batched_data):
return torch.tensor(batched_data)
class ArrayIndexFlintField(FlintField):
def __init__(self, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False) -> None:
super().__init__()
self.pad_idx = pad_idx
self.eos_idx = eos_idx
self.left_pad = left_pad
self.move_eos_to_beginning = move_eos_to_beginning
def collate_tokens(self, values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False):
"""
Convert a list of 1d tensors into a padded 2d tensor.
"""
if not torch.is_tensor(values[0]):
values = [torch.tensor(v) for v in values]
size = max(v.size(0) for v in values)
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
assert src[-1] == eos_idx
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
return res
def batching(self, batched_data):
return self.collate_tokens(batched_data,
self.pad_idx,
self.eos_idx,
self.left_pad,
self.move_eos_to_beginning)
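# Padding example for ArrayIndexFlintField.batching (hypothetical token ids, pad_idx=0):
#
#   field = ArrayIndexFlintField(pad_idx=0)
#   field.batching([[5, 6, 7], [8, 9]])
#   # -> tensor([[5, 6, 7],
#   #            [8, 9, 0]])
#
# With left_pad=True the shorter sequence is padded on the left instead.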
|
anli-main
|
src/flint/data_utils/fields.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
from typing import Dict, Type
from flint.data_utils.fields import FlintField, RawFlintField
class BaseBatchBuilder(object):
def __init__(self, batching_schema: Dict[str, FlintField]) -> None:
super().__init__()
self.batching_schema: Dict[str, FlintField] = batching_schema
def __call__(self, batch):
field_names = batch[0].keys()
batched_data = dict()
for field_name in field_names:
if field_name not in self.batching_schema:
# default is RawFlintField
batched_data[field_name] = RawFlintField.batching([item[field_name] for item in batch])
else:
batched_data[field_name] = self.batching_schema[field_name].batching([item[field_name] for item in batch])
return batched_data
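# Usage sketch (hypothetical field names; LabelFlintField and ArrayIndexFlintField come
# from flint.data_utils.fields): BaseBatchBuilder can be passed to a torch DataLoader
# as its collate_fn, with one FlintField per dictionary key describing how that key
# is batched.
#
#   batching_schema = {
#       'uid': RawFlintField(),
#       'label': LabelFlintField(),
#       'input_ids': ArrayIndexFlintField(pad_idx=0),
#   }
#   batch_builder = BaseBatchBuilder(batching_schema)
#   # loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=batch_builder)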
def has_tensor(obj) -> bool:
"""
Given a possibly complex data structure,
check if it has any torch.Tensors in it.
"""
if isinstance(obj, torch.Tensor):
return True
elif isinstance(obj, dict):
return any(has_tensor(value) for value in obj.values())
elif isinstance(obj, (list, tuple)):
return any(has_tensor(item) for item in obj)
else:
return False
def move_to_device(obj, cuda_device: int):
"""
Given a structure (possibly) containing Tensors on the CPU,
move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
"""
if cuda_device < 0 or not has_tensor(obj):
return obj
elif isinstance(obj, torch.Tensor):
return obj.cuda(cuda_device)
elif isinstance(obj, dict):
return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, cuda_device) for item in obj]
elif isinstance(obj, tuple) and hasattr(obj, "_fields"):
# This is the best way to detect a NamedTuple, it turns out.
return obj.__class__(*(move_to_device(item, cuda_device) for item in obj))
elif isinstance(obj, tuple):
return tuple(move_to_device(item, cuda_device) for item in obj)
else:
return obj
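# Usage sketch for move_to_device (illustrative values; cuda_device < 0 means
# "leave everything on the CPU"):
#
#   batch = {'input_ids': torch.zeros(2, 4, dtype=torch.long), 'uid': ['a', 'b']}
#   batch = move_to_device(batch, cuda_device=0)    # tensors moved to GPU 0, non-tensor entries unchanged
#   batch = move_to_device(batch, cuda_device=-1)   # no-op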
if __name__ == '__main__':
print(RawFlintField.batching)
|
anli-main
|
src/flint/data_utils/batchbuilder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
|
anli-main
|
src/flint/data_utils/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import dreamerv2.api as dv2
from dreamerv2.train import run
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main(args):
## get defaults
config = dv2.defaults
if args.task:
if 'crafter' in args.task:
config = config.update(dv2.configs['crafter'])
elif 'minigrid' in args.task:
config = config.update(dv2.configs['minigrid'])
elif 'atari' in args.task:
config = config.update(dv2.configs['atari'])
elif 'dmc' in args.task:
config = config.update(dv2.configs['dmc_vision'])
params = vars(args)
config = config.update(params)
config = config.update({
'expl_behavior': 'Plan2Explore',
'pred_discount': False,
'grad_heads': ['decoder'],  # i.e. we don't learn the reward head
'expl_intr_scale': 1.0,
'expl_extr_scale': 0.0,
'discount': 0.99,
})
run(config)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='RL')
# DreamerV2
parser.add_argument('--xpid', type=str, default=None, help='experiment id')
parser.add_argument('--steps', type=int, default=1_000_000, help='total number of environment steps to train for')
parser.add_argument('--train_every', type=int, default=100_000, help='number of environment steps to collect between training phases')
parser.add_argument('--offline_model_train_steps', type=int, default=25001, help='world-model training steps per phase; 25001 = 250 * (train_every in thousands) + 1, assuming train_every=100k')
parser.add_argument('--task', type=str, default='crafter_noreward', help='environment to train on')
parser.add_argument('--logdir', default='~/wm_logs/', help='directory to save agent logs')
parser.add_argument('--num_agents', type=int, default=1, help='exploration population size.')
parser.add_argument('--seed', type=int, default=100, help='seed for init NNs.')
parser.add_argument('--envs', type=int, default=1, help='number of training envs.')
parser.add_argument('--envs_parallel', type=str, default="none", help='how to parallelize.')
parser.add_argument('--eval_envs', type=int, default=1, help='number of parallel eval envs.')
parser.add_argument('--eval_eps', type=int, default=100, help='number of eval eps.')
parser.add_argument('--eval_type', type=str, default='coincidental', help='how to evaluate the model.')
parser.add_argument('--expl_behavior', type=str, default='Plan2Explore', help='algorithm for exploration: Plan2Explore or Random.')
parser.add_argument('--load_pretrained', type=str, default='none', help='name of pretrained model')
parser.add_argument('--offline_dir', type=str, default='none', help='directory to load offline dataset')
# CASCADE
parser.add_argument('--cascade_alpha', type=float, default=0, help='Cascade weight.')
parser.add_argument('--cascade_feat', type=str, default="deter", help='Cascade features if state based.')
parser.add_argument('--cascade_k', type=int, default=5, help='number of nearest neighbors to use in the mean dist.')
parser.add_argument('--cascade_sample', type=int, default=100, help='max number of cascade states')
args = parser.parse_args()
main(args)
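# Example invocation (hypothetical experiment id and paths; flags map to the
# arguments defined above):
#
#   python main.py --xpid cascade_crafter --task crafter_noreward \
#       --num_agents 4 --cascade_alpha 0.5 --logdir ~/wm_logs/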
|
cascade-main
|
main.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import tensorflow as tf
from tensorflow_probability import distributions as tfd
import agent
import common
class Random(common.Module):
def __init__(self, config, act_space, wm, tfstep, reward):
self.config = config
self.act_space = act_space
discrete = hasattr(act_space, 'n')
if self.config.actor.dist == 'auto':
self.config = self.config.update({
'actor.dist': 'onehot' if discrete else 'trunc_normal'})
def actor(self, feat):
shape = feat.shape[:-1] + self.act_space.shape
if self.config.actor.dist == 'onehot':
return common.OneHotDist(tf.zeros(shape))
else:
dist = tfd.Uniform(-tf.ones(shape), tf.ones(shape))
return tfd.Independent(dist, 1)
def train(self, start, context, data):
return None, {}
class Plan2Explore(common.Module):
def __init__(self, config, act_space, wm, tfstep, reward):
self.config = config
self.act_space = act_space
self.tfstep = tfstep
self.reward = reward
self.wm = wm
self._init_actors()
stoch_size = config.rssm.stoch
if config.rssm.discrete:
stoch_size *= config.rssm.discrete
size = {
'embed': 32 * config.encoder.cnn_depth,
'stoch': stoch_size,
'deter': config.rssm.deter,
'feat': config.rssm.stoch + config.rssm.deter,
}[self.config.disag_target]
self._networks = [
common.MLP(size, **config.expl_head)
for _ in range(config.disag_models)]
self.opt = common.Optimizer('expl', **config.expl_opt)
self.extr_rewnorm = common.StreamNorm(**self.config.expl_reward_norm)
def _init_actors(self):
self.intr_rewnorm = common.StreamNorm(**self.config.expl_reward_norm)
self.ac = [agent.ActorCritic(self.config, self.act_space, self.tfstep) for _ in range(self.config.num_agents)]
if self.config.cascade_alpha > 0:
self.intr_rewnorm_cascade = [common.StreamNorm(**self.config.expl_reward_norm) for _ in range(self.config.num_agents)]
self.actor = [ac.actor for ac in self.ac]
def train(self, start, context, data):
metrics = {}
stoch = start['stoch']
if self.config.rssm.discrete:
stoch = tf.reshape(
stoch, stoch.shape[:-2] + (stoch.shape[-2] * stoch.shape[-1]))
target = {
'embed': context['embed'],
'stoch': stoch,
'deter': start['deter'],
'feat': context['feat'],
}[self.config.disag_target]
inputs = context['feat']
if self.config.disag_action_cond:
action = tf.cast(data['action'], inputs.dtype)
inputs = tf.concat([inputs, action], -1)
metrics.update(self._train_ensemble(inputs, target))
gpu = tf.config.list_physical_devices('GPU')
if gpu:
tf.config.experimental.set_memory_growth(gpu[0], True)
print(f"Before: {tf.config.experimental.get_memory_usage('GPU:0')}", flush=True)
self.cascade = []
reward_func = self._intr_reward_incr
print("training explorers", flush=True)
[metrics.update(ac.train(self.wm, start, data['is_terminal'], reward_func)) for ac in self.ac]
self.cascade = []
print("finished training explorers", flush=True)
return None, metrics
def _intr_reward(self, seq, rtn_meta=True):
inputs = seq['feat']
if self.config.disag_action_cond:
action = tf.cast(seq['action'], inputs.dtype)
inputs = tf.concat([inputs, action], -1)
preds = [head(inputs).mode() for head in self._networks]
disag = tf.cast(tf.tensor(preds).std(0).mean(-1), tf.float16)
if self.config.disag_log:
disag = tf.math.log(disag)
reward = self.config.expl_intr_scale * self.intr_rewnorm(disag)[0]
if self.config.expl_extr_scale:
reward += self.config.expl_extr_scale * self.extr_rewnorm(
self.reward(seq))[0]
if rtn_meta:
return reward, {'Disagreement': [disag.mean()]}
else:
return reward
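# In short, the Plan2Explore intrinsic reward above is the ensemble disagreement:
#   disag = mean_over_features( std_over_heads( head_i(inputs) ) )
# optionally log-scaled, normalized by StreamNorm, and scaled by expl_intr_scale
# (plus an optional extrinsic term weighted by expl_extr_scale).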
@tf.function
def get_dists(self, obs, cascade):
# Naive (loop-based) way to do this: for each imagined state, compute its k-NN distances to the cascade.
out = []
for idx in range(obs.shape[1]):
cascade = tf.reshape(cascade, [-1, cascade.shape[-1]])
ob = tf.reshape(obs[:, idx, :], [obs.shape[0], 1, obs.shape[-1]])
dists = tf.math.sqrt(tf.einsum('ijk, ijk->ij', cascade - ob, cascade - ob))
topk_mean = tf.negative(tf.math.top_k(tf.negative(dists), k=self.config.cascade_k)[0])
out += [tf.reshape(tf.math.reduce_mean(topk_mean, axis=-1), (1, -1))]
return tf.concat(out, axis=1)
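# Conceptually, for each imagined state s in `obs`, get_dists computes
#   d(s) = mean over the cascade_k nearest c in `cascade` of ||s - c||_2,
# i.e. the mean Euclidean distance to its nearest neighbours among the states
# already collected from earlier explorers.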
def get_cascade_entropy(self):
cascade = tf.concat(self.cascade, axis=0)
cascade = tf.reshape(cascade, [-1, cascade.shape[-1]])
entropy = tf.math.reduce_variance(cascade, axis=-1).mean()
self.entropy = entropy
return entropy
def _intr_reward_incr(self, seq):
agent_idx = len(self.cascade)
## disagreement
reward, met = self._intr_reward(seq)
# CASCADE
if self.config.cascade_alpha > 0:
## reward = (1 - \alpha) * disagreement + \alpha * diversity
if len(self.cascade) == 0:
idxs = tf.range(tf.shape(seq[self.config.cascade_feat])[1])
size = min(seq[self.config.cascade_feat].shape[1], self.config.cascade_sample)
self.ridxs = tf.random.shuffle(idxs)[:size]
self.dist = None
self.entropy = 0
self.cascade.append(tf.gather(seq[self.config.cascade_feat][-1], self.ridxs, axis=1))
cascade_reward = self.get_cascade_entropy()
cascade_reward = tf.concat([tf.cast(tf.zeros([seq[self.config.cascade_feat].shape[0] - 1, seq[self.config.cascade_feat].shape[1]]), tf.float16), tf.cast(tf.broadcast_to(cascade_reward, shape=(1, seq[self.config.cascade_feat].shape[1])), tf.float16)], axis=0)
cascade_reward = self.intr_rewnorm_cascade[agent_idx](cascade_reward)[0]
met.update({'Diversity': [cascade_reward.mean()]})
reward = reward * (1 - self.config.cascade_alpha) + self.config.cascade_alpha * cascade_reward
return reward, met
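# The CASCADE mixing above is reward = (1 - alpha) * disagreement + alpha * diversity.
# For example, with cascade_alpha = 0.5 a state with normalized disagreement 0.8 and
# diversity 0.2 receives reward 0.5 * 0.8 + 0.5 * 0.2 = 0.5 (illustrative numbers only).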
def _train_ensemble(self, inputs, targets):
if self.config.disag_offset:
targets = targets[:, self.config.disag_offset:]
inputs = inputs[:, :-self.config.disag_offset]
targets = tf.stop_gradient(targets)
inputs = tf.stop_gradient(inputs)
with tf.GradientTape() as tape:
preds = [head(inputs) for head in self._networks]
loss = -sum([pred.log_prob(targets).mean() for pred in preds])
metrics = self.opt(tape, loss, self._networks)
return metrics
class ModelLoss(common.Module):
def __init__(self, config, act_space, wm, tfstep, reward):
self.config = config
self.reward = reward
self.wm = wm
self.ac = agent.ActorCritic(config, act_space, tfstep)
self.actor = self.ac.actor
self.head = common.MLP([], **self.config.expl_head)
self.opt = common.Optimizer('expl', **self.config.expl_opt)
def train(self, start, context, data):
metrics = {}
target = tf.cast(context[self.config.expl_model_loss], tf.float16)
with tf.GradientTape() as tape:
loss = -self.head(context['feat']).log_prob(target).mean()
metrics.update(self.opt(tape, loss, self.head))
metrics.update(self.ac.train(
self.wm, start, data['is_terminal'], self._intr_reward))
return None, metrics
def _intr_reward(self, seq):
reward = self.config.expl_intr_scale * self.head(seq['feat']).mode()
if self.config.expl_extr_scale:
reward += self.config.expl_extr_scale * self.reward(seq)
return reward
|
cascade-main
|
dreamerv2/expl.py
|
import logging
import os
import pathlib
import sys
import warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger().setLevel('ERROR')
warnings.filterwarnings('ignore', '.*box bound precision lowered.*')
sys.path.append(str(pathlib.Path(__file__).parent))
sys.path.append(str(pathlib.Path(__file__).parent.parent))
import ruamel.yaml as yaml
import common
configs = yaml.safe_load(
(pathlib.Path(__file__).parent / 'configs.yaml').read_text())
defaults = common.Config(configs.pop('defaults'))
|
cascade-main
|
dreamerv2/api.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import mixed_precision as prec
from dreamerv2 import common
from dreamerv2 import expl
tfd = tfp.distributions
class Agent(common.Module):
def __init__(self, config, obs_space, act_space, step):
self.config = config
self.obs_space = obs_space
self.act_space = act_space['action']
self.step = step
self.tfstep = tf.Variable(int(self.step), tf.int64)
self.wm = WorldModel(config, obs_space, self.tfstep)
if config.task in common.DMC_TASK_IDS:
self._task_behavior = {
key: ActorCritic(config, self.act_space, self.tfstep)
for key in common.DMC_TASK_IDS[config.task]
}
else:
self._task_behavior = ActorCritic(config, self.act_space, self.tfstep)
if config.expl_behavior == 'greedy':
self._expl_behavior = self._task_behavior
else:
self._expl_behavior = getattr(expl, config.expl_behavior)(
self.config, self.act_space, self.wm, self.tfstep,
lambda seq: self.wm.heads['reward'](seq['feat']).mode())
@tf.function
def policy(self, obs, state=None, policy_idx=0, mode='train', goal=''):
obs = tf.nest.map_structure(tf.tensor, obs)
tf.py_function(lambda: self.tfstep.assign(
int(self.step), read_value=False), [], [])
if state is None:
latent = self.wm.rssm.initial(obs['reward'].shape[0])
action = tf.zeros((obs['reward'].shape[0],) + self.act_space.shape)
state = latent, action
latent, action = state
embed = self.wm.encoder(self.wm.preprocess(obs))
sample = (mode == 'train') or not self.config.eval_state_mean
latent, _ = self.wm.rssm.obs_step(
latent, action, embed, obs['is_first'], sample)
feat = self.wm.rssm.get_feat(latent)
if mode == 'eval':
if goal == '':
actor = self._task_behavior.actor(feat)
else:
actor = self._task_behavior[goal].actor(feat)
action = actor.mode()
noise = self.config.eval_noise
elif mode == 'explore':
try:
actor = self._expl_behavior.actor[policy_idx](feat)
except:
print("Population version not implemented")
actor = self._expl_behavior.actor(feat)
action = actor.sample()
noise = self.config.expl_noise
elif mode == 'train':
actor = self._task_behavior.actor(feat)
action = actor.sample()
noise = self.config.expl_noise
action = common.action_noise(action, noise, self.act_space)
outputs = {'action': action}
state = (latent, action)
return outputs, state
@tf.function
def train(self, data, state=None):
## deprecated
metrics = {}
state, outputs, mets = self.wm.train(data, state)
metrics.update(mets)
start = outputs['post']
if self.config.eval_type == "labels" or 'dmc' in self.config.task:
if isinstance(self._task_behavior, dict):
for key in self._task_behavior.keys():
reward = lambda seq: (self.wm.heads['reward_' + key](seq['feat']).mode(), {})
mets = self._task_behavior[key].train(
self.wm, start, data['is_terminal'], reward)
metrics.update(**{k+'_'+key: v for k, v in mets.items()})
else:
reward = lambda seq: (self.wm.heads['reward'](seq['feat']).mode(), {})
metrics.update(self._task_behavior.train(
self.wm, start, data['is_terminal'], reward))
if self.config.expl_behavior != 'greedy':
mets = self._expl_behavior.train(start, outputs, data)[-1]
metrics.update({'expl_' + key: value for key, value in mets.items()})
return state, metrics
@tf.function
def report(self, data):
report = {}
data = self.wm.preprocess(data)
for key in self.wm.heads['decoder'].cnn_keys:
name = key.replace('/', '_')
report[f'openl_{name}'] = self.wm.video_pred(data, key)
return report
class WorldModel(common.Module):
def __init__(self, config, obs_space, tfstep):
shapes = {k: tuple(v.shape) for k, v in obs_space.items()}
self.config = config
self.tfstep = tfstep
self.rssm = common.EnsembleRSSM(**config.rssm)
self.encoder = common.Encoder(shapes, **config.encoder)
self.heads = {
'decoder': common.Decoder(shapes, **config.decoder),
}
if config.task in common.DMC_TASK_IDS:
self.heads.update({f'reward_{common.DMC_TASK_IDS[config.task][idx]}': common.MLP([], **config.reward_head)
for idx in range(len(common.DMC_TASK_IDS[config.task]))})
else:
self.heads['reward'] = common.MLP([], **config.reward_head)
if config.pred_discount:
self.heads['discount'] = common.MLP([], **config.discount_head)
for name in config.grad_heads:
assert name in self.heads, name
self.model_opt = common.Optimizer('model', **config.model_opt)
def train(self, data, state=None):
with tf.GradientTape() as model_tape:
model_loss, state, outputs, metrics = self.loss(data, state)
modules = [self.encoder, self.rssm, *self.heads.values()]
metrics.update(self.model_opt(model_tape, model_loss, modules))
return state, outputs, metrics
def loss(self, data, state=None):
data = self.preprocess(data)
embed = self.encoder(data)
post, prior = self.rssm.observe(
embed, data['action'], data['is_first'], state)
kl_loss, kl_value = self.rssm.kl_loss(post, prior, **self.config.kl)
assert len(kl_loss.shape) == 0
likes = {}
losses = {'kl': kl_loss}
feat = self.rssm.get_feat(post)
for name, head in self.heads.items():
grad_head = (name in self.config.grad_heads)
inp = feat if grad_head else tf.stop_gradient(feat)
out = head(inp)
dists = out if isinstance(out, dict) else {name: out}
for key, dist in dists.items():
if 'reward_' in key:
_, rew_key = key.split('_')
print(f"\n\nStart Training Reward Head {rew_key}...", flush=True)
rew_idx = common.DMC_TASK_IDS[self.config.task].index(rew_key)
like = tf.cast(dist.log_prob(data['reward'][:, :, rew_idx]), tf.float32)
else:
like = tf.cast(dist.log_prob(data[key]), tf.float32)
likes[key] = like
losses[key] = -like.mean()
model_loss = sum(
self.config.loss_scales.get(k, 1.0) * v for k, v in losses.items())
outs = dict(
embed=embed, feat=feat, post=post,
prior=prior, likes=likes, kl=kl_value)
metrics = {f'{name}_loss': value for name, value in losses.items()}
metrics['model_kl'] = kl_value.mean()
metrics['prior_ent'] = self.rssm.get_dist(prior).entropy().mean()
metrics['post_ent'] = self.rssm.get_dist(post).entropy().mean()
last_state = {k: v[:, -1] for k, v in post.items()}
return model_loss, last_state, outs, metrics
def imagine(self, policy, start, is_terminal, horizon, idx=None):
flatten = lambda x: x.reshape([-1] + list(x.shape[2:]))
start = {k: flatten(v) for k, v in start.items()}
start['feat'] = self.rssm.get_feat(start)
if idx:
start['action'] = tf.zeros_like(policy(start['feat'], idx=idx).mode())
else:
start['action'] = tf.zeros_like(policy(start['feat']).mode())
seq = {k: [v] for k, v in start.items()}
for _ in range(horizon):
if idx:
action = policy(tf.stop_gradient(seq['feat'][-1]), idx=idx).sample()
else:
action = policy(tf.stop_gradient(seq['feat'][-1])).sample()
state = self.rssm.img_step({k: v[-1] for k, v in seq.items()}, action)
feat = self.rssm.get_feat(state)
for key, value in {**state, 'action': action, 'feat': feat}.items():
seq[key].append(value)
seq = {k: tf.stack(v, 0) for k, v in seq.items()}
if 'discount' in self.heads:
disc = self.heads['discount'](seq['feat']).mean()
if is_terminal is not None:
# Override discount prediction for the first step with the true
# discount factor from the replay buffer.
true_first = 1.0 - flatten(is_terminal).astype(disc.dtype)
true_first *= self.config.discount
disc = tf.concat([true_first[None], disc[1:]], 0)
else:
disc = self.config.discount * tf.ones(seq['feat'].shape[:-1])
seq['discount'] = disc
# Shift discount factors because they imply whether the following state
# will be valid, not whether the current state is valid.
seq['weight'] = tf.math.cumprod(
tf.concat([tf.ones_like(disc[:1]), disc[:-1]], 0), 0)
return seq
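# Worked example of the weight computation above (illustrative numbers): with
# predicted discounts d = [d0, d1, d2], the weights are [1, d0, d0*d1], i.e. the
# probability that the imagined trajectory is still valid when each step is reached.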
@tf.function
def preprocess(self, obs):
dtype = prec.global_policy().compute_dtype
obs = obs.copy()
for key, value in obs.items():
if key.startswith('log_'):
continue
if value.dtype == tf.int32:
value = value.astype(dtype)
if value.dtype == tf.uint8:
value = value.astype(dtype) / 255.0 - 0.5
obs[key] = value
obs['reward'] = {
'identity': tf.identity,
'sign': tf.sign,
'tanh': tf.tanh,
}[self.config.clip_rewards](obs['reward'])
if 'discount' not in obs:
obs['discount'] = 1.0 - obs['is_terminal'].astype(dtype)
obs['discount'] *= self.config.discount
return obs
@tf.function
def video_pred(self, data, key):
decoder = self.heads['decoder']
truth = data[key][:6] + 0.5
embed = self.encoder(data)
states, _ = self.rssm.observe(
embed[:6, :5], data['action'][:6, :5], data['is_first'][:6, :5])
recon = decoder(self.rssm.get_feat(states))[key].mode()[:6]
init = {k: v[:, -1] for k, v in states.items()}
prior = self.rssm.imagine(data['action'][:6, 5:], init)
openl = decoder(self.rssm.get_feat(prior))[key].mode()
model = tf.concat([recon[:, :5] + 0.5, openl + 0.5], 1)
error = (model - truth + 1) / 2
video = tf.concat([truth, model, error], 2)
B, T, H, W, C = video.shape
return video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C))
class ActorCritic(common.Module):
def __init__(self, config, act_space, tfstep):
self.config = config
self.act_space = act_space
self.tfstep = tfstep
discrete = hasattr(act_space, 'n')
if self.config.actor.dist == 'auto':
self.config = self.config.update({
'actor.dist': 'onehot' if discrete else 'trunc_normal'})
if self.config.actor_grad == 'auto':
self.config = self.config.update({
'actor_grad': 'reinforce' if discrete else 'dynamics'})
self.actor = common.MLP(act_space.shape[0], **self.config.actor)
self.critic = common.MLP([], **self.config.critic)
if self.config.slow_target:
self._target_critic = common.MLP([], **self.config.critic)
self._updates = tf.Variable(0, tf.int64)
else:
self._target_critic = self.critic
self.actor_opt = common.Optimizer('actor', **self.config.actor_opt)
self.critic_opt = common.Optimizer('critic', **self.config.critic_opt)
self.rewnorm = common.StreamNorm(**self.config.reward_norm)
def train(self, world_model, start, is_terminal, reward_fn):
metrics = {}
hor = self.config.imag_horizon
# The weights are is_terminal flags for the imagination start states.
# Technically, they should multiply the losses from the second trajectory
# step onwards, which is the first imagined step. However, we are not
# training the action that led into the first step anyway, so we can use
# them to scale the whole sequence.
with tf.GradientTape() as actor_tape:
seq = world_model.imagine(self.actor, start, is_terminal, hor)
reward, mets0 = reward_fn(seq)
seq['reward'], mets1 = self.rewnorm(reward)
mets1 = {f'reward_{k}': v for k, v in mets1.items()}
target, mets2 = self.target(seq)
actor_loss, mets3 = self.actor_loss(seq, target)
with tf.GradientTape() as critic_tape:
critic_loss, mets4 = self.critic_loss(seq, target)
metrics.update(self.actor_opt(actor_tape, actor_loss, self.actor))
metrics.update(self.critic_opt(critic_tape, critic_loss, self.critic))
metrics.update(**mets0, **mets1, **mets2, **mets3, **mets4)
self.update_slow_target() # Variables exist after first forward pass.
return metrics
def actor_loss(self, seq, target):
# Actions: 0 [a1] [a2] a3
# ^ | ^ | ^ |
# / v / v / v
# States: [z0]->[z1]-> z2 -> z3
# Targets: t0 [t1] [t2]
# Baselines: [v0] [v1] v2 v3
# Entropies: [e1] [e2]
# Weights: [ 1] [w1] w2 w3
# Loss: l1 l2
metrics = {}
# Two states are lost at the end of the trajectory, one for the bootstrap
# value prediction and one because the corresponding action does not lead
# anywhere anymore. One target is lost at the start of the trajectory
# because the initial state comes from the replay buffer.
policy = self.actor(tf.stop_gradient(seq['feat'][:-2]))
if self.config.actor_grad == 'dynamics':
objective = target[1:]
elif self.config.actor_grad == 'reinforce':
baseline = self._target_critic(seq['feat'][:-2]).mode()
advantage = tf.stop_gradient(target[1:] - baseline)
objective = policy.log_prob(seq['action'][1:-1]) * advantage
elif self.config.actor_grad == 'both':
baseline = self._target_critic(seq['feat'][:-2]).mode()
advantage = tf.stop_gradient(target[1:] - baseline)
objective = policy.log_prob(seq['action'][1:-1]) * advantage
mix = common.schedule(self.config.actor_grad_mix, self.tfstep)
objective = mix * target[1:] + (1 - mix) * objective
metrics['actor_grad_mix'] = mix
else:
raise NotImplementedError(self.config.actor_grad)
ent = policy.entropy()
ent_scale = common.schedule(self.config.actor_ent, self.tfstep)
objective += ent_scale * ent
weight = tf.stop_gradient(seq['weight'])
actor_loss = -(weight[:-2] * objective).mean()
metrics['actor_ent'] = ent.mean()
metrics['actor_ent_scale'] = ent_scale
return actor_loss, metrics
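# Summary of the three actor objectives above (per imagined step):
#   dynamics:  J = V^lambda                              (backprop through the world model)
#   reinforce: J = log pi(a) * stop_grad(V^lambda - v)   (score-function gradient)
#   both:      J = mix * V^lambda + (1 - mix) * reinforce objective
# plus the entropy bonus; the loss is the negative, weight-masked mean of J.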
def critic_loss(self, seq, target):
# States: [z0] [z1] [z2] z3
# Rewards: [r0] [r1] [r2] r3
# Values: [v0] [v1] [v2] v3
# Weights: [ 1] [w1] [w2] w3
# Targets: [t0] [t1] [t2]
# Loss: l0 l1 l2
dist = self.critic(seq['feat'][:-1])
target = tf.stop_gradient(target)
weight = tf.stop_gradient(seq['weight'])
critic_loss = -(dist.log_prob(target) * weight[:-1]).mean()
metrics = {'critic': dist.mode().mean()}
return critic_loss, metrics
def target(self, seq):
# States: [z0] [z1] [z2] [z3]
# Rewards: [r0] [r1] [r2] r3
# Values: [v0] [v1] [v2] [v3]
# Discount: [d0] [d1] [d2] d3
# Targets: t0 t1 t2
reward = tf.cast(seq['reward'], tf.float32)
disc = tf.cast(seq['discount'], tf.float32)
value = self._target_critic(seq['feat']).mode()
# Skipping last time step because it is used for bootstrapping.
target = common.lambda_return(
reward[:-1], value[:-1], disc[:-1],
bootstrap=value[-1],
lambda_=self.config.discount_lambda,
axis=0)
metrics = {'critic_slow': value.mean(), 'critic_target': target.mean()}
return target, metrics
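# For reference, common.lambda_return computes the usual TD(lambda) target
#   V_t^lambda = r_t + d_t * ((1 - lambda) * v_{t+1} + lambda * V_{t+1}^lambda),
# bootstrapped from the critic value of the final imagined state (this summarizes the
# standard definition; see the common module for the exact implementation).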
def update_slow_target(self):
if self.config.slow_target:
if self._updates % self.config.slow_target_update == 0:
mix = 1.0 if self._updates == 0 else float(
self.config.slow_target_fraction)
for s, d in zip(self.critic.variables, self._target_critic.variables):
d.assign(mix * s + (1 - mix) * d)
self._updates.assign_add(1)
class PopulationActorCritic(ActorCritic):
def __init__(self, config, act_space, tfstep):
self.config = config
self.act_space = act_space
self.tfstep = tfstep
self.num_agents = config.num_agents
discrete = hasattr(act_space, 'n')
if self.config.actor.dist == 'auto':
self.config = self.config.update({'actor.dist': 'onehot' if discrete else 'trunc_normal'})
if self.config.actor_grad == 'auto':
self.config = self.config.update({'actor_grad': 'reinforce'})
self.actor = [common.MLP(act_space.shape[0], **self.config.actor) for _ in range(self.num_agents)]
self.critic = [common.MLP([], **self.config.critic) for _ in range(self.num_agents)]
if self.config.slow_target:
self._target_critic = [common.MLP([], **self.config.critic) for _ in range(self.num_agents)]
self._updates = tf.Variable(0, tf.int64)
else:
self._target_critic = self.critic
self.actor_opt = [common.Optimizer('actor', **self.config.actor_opt) for _ in range(self.num_agents)]
self.critic_opt = [common.Optimizer('critic', **self.config.critic_opt) for _ in range(self.num_agents)]
self.rewnorm = [common.StreamNorm(**self.config.reward_norm) for _ in range(self.num_agents)]
def train_indiv(self, world_model, start, is_terminal, reward_fn, cascade, idx, return_seq=False):
metrics = {}
hor = self.config.imag_horizon
with tf.GradientTape() as actor_tape:
seq = world_model.imagine(self.actor[idx], start, is_terminal, hor)
reward, rew_meta = reward_fn(seq, cascade, rtn_meta=True)
seq['reward'], mets1 = self.rewnorm[idx](reward)
mets1 = {f'reward_{k}': v for k, v in mets1.items()}
target, mets2 = self.target(seq, idx)
actor_loss, mets3 = self.actor_loss(seq, target, idx)
with tf.GradientTape() as critic_tape:
critic_loss, mets4 = self.critic_loss(seq, target, idx)
actor_mets = self.actor_opt[idx](actor_tape, actor_loss, self.actor[idx])
critic_mets = self.critic_opt[idx](critic_tape, critic_loss, self.critic[idx])
#metrics.update(**mets1, **mets2, **mets3, **mets4, **actor_mets, **critic_mets)
metrics.update(**rew_meta)
self.update_slow_target(idx=idx) # Variables exist after first forward pass.
if return_seq:
return metrics, seq
else:
return metrics
def train(self, world_model, start, is_terminal, reward_fn):
metrics = {}
cascade = []
for agent_idx in range(self.num_agents):
# update state based cascade
if self.config.cascade_metric == "euclidean":
mets, seq = self.train_indiv(world_model, start, is_terminal, reward_fn, cascade, agent_idx, return_seq=True)
if agent_idx == 0:
idxs = tf.range(tf.shape(seq[self.config.cascade_feat])[1])
ridxs = tf.random.shuffle(idxs)[:10]
cascade.append(tf.gather(seq[self.config.cascade_feat], ridxs, axis=1))
else:
mets = self.train_indiv(world_model, start, is_terminal, reward_fn, cascade, agent_idx, return_seq=False)
metrics.update(**{f'agent{agent_idx}' + key: val for key, val in mets.items()})
print(f"Trained explorer {agent_idx}", flush=True)
return metrics
def actor_loss(self, seq, target, idx):
# See description in ActorCritic for more info
metrics = {}
policy = self.actor[idx](tf.stop_gradient(seq['feat'][:-2]))
if self.config.actor_grad == 'dynamics':
objective = target[1:]
elif self.config.actor_grad == 'reinforce':
baseline = self._target_critic[idx](seq['feat'][:-2]).mode()
advantage = tf.stop_gradient(target[1:] - baseline)
objective = policy.log_prob(seq['action'][1:-1]) * advantage
elif self.config.actor_grad == 'both':
baseline = self._target_critic[idx](seq['feat'][:-2]).mode()
advantage = tf.stop_gradient(target[1:] - baseline)
objective = policy.log_prob(seq['action'][1:-1]) * advantage
mix = common.schedule(self.config.actor_grad_mix, self.tfstep)
objective = mix * target[1:] + (1 - mix) * objective
metrics['actor_grad_mix'] = mix
else:
raise NotImplementedError(self.config.actor_grad)
ent = policy.entropy()
ent_scale = common.schedule(self.config.actor_ent, self.tfstep)
objective += ent_scale * ent
weight = tf.stop_gradient(seq['weight'])
actor_loss = -(weight[:-2] * objective).mean()
metrics['actor_ent'] = ent.mean()
metrics['actor_ent_scale'] = ent_scale
return actor_loss, metrics
def critic_loss(self, seq, target, idx):
# States: [z0] [z1] [z2] z3
# Rewards: [r0] [r1] [r2] r3
# Values: [v0] [v1] [v2] v3
# Weights: [ 1] [w1] [w2] w3
# Targets: [t0] [t1] [t2]
# Loss: l0 l1 l2
dist = self.critic[idx](seq['feat'][:-1])
target = tf.stop_gradient(target)
weight = tf.stop_gradient(seq['weight'])
critic_loss = -(dist.log_prob(target) * weight[:-1]).mean()
metrics = {'critic': dist.mode().mean()}
return critic_loss, metrics
def target(self, seq, idx):
# States: [z0] [z1] [z2] [z3]
# Rewards: [r0] [r1] [r2] r3
# Values: [v0] [v1] [v2] [v3]
# Discount: [d0] [d1] [d2] d3
# Targets: t0 t1 t2
reward = tf.cast(seq['reward'], tf.float32)
disc = tf.cast(seq['discount'], tf.float32)
value = self._target_critic[idx](seq['feat']).mode()
# Skipping last time step because it is used for bootstrapping.
target = common.lambda_return(reward[:-1], value[:-1], disc[:-1], bootstrap=value[-1], lambda_=self.config.discount_lambda, axis=0)
metrics = {'critic_slow': value.mean(), 'critic_target': target.mean()}
return target, metrics
def update_slow_target(self, idx=0):
if self.config.slow_target:
if self._updates % self.config.slow_target_update == 0:
mix = 1.0 if self._updates == 0 else float(self.config.slow_target_fraction)
for s, d in zip(self.critic[idx].variables, self._target_critic[idx].variables):
d.assign(mix * s + (1 - mix) * d)
self._updates.assign_add(1)
|
cascade-main
|
dreamerv2/agent.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import pathlib
import re
import sys
import warnings
import pickle
try:
import rich.traceback
rich.traceback.install()
except ImportError:
pass
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger().setLevel('ERROR')
warnings.filterwarnings('ignore', '.*box bound precision lowered.*')
sys.path.append(str(pathlib.Path(__file__).parent))
sys.path.append(str(pathlib.Path(__file__).parent.parent))
import numpy as np
import agent
import common
def run(config):
logdir = pathlib.Path(config.logdir + config.xpid).expanduser()
logdir.mkdir(parents=True, exist_ok=True)
config.save(logdir / 'config.yaml')
print(config, '\n')
print('Logdir', logdir)
import tensorflow as tf
tf.config.experimental_run_functions_eagerly(not config.jit)
message = 'No GPU found, training will run on the CPU.'
if len(tf.config.experimental.list_physical_devices('GPU')) == 0:
print(message)
else:
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
assert config.precision in (16, 32), config.precision
if config.precision == 16:
from tensorflow.keras.mixed_precision import experimental as prec
prec.set_policy(prec.Policy('mixed_float16'))
## Load the stats that we keep track of
if (logdir / 'stats.pkl').exists():
stats = pickle.load(open(f"{logdir}/stats.pkl", "rb"))
print("Loaded stats: ", stats)
else:
stats = {
'num_deployments': 0,
'num_trains': 0,
'num_evals': 0
}
pickle.dump(stats, open(f"{logdir}/stats.pkl", "wb"))
multi_reward = config.task in common.DMC_TASK_IDS
replay_dir = logdir / 'train_episodes'
## load dataset - we don't want to reload the offline data if we have already deployed
if config.offline_dir == 'none' or stats['num_deployments'] > 0:
train_replay = common.Replay(replay_dir, offline_init=False,
multi_reward=multi_reward, **config.replay)
else:
train_replay = common.Replay(replay_dir, offline_init=True,
multi_reward=multi_reward, offline_directory=config.offline_dir, **config.replay)
eval_replay = common.Replay(logdir / 'eval_episodes', **dict(
capacity=config.replay.capacity // 10,
minlen=config.dataset.length,
maxlen=config.dataset.length,
multi_reward=multi_reward))
step = common.Counter(train_replay.stats['total_steps'])
outputs = [
common.TerminalOutput(),
common.JSONLOutput(logdir),
common.TensorBoardOutput(logdir),
]
logger = common.Logger(step, outputs, multiplier=config.action_repeat)
def make_env(mode, seed=1):
if '_' in config.task:
suite, task = config.task.split('_', 1)
else:
suite, task = config.task, ''
if suite == 'dmc':
env = common.DMC(
task, config.action_repeat, config.render_size, config.dmc_camera, save_path=logdir / 'videos')
env = common.NormalizeAction(env)
elif suite == 'atari':
env = common.Atari(
task, config.action_repeat, config.render_size,
config.atari_grayscale, life_done=False, save_path=logdir / 'videos') # do not terminate on life loss
env = common.OneHotAction(env)
elif suite == 'crafter':
assert config.action_repeat == 1
outdir = logdir / 'crafter' if mode == 'train' else None
reward = bool(['noreward', 'reward'].index(task)) or mode == 'eval'
env = common.Crafter(outdir, reward, save_path=logdir / 'videos')
env = common.OneHotAction(env)
elif suite == 'minigrid':
if mode == 'eval':
env = common.make_minigrid_env(task, fix_seed=True, seed=seed)
else:
env = common.make_minigrid_env(task, fix_seed=False, seed=None)
else:
raise NotImplementedError(suite)
env = common.TimeLimit(env, config.time_limit)
return env
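# Task strings are parsed as '<suite>_<task>' above, e.g. 'crafter_noreward',
# 'crafter_reward', 'atari_pong', 'dmc_walker_walk', or 'minigrid_<EnvName>'
# (illustrative examples; the available tasks depend on the installed suites).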
def per_episode(ep, mode, task='none'):
length = len(ep['reward']) - 1
if task in common.DMC_TASK_IDS:
scores = {
key: np.sum([val[idx] for val in ep['reward'][1:]])
for idx, key in enumerate(common.DMC_TASK_IDS[task])}
print_rews = f'{mode.title()} episode has {length} steps and returns '
print_rews += ''.join([f"{key}:{np.round(val,1)} " for key,val in scores.items()])
print(print_rews)
for key,val in scores.items():
logger.scalar(f'{mode}_return_{key}', val)
else:
score = float(ep['reward'].astype(np.float64).sum())
print(f'{mode.title()} episode has {length} steps and return {score:.1f}.')
logger.scalar(f'{mode}_return', score)
logger.scalar(f'{mode}_length', length)
for key, value in ep.items():
if re.match(config.log_keys_sum, key):
logger.scalar(f'sum_{mode}_{key}', ep[key].sum())
if re.match(config.log_keys_mean, key):
logger.scalar(f'mean_{mode}_{key}', ep[key].mean())
if re.match(config.log_keys_max, key):
logger.scalar(f'max_{mode}_{key}', ep[key].max(0).mean())
replay = dict(train=train_replay, eval=eval_replay)[mode]
logger.add(replay.stats, prefix=mode)
logger.write()
print('Create envs.\n')
train_envs = [make_env('train') for _ in range(config.envs)]
eval_envs = [make_env('eval') for _ in range(config.eval_envs)]
act_space = train_envs[0].act_space
obs_space = train_envs[0].obs_space
train_driver = common.Driver(train_envs)
train_driver.on_episode(lambda ep: per_episode(ep, mode='train', task=config.task))
train_driver.on_step(lambda tran, worker: step.increment())
train_driver.on_step(train_replay.add_step)
train_driver.on_reset(train_replay.add_step)
eval_driver = common.Driver(eval_envs)
eval_driver.on_episode(eval_replay.add_episode)
eval_driver.on_episode(lambda ep: per_episode(ep, mode='eval', task=config.task))
if stats['num_deployments'] == 0:
if config.offline_dir == 'none':
prefill = max(0, config.train_every - train_replay.stats['total_steps'])
if prefill:
print(f'Prefill dataset ({prefill} steps).')
random_agent = common.RandomAgent(act_space)
train_driver(random_agent, steps=prefill, episodes=1, policy_idx=-1)
train_driver.reset()
eval_driver(random_agent, episodes=1, policy_idx=-1)
eval_driver.reset()
stats['num_deployments'] += 1
train_dataset = iter(train_replay.dataset(**config.offline_model_dataset))
print('Create agent.\n')
agnt = agent.Agent(config, obs_space, act_space, step)
train_agent = common.CarryOverState(agnt.train)
# Attempt to load a pretrained full model.
# This can be used to test zero-shot performance on new tasks.
if config.load_pretrained != "none":
print("\nLoading pretrained model...")
train_agent(next(train_dataset))
path = pathlib.Path(config.load_pretrained).expanduser()
agnt.load(path)
## Assume we've done 1 full cycle
stats = {
'num_deployments': 1,
'num_trains': 1,
'num_evals': 1
}
print("\nSuccessfully loaded pretrained model.")
else:
print("\nInitializing agent...")
train_agent(next(train_dataset))
if (logdir / 'variables.pkl').exists():
print("\nStart loading model checkpoint...")
agnt.load(logdir / 'variables.pkl')
print("\nFinished initialize agent.")
# Initialize policies
eval_policies = {}
tasks = ['']
if config.task in common.DMC_TASK_IDS:
tasks = common.DMC_TASK_IDS[config.task]
for task in tasks:
eval_policies[task] = lambda *args, task=task: agnt.policy(*args, mode='eval', goal=task)  # bind the loop variable
expl_policies = {}
for idx in range(config.num_agents):
expl_policies[idx] = lambda *args, idx=idx: agnt.policy(*args, policy_idx=idx, mode='explore')  # bind the loop variable
## each iteration of the main loop does exactly one of the following:
# 1. deploy explorers to collect data
# 2. train WM, explorers, task policies etc.
# 3. evaluate models
while step < config.steps:
print(f"\nMain loop step {step.value}")
should_deploy = stats['num_deployments'] <= stats['num_evals']
should_train_wm = stats['num_trains'] < stats['num_deployments']
should_eval = stats['num_evals'] < stats['num_trains']
assert should_deploy + should_train_wm + should_eval == 1
if should_deploy:
print("\n\nStart collecting data...", flush=True)
## collect a batch of steps with the expl policy
## need to increment steps here
num_steps = int(config.train_every / config.num_agents)
for idx in range(config.num_agents):
expl_policy = expl_policies[idx]
train_driver(expl_policy, steps=num_steps, policy_idx=idx)
stats['num_deployments'] += 1
elif should_eval:
print('\n\nStart evaluation...', flush=True)
if int(step.value) % int(config.eval_every) != 0 or config.eval_type == 'none':
pass
elif config.eval_type == 'coincidental':
mets = common.eval(eval_driver, config, expl_policies, logdir)
for name, values in mets.items():
logger.scalar(name, np.array(values, np.float64).mean())
logger.write()
elif config.eval_type == 'labels':
tasks = ['']
if config.task in common.DMC_TASK_IDS:
tasks = common.DMC_TASK_IDS[config.task]
for idx, task in enumerate(tasks):
print("\n\nStart Evaluating " + task)
eval_policy = eval_policies[task]
eval_driver(eval_policy, episodes=config.eval_eps)
mets = common.get_stats(eval_driver, task=config.task, num_agents=config.num_agents, logdir=logdir)
rew = mets["eval_reward_" + task] if task != '' else mets["eval_reward"]
# logging
logger.scalar("eval_reward_" + task, np.mean(rew))
logger.write()
stats['num_evals'] += 1
elif should_train_wm:
print('\n\nStart model training...')
should_pretrain = (stats['num_trains'] == 0 and config.offline_dir != "none")
if should_pretrain:
# Use all offline data for pretrain
batch_size = config.offline_model_dataset["batch"] * config.offline_model_dataset["length"]
model_train_steps = train_replay._loaded_steps // batch_size - 1
else:
model_train_steps = config.offline_model_train_steps
model_step = common.Counter(0)
while model_step < model_train_steps:
model_step.increment()
mets = train_agent(next(train_dataset))
# save model every 1000
if int(model_step.value) % 1000 == 0:
agnt.save(logdir / 'variables.pkl')
stats['num_trains'] += 1
# save
pickle.dump(stats, open(f"{logdir}/stats.pkl", "wb"))
agnt.save(logdir / 'variables.pkl')
# closing all envs
for env in train_envs + eval_envs:
try:
env.close()
except Exception:
pass
|
cascade-main
|
dreamerv2/train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import atexit
import os
import sys
import threading
import traceback
import cloudpickle
import gym
import numpy as np
from .cdmc import make_dmc_all
from .recorder import Recorder
class GymWrapper:
def __init__(self, env, obs_key='image', act_key='action'):
self._env = env
self._obs_is_dict = hasattr(self._env.observation_space, 'spaces')
self._act_is_dict = hasattr(self._env.action_space, 'spaces')
self._obs_key = obs_key
self._act_key = act_key
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def obs_space(self):
if self._obs_is_dict:
spaces = self._env.observation_space.spaces.copy()
else:
spaces = {self._obs_key: self._env.observation_space}
return {
**spaces,
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
}
@property
def act_space(self):
if self._act_is_dict:
return self._env.action_space.spaces.copy()
else:
return {self._act_key: self._env.action_space}
def step(self, action):
if not self._act_is_dict:
action = action[self._act_key]
obs, reward, done, _, info = self._env.step(action)
if not self._obs_is_dict:
obs = {self._obs_key: obs}
obs['reward'] = float(reward)
obs['is_first'] = False
obs['is_last'] = done
obs['is_terminal'] = info.get('is_terminal', done)
return obs
def reset(self):
obs = self._env.reset()[0]
if not self._obs_is_dict:
obs = {self._obs_key: obs}
# print("obs 1:", obs)
obs['reward'] = 0.0
obs['is_first'] = True
obs['is_last'] = False
obs['is_terminal'] = False
return obs
def make_minigrid_env(task, fix_seed, seed):
import gym_minigrid
env = gym.make("MiniGrid-"+task)
env = gym_minigrid.wrappers.RGBImgPartialObsWrapper(env)
if fix_seed:
env = gym_minigrid.wrappers.ReseedWrapper(env, seeds=[seed])
env = GymWrapper(env)
env = ResizeImage(env)
if hasattr(env.act_space['action'], 'n'):
env = OneHotAction(env)
else:
env = NormalizeAction(env)
return env
class DMC:
def __init__(self, name, action_repeat=1, size=(64, 64), camera=None, save_path=None):
os.environ['MUJOCO_GL'] = 'egl'
domain, task = name.split('_', 1)
if task == 'all':
self._dict_reward = True
else:
self._dict_reward = False
if domain == 'cup': # Only domain with multiple words.
domain = 'ball_in_cup'
if domain == 'manip':
from dm_control import manipulation
self._env = manipulation.load(task + '_vision')
elif domain == 'locom':
from dm_control.locomotion.examples import basic_rodent_2020
self._env = getattr(basic_rodent_2020, task)()
elif task == 'all':
import time
seed = int(str(int((time.time()*10000)))[-6:]) # random seed generator
self._env = make_dmc_all(domain,
task,
task_kwargs=dict(random=seed),
environment_kwargs=dict(flat_observation=True),
visualize_reward=False)
else:
from dm_control import suite
self._env = suite.load(domain, task)
self._action_repeat = action_repeat
self._size = size
if camera in (-1, None):
camera = dict(
quadruped_walk=2, quadruped_run=2, quadruped_escape=2,
quadruped_fetch=2, locom_rodent_maze_forage=1,
locom_rodent_two_touch=1,
).get(name, 0)
self._camera = camera
self._ignored_keys = []
save_path.mkdir(parents=True, exist_ok=True)
self.save_path = save_path
for key, value in self._env.observation_spec().items():
if value.shape == (0,):
print(f"Ignoring empty observation key '{key}'.")
self._ignored_keys.append(key)
@property
def obs_space(self):
spaces = {
'image': gym.spaces.Box(0, 255, self._size + (3,), dtype=np.uint8),
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
}
for key, value in self._env.observation_spec().items():
if key in self._ignored_keys:
continue
if value.dtype == np.float64:
spaces[key] = gym.spaces.Box(-np.inf, np.inf, value.shape, np.float32)
elif value.dtype == np.uint8:
spaces[key] = gym.spaces.Box(0, 255, value.shape, np.uint8)
else:
raise NotImplementedError(value.dtype)
return spaces
@property
def act_space(self):
spec = self._env.action_spec()
action = gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32)
return {'action': action}
def step(self, action):
assert np.isfinite(action['action']).all(), action['action']
if self._dict_reward:
reward = []
else:
reward = 0.0
for _ in range(self._action_repeat):
time_step = self._env.step(action['action'])
if self._dict_reward:
curr_reward = []
for key, val in time_step.reward.items():
curr_reward.append(val)
if len(reward) == 0:
reward = curr_reward
else:
reward = [sum(x) for x in zip(reward, curr_reward)]
else:
reward += time_step.reward or 0.0
if time_step.last():
break
assert time_step.discount in (0, 1)
image = self._env.physics.render(*self._size, camera_id=self._camera)
obs = {
'reward': reward,
'is_first': False,
'is_last': time_step.last(),
'is_terminal': time_step.discount == 0,
'image': image,
}
obs.update({
k: v for k, v in dict(time_step.observation).items()
if k not in self._ignored_keys})
return obs
def reset(self):
time_step = self._env.reset()
obs = {
'reward': 0.0,
'is_first': True,
'is_last': False,
'is_terminal': False,
'image': self._env.physics.render(*self._size, camera_id=self._camera),
}
obs.update({
k: v for k, v in dict(time_step.observation).items()
if k not in self._ignored_keys})
return obs
class Atari:
LOCK = threading.Lock()
def __init__(
self, name, action_repeat=4, size=(84, 84), grayscale=True, noops=30,
life_done=False, sticky=True, all_actions=False, save_path=None):
assert size[0] == size[1]
import gym.wrappers
import gym.envs.atari
if name == 'james_bond':
name = 'jamesbond'
with self.LOCK:
env = gym.envs.atari.AtariEnv(
game=name, obs_type='rgb', frameskip=1,
repeat_action_probability=0.25 if sticky else 0.0,
full_action_space=all_actions)
# Avoid unnecessary rendering in inner env.
env._get_obs = lambda: None
# Tell wrapper that the inner env has no action repeat.
env.spec = gym.envs.registration.EnvSpec('NoFrameskip-v0')
self._env = gym.wrappers.AtariPreprocessing(
env, noops, action_repeat, size[0], life_done, grayscale)
save_path.mkdir(parents=True, exist_ok=True)
self.save_path = save_path
self._size = size
self._grayscale = grayscale
@property
def obs_space(self):
shape = self._size + (1 if self._grayscale else 3,)
return {
'image': gym.spaces.Box(0, 255, shape, np.uint8),
'ram': gym.spaces.Box(0, 255, (128,), np.uint8),
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
}
@property
def act_space(self):
return {'action': self._env.action_space}
def step(self, action):
image, reward, done, info = self._env.step(action['action'])
if self._grayscale:
image = image[..., None]
return {
'image': image,
'ram': self._env.env.ale.getRAM(), #if not self.record_video else self._env._env.env.ale.getRAM(),
'reward': reward,
'is_first': False,
'is_last': done,
'is_terminal': done,
}
def reset(self):
with self.LOCK:
image = self._env.reset()
if self._grayscale:
image = image[..., None]
return {
'image': image,
'ram': self._env.env.ale.getRAM(), #if not self.record_video else self._env._env.env.ale.getRAM(),
# 'ram': self._env.env._get_ram() if not self.record_video else self._env._env.env._get_ram(),
'reward': 0.0,
'is_first': True,
'is_last': False,
'is_terminal': False,
}
def close(self):
return self._env.close()
class Crafter:
def __init__(self, outdir=None, reward=True, seed=None, save_path=None):
import crafter
self._env = crafter.Env(reward=reward, seed=seed)
self._env = Recorder(
self._env, outdir,
save_stats=True,
save_video=False,
save_episode=False,
)
if save_path:
save_path.mkdir(parents=True, exist_ok=True)
self.save_path = save_path
self._achievements = crafter.constants.achievements.copy()
@property
def obs_space(self):
spaces = {
'image': self._env.observation_space,
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
'log_reward': gym.spaces.Box(-np.inf, np.inf, (), np.float32),
}
spaces.update({
f'log_achievement_{k}': gym.spaces.Box(0, 2 ** 31 - 1, (), np.int32)
for k in self._achievements})
return spaces
@property
def act_space(self):
return {'action': self._env.action_space}
def step(self, action):
image, reward, done, info = self._env.step(action['action'])
obs = {
'image': image,
'reward': reward,
'is_first': False,
'is_last': done,
'is_terminal': info['discount'] == 0,
'log_reward': info['reward'],
}
obs.update({
f'log_achievement_{k}': v
for k, v in info['achievements'].items()})
return obs
def reset(self):
obs = {
'image': self._env.reset(),
'reward': 0.0,
'is_first': True,
'is_last': False,
'is_terminal': False,
'log_reward': 0.0,
}
obs.update({
f'log_achievement_{k}': 0
for k in self._achievements})
return obs
class Dummy:
def __init__(self):
pass
@property
def obs_space(self):
return {
'image': gym.spaces.Box(0, 255, (64, 64, 3), dtype=np.uint8),
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
}
@property
def act_space(self):
return {'action': gym.spaces.Box(-1, 1, (6,), dtype=np.float32)}
def step(self, action):
return {
'image': np.zeros((64, 64, 3)),
'reward': 0.0,
'is_first': False,
'is_last': False,
'is_terminal': False,
}
def reset(self):
return {
'image': np.zeros((64, 64, 3)),
'reward': 0.0,
'is_first': True,
'is_last': False,
'is_terminal': False,
}
class TimeLimit:
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
def step(self, action):
assert self._step is not None, 'Must reset environment.'
obs = self._env.step(action)
self._step += 1
if self._duration and self._step >= self._duration:
obs['is_last'] = True
self._step = None
return obs
def reset(self):
self._step = 0
return self._env.reset()
class NormalizeAction:
def __init__(self, env, key='action'):
self._env = env
self._key = key
space = env.act_space[key]
self._mask = np.isfinite(space.low) & np.isfinite(space.high)
self._low = np.where(self._mask, space.low, -1)
self._high = np.where(self._mask, space.high, 1)
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def act_space(self):
low = np.where(self._mask, -np.ones_like(self._low), self._low)
high = np.where(self._mask, np.ones_like(self._low), self._high)
space = gym.spaces.Box(low, high, dtype=np.float32)
return {**self._env.act_space, self._key: space}
def step(self, action):
orig = (action[self._key] + 1) / 2 * (self._high - self._low) + self._low
orig = np.where(self._mask, orig, action[self._key])
return self._env.step({**action, self._key: orig})
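# Exposes a discrete action space as one-hot vectors; step() validates the
# one-hot encoding and passes the integer index to the wrapped environment.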
class OneHotAction:
def __init__(self, env, key='action'):
assert hasattr(env.act_space[key], 'n')
self._env = env
self._key = key
self._random = np.random.RandomState()
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def act_space(self):
shape = (self._env.act_space[self._key].n,)
space = gym.spaces.Box(low=0, high=1, shape=shape, dtype=np.float32)
space.sample = self._sample_action
space.n = shape[0]
return {**self._env.act_space, self._key: space}
def step(self, action):
index = np.argmax(action[self._key]).astype(int)
reference = np.zeros_like(action[self._key])
reference[index] = 1
if not np.allclose(reference, action[self._key]):
raise ValueError(f'Invalid one-hot action:\n{action}')
return self._env.step({**action, self._key: index})
def reset(self):
return self._env.reset()
def _sample_action(self):
    # act_space is a dict of spaces, so index by key before reading .n.
    actions = self._env.act_space[self._key].n
index = self._random.randint(0, actions)
reference = np.zeros(actions, dtype=np.float32)
reference[index] = 1.0
return reference
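# Resizes image-like observations (entries with more than one spatial
# dimension) whose size differs from the target, using PIL nearest-neighbor
# resampling.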
class ResizeImage:
def __init__(self, env, size=(64, 64)):
self._env = env
self._size = size
self._keys = [
k for k, v in env.obs_space.items()
if v.shape and len(v.shape) > 1 and v.shape[:2] != size]
print(f'Resizing keys {",".join(self._keys)} to {self._size}.')
if self._keys:
from PIL import Image
self._Image = Image
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def obs_space(self):
    # Return the full observation space, with the image-like entries replaced
    # by their resized shapes, so that non-image keys are preserved.
    spaces = dict(self._env.obs_space)
    for key in self._keys:
      shape = self._size + spaces[key].shape[2:]
      spaces[key] = gym.spaces.Box(0, 255, shape, np.uint8)
    return spaces
def step(self, action):
obs = self._env.step(action)
for key in self._keys:
obs[key] = self._resize(obs[key])
return obs
def reset(self):
obs = self._env.reset()
for key in self._keys:
obs[key] = self._resize(obs[key])
return obs
def _resize(self, image):
image = self._Image.fromarray(image)
image = image.resize(self._size, self._Image.NEAREST)
image = np.array(image)
return image
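# Adds the environment's rendered RGB frame to the observation dict under the
# given key (default 'image').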
class RenderImage:
def __init__(self, env, key='image'):
self._env = env
self._key = key
self._shape = self._env.render().shape
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def obs_space(self):
spaces = self._env.obs_space
spaces[self._key] = gym.spaces.Box(0, 255, self._shape, np.uint8)
return spaces
def step(self, action):
obs = self._env.step(action)
obs[self._key] = self._env.render('rgb_array')
return obs
def reset(self):
obs = self._env.reset()
obs[self._key] = self._env.render('rgb_array')
return obs
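# Runs an environment in a background process or thread and talks to it over
# a pipe; step() and reset() return callable promises unless blocking=True.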
class Async:
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_CLOSE = 4
_EXCEPTION = 5
def __init__(self, constructor, strategy='thread'):
self._pickled_ctor = cloudpickle.dumps(constructor)
if strategy == 'process':
import multiprocessing as mp
context = mp.get_context('spawn')
elif strategy == 'thread':
import multiprocessing.dummy as context
else:
raise NotImplementedError(strategy)
self._strategy = strategy
self._conn, conn = context.Pipe()
self._process = context.Process(target=self._worker, args=(conn,))
atexit.register(self.close)
self._process.start()
self._receive() # Ready.
self._obs_space = None
self._act_space = None
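  # access() and call() send a request over the pipe and return the bound
  # _receive method as a promise; invoking the promise blocks for the result.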
def access(self, name):
self._conn.send((self._ACCESS, name))
return self._receive
def call(self, name, *args, **kwargs):
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
pass # The connection was already closed.
self._process.join(5)
@property
def obs_space(self):
if not self._obs_space:
self._obs_space = self.access('obs_space')()
return self._obs_space
@property
def act_space(self):
if not self._act_space:
self._act_space = self.access('act_space')()
return self._act_space
def step(self, action, blocking=False):
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=False):
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
try:
message, payload = self._conn.recv()
except (OSError, EOFError):
raise RuntimeError('Lost connection to environment worker.')
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
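  # Runs in the child process/thread: builds the env from the pickled
  # constructor and serves ACCESS/CALL requests until CLOSE or disconnect.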
def _worker(self, conn):
try:
ctor = cloudpickle.loads(self._pickled_ctor)
env = ctor()
conn.send((self._RESULT, None)) # Ready.
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception:
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
print('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
finally:
try:
conn.close()
except IOError:
pass # The connection was already closed.
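
# Minimal usage sketch (added for illustration; not part of the original
# module): compose the Dummy placeholder env with the NormalizeAction and
# TimeLimit wrappers defined above and roll out a single episode. Relies only
# on the numpy/gym imports already used by this module.
if __name__ == '__main__':
  env = TimeLimit(NormalizeAction(Dummy()), duration=5)
  obs = env.reset()
  steps = 0
  while not obs['is_last']:
    # Sample a random action from the normalized [-1, 1] space and step.
    action = {'action': env.act_space['action'].sample()}
    obs = env.step(action)
    steps += 1
  print(f'Dummy episode finished after {steps} steps.')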
|
cascade-main
|
dreamerv2/common/envs.py
|