# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict, List, Optional, Sequence, Union

import torch
import torch.nn as nn
from mmcv.cnn.bricks.transformer import BaseTransformerLayer
from mmengine.model import ModuleList

from mmocr.models.common.dictionary import Dictionary
from mmocr.models.common.modules import PositionalEncoding
from mmocr.registry import MODELS
from mmocr.structures import TextRecogDataSample

from .base import BaseDecoder


@MODELS.register_module()
class ABILanguageDecoder(BaseDecoder):
r"""Transformer-based language model responsible for spell correction.
Implementation of language model of \
`ABINet <https://arxiv.org/pdf/2103.06495>`_.
Args:
dictionary (dict or :obj:`Dictionary`): The config for `Dictionary` or
the instance of `Dictionary`. The dictionary must have an end
token.
d_model (int): Hidden size :math:`E` of model. Defaults to 512.
n_head (int): Number of multi-attention heads.
d_inner (int): Hidden size of feedforward network model.
n_layers (int): The number of similar decoding layers.
dropout (float): Dropout rate.
detach_tokens (bool): Whether to block the gradient flow at input
tokens.
use_self_attn (bool): If True, use self attention in decoder layers,
otherwise cross attention will be used.
max_seq_len (int): Maximum sequence length :math:`T`. The
sequence is usually generated from decoder. Defaults to 40.
module_loss (dict, optional): Config to build loss. Defaults to None.
postprocessor (dict, optional): Config to build postprocessor.
Defaults to None.
init_cfg (dict or list[dict], optional): Initialization configs.
Defaults to None.
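
    Example:
        A minimal usage sketch (illustrative only; the dict file path below
        is a hypothetical placeholder, and shapes assume the defaults above):

        >>> import torch
        >>> decoder = ABILanguageDecoder(
        ...     dictionary=dict(
        ...         type='Dictionary',
        ...         dict_file='path/to/dict_file.txt',  # hypothetical path
        ...         with_end=True))
        >>> vision_logits = torch.randn(2, 40, decoder.dictionary.num_classes)
        >>> out = decoder.forward_train(out_enc=vision_logits)
        >>> # out['feature']: (N, T, E); out['logits']: (N, T, C)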
"""
def __init__(self,
dictionary: Union[Dict, Dictionary],
d_model: int = 512,
n_head: int = 8,
d_inner: int = 2048,
n_layers: int = 4,
dropout: float = 0.1,
detach_tokens: bool = True,
use_self_attn: bool = False,
max_seq_len: int = 40,
module_loss: Optional[Dict] = None,
postprocessor: Optional[Dict] = None,
init_cfg: Optional[Union[Dict, List[Dict]]] = None,
**kwargs) -> None:
super().__init__(
dictionary=dictionary,
module_loss=module_loss,
postprocessor=postprocessor,
max_seq_len=max_seq_len,
init_cfg=init_cfg)
assert self.dictionary.end_idx is not None,\
'Dictionary must contain an end token! (with_end=True)'
self.detach_tokens = detach_tokens
self.d_model = d_model
self.proj = nn.Linear(self.dictionary.num_classes, d_model, False)
self.token_encoder = PositionalEncoding(
d_model, n_position=self.max_seq_len, dropout=0.1)
self.pos_encoder = PositionalEncoding(
d_model, n_position=self.max_seq_len)
if use_self_attn:
operation_order = ('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')
else:
operation_order = ('cross_attn', 'norm', 'ffn', 'norm')
decoder_layer = BaseTransformerLayer(
operation_order=operation_order,
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=d_model,
num_heads=n_head,
attn_drop=dropout,
dropout_layer=dict(type='Dropout', drop_prob=dropout),
),
ffn_cfgs=dict(
type='FFN',
embed_dims=d_model,
feedforward_channels=d_inner,
ffn_drop=dropout,
),
norm_cfg=dict(type='LN'),
)
self.decoder_layers = ModuleList(
[copy.deepcopy(decoder_layer) for _ in range(n_layers)])
self.cls = nn.Linear(d_model, self.dictionary.num_classes)

    def forward_train(
self,
feat: Optional[torch.Tensor] = None,
        out_enc: Optional[torch.Tensor] = None,
data_samples: Optional[Sequence[TextRecogDataSample]] = None
) -> Dict:
"""
        Args:
            feat (torch.Tensor, optional): Not required. Feature map
                placeholder. Defaults to None.
            out_enc (torch.Tensor): Logits with shape :math:`(N, T, C)`.
                Defaults to None.
            data_samples (list[TextRecogDataSample], optional): Not required.
                DataSample placeholder. Defaults to None.

        Returns:
            A dict with keys ``feature`` and ``logits``.

            - feature (Tensor): Shape :math:`(N, T, E)`. Raw textual features
              for the vision-language aligner.
            - logits (Tensor): Shape :math:`(N, T, C)`. The raw logits for
              characters after spell correction.
"""
lengths = self._get_length(out_enc)
lengths.clamp_(2, self.max_seq_len)
tokens = torch.softmax(out_enc, dim=-1)
if self.detach_tokens:
tokens = tokens.detach()
embed = self.proj(tokens) # (N, T, E)
embed = self.token_encoder(embed) # (N, T, E)
padding_mask = self._get_padding_mask(lengths, self.max_seq_len)
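        # The queries are zeros plus positional encoding, so they carry
        # position information only; all content is gathered from the token
        # embeddings via cross attention, making this a cloze-style corrector.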
zeros = embed.new_zeros(*embed.shape)
query = self.pos_encoder(zeros)
query = query.permute(1, 0, 2) # (T, N, E)
embed = embed.permute(1, 0, 2)
location_mask = self._get_location_mask(self.max_seq_len,
tokens.device)
output = query
for m in self.decoder_layers:
output = m(
query=output,
key=embed,
value=embed,
attn_masks=location_mask,
key_padding_mask=padding_mask)
output = output.permute(1, 0, 2) # (N, T, E)
out_enc = self.cls(output) # (N, T, C)
return {'feature': output, 'logits': out_enc}

    def forward_test(
self,
feat: Optional[torch.Tensor] = None,
        logits: Optional[torch.Tensor] = None,
data_samples: Optional[Sequence[TextRecogDataSample]] = None
) -> Dict:
"""
        Args:
            feat (torch.Tensor, optional): Not required. Feature map
                placeholder. Defaults to None.
            logits (Tensor): Raw language logits. Shape :math:`(N, T, C)`.
                Defaults to None.
            data_samples (list[TextRecogDataSample], optional): Not required.
                DataSample placeholder. Defaults to None.

        Returns:
            A dict with keys ``feature`` and ``logits``.

            - feature (Tensor): Shape :math:`(N, T, E)`. Raw textual features
              for the vision-language aligner.
            - logits (Tensor): Shape :math:`(N, T, C)`. The raw logits for
              characters after spell correction.
"""
return self.forward_train(feat, logits, data_samples)

    def _get_length(self, logit: torch.Tensor, dim: int = -1) -> torch.Tensor:
        """Greedy decoder to obtain length from logit.

        Returns the position of the first end token plus one, or the full
        sequence length if no end token is found.
        """
# out as a boolean vector indicating the existence of end token(s)
out = (logit.argmax(dim=-1) == self.dictionary.end_idx)
abn = out.any(dim)
# Get the first index of end token
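        # e.g. out = [F, F, T, F, T] -> cumsum = [0, 0, 1, 1, 2], so
        # (cumsum == 1) & out keeps only the first True; max(dim)[1] then
        # returns its index (2 here), giving a length of 3 after the +1 below.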
out = ((out.cumsum(dim) == 1) & out).max(dim)[1]
out = out + 1
out = torch.where(abn, out, out.new_tensor(logit.shape[1]))
return out

    @staticmethod
    def _get_location_mask(
            seq_len: int,
            device: Optional[Union[torch.device, str]] = None
    ) -> torch.Tensor:
"""Generate location masks given input sequence length.
Args:
seq_len (int): The length of input sequence to transformer.
device (torch.device or str, optional): The device on which the
masks will be placed.
Returns:
Tensor: A mask tensor of shape (seq_len, seq_len) with -infs on
diagonal and zeros elsewhere.
"""
mask = torch.eye(seq_len, device=device)
mask = mask.float().masked_fill(mask == 1, float('-inf'))
return mask

    @staticmethod
    def _get_padding_mask(length: torch.Tensor,
                          max_length: int) -> torch.Tensor:
        """Generate padding masks.

        Args:
            length (Tensor): Shape :math:`(N,)`.
            max_length (int): The maximum sequence length :math:`T`.

        Returns:
            Tensor: A bool tensor of shape :math:`(N, T)` that is True at
            positions beyond each sequence's length and False elsewhere.
        """
length = length.unsqueeze(-1)
grid = torch.arange(0, max_length, device=length.device).unsqueeze(0)
return grid >= length