# Copyright 2024 EPFL and Apple Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# Some functions are based on the timm code base
# https://github.com/huggingface/pytorch-image-models
# --------------------------------------------------------

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange


def pair(t):
    return t if isinstance(t, tuple) else (t, t)


def softmax1(tensor):
    # Softmax with an extra implicit zero logit, so a row can assign a total weight of less than 1.
    # See https://www.evanmiller.org/attention-is-off-by-one.html
    return F.pad(tensor, (0, 1)).softmax(dim=-1)[..., :-1]
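

# Illustrative usage sketch (the helper below is not part of the original module): with
# very negative logits, softmax1 lets the attention weights collapse toward zero, whereas
# a standard softmax always normalizes each row to 1.
def _softmax1_demo():
    logits = torch.full((1, 4), -1e4)
    print(softmax1(logits).sum(dim=-1))        # ~0: the implicit zero logit absorbs the mass
    print(logits.softmax(dim=-1).sum(dim=-1))  # exactly 1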


def build_1d_sincos_posemb(max_len, embed_dim=1024, temperature=10000.):
    """Sine-cosine positional embeddings from MoCo-v3, adapted back to 1D
    Returns positional embedding of shape (1, N, D)
    """
    arange = torch.arange(max_len, dtype=torch.float32)  # Shape (N,)
    assert embed_dim % 2 == 0, 'Embed dimension must be divisible by 2 for 1D sin-cos position embedding'
    pos_dim = embed_dim // 2
    omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim  # Shape (D/2,)
    omega = 1. / (temperature ** omega)
    out = torch.einsum('n,d->nd', [arange, omega])  # Outer product, shape (N, D/2)
    pos_emb = torch.cat([torch.sin(out), torch.cos(out)], dim=1).unsqueeze(0)  # Shape (1, N, D)
    return pos_emb
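

# Illustrative sketch (not part of the original module): the embedding concatenates sine
# and cosine features at geometrically spaced frequencies, so each position gets a unique,
# deterministic code that is typically registered as a non-learnable buffer.
def _sincos_1d_demo():
    pos_emb = build_1d_sincos_posemb(max_len=16, embed_dim=64)
    assert pos_emb.shape == (1, 16, 64)
    return pos_emb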


def build_2d_sincos_posemb(h, w, embed_dim=1024, temperature=10000.0):
    """Sine-cosine positional embeddings as used in MoCo-v3
    Returns positional embedding of shape (1, N, D) where N = W*H
    """
    grid_w = torch.arange(w, dtype=torch.float32)  # Shape (W,)
    grid_h = torch.arange(h, dtype=torch.float32)  # Shape (H,)
    grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing='ij')  # Shapes (W, H)
    assert embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
    pos_dim = embed_dim // 4
    omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim  # Shape (D/4,)
    omega = 1. / (temperature ** omega)
    out_w = torch.einsum('n,d->nd', [grid_w.reshape(-1), omega])  # Outer product, shape (W*H, D/4)
    out_h = torch.einsum('n,d->nd', [grid_h.reshape(-1), omega])  # Outer product, shape (W*H, D/4)
    pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1).unsqueeze(0)  # Shape (1, W*H, D)
    return pos_emb
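

# Illustrative sketch (not part of the original module): half of the channels encode the
# column index and half the row index, each as a 1D sin-cos code, so the result can be
# added to a flattened grid of patch tokens with matching token order.
def _sincos_2d_demo():
    h, w, dim = 4, 6, 64
    pos_emb = build_2d_sincos_posemb(h, w, embed_dim=dim)
    assert pos_emb.shape == (1, h * w, dim)
    patch_tokens = torch.zeros(2, h * w, dim)  # (B, N, D) dummy tokens
    return patch_tokens + pos_emb              # broadcasts over the batch dimension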


def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    Implementation from timm: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    output = x.div(keep_prob) * random_tensor
    return output


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)
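

# Illustrative sketch (not part of the original module): during training, stochastic depth
# zeroes the residual branch for a random subset of samples and rescales the rest by
# 1 / keep_prob so the expected output is unchanged; in eval mode it is the identity.
def _drop_path_demo():
    dp = DropPath(drop_prob=0.2)
    x = torch.ones(8, 3, 16)
    dp.train()
    y_train = dp(x)  # some samples zeroed, the rest scaled by 1 / 0.8
    dp.eval()
    y_eval = dp(x)   # identical to x
    return y_train, y_eval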


class LayerNorm(nn.Module):
    """Custom implementation of LayerNorm with the option to disable the bias term"""

    def __init__(self, normalized_shape: int, eps=1e-5, bias=True):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        if bias:
            self.bias = nn.Parameter(torch.zeros(normalized_shape))
        else:
            self.register_buffer("bias", torch.zeros(normalized_shape))

        # Normalized shape must be a tuple for F.layer_norm
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        return nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, eps=self.eps)
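

# Illustrative sketch (not part of the original module): with bias=False the bias is kept
# as a zero, non-trainable buffer rather than a parameter, so the forward pass is unchanged
# but no bias term is learned or counted among the parameters.
def _layernorm_bias_demo():
    ln = LayerNorm(64, bias=False)
    n_params = sum(p.numel() for p in ln.parameters())  # only the 64 weight entries
    y = ln(torch.randn(2, 10, 64))
    return n_params, y.shape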


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., bias=True):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class GatedMlp(nn.Module):
    """Implements SwiGLU and other gated feed-forward layers from Noam Shazeer's paper: https://arxiv.org/abs/2002.05202
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, bias=True):
        super().__init__()
        out_features = out_features or in_features
        # If gated, multiply hidden_dim by 2/3 to account for extra matmul
        hidden_features = int(2 * (hidden_features or in_features) / 3)
        self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
        self.fc3 = nn.Linear(in_features, hidden_features, bias=bias)

    def forward(self, x):
        x = self.fc2(self.act(self.fc1(x)) * self.fc3(x))
        return x
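

# Illustrative sketch (not part of the original module): SwiGLU computes
# fc2(act(fc1(x)) * fc3(x)); the hidden width is scaled by 2/3 so the three matmuls cost
# roughly the same as a standard two-matmul MLP with the requested hidden size.
def _gated_mlp_demo():
    mlp = GatedMlp(in_features=64, hidden_features=4 * 64)  # effective hidden width int(2*256/3) = 170
    y = mlp(torch.randn(2, 10, 64))
    return y.shape  # torch.Size([2, 10, 64])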


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, proj_bias=True, attn_drop=0., proj_drop=0., allow_zero_attn=False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.allow_zero_attn = allow_zero_attn

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        if mask is not None:
            # Boolean mask of shape (B, N, N); True marks positions that must not be attended to
            mask = mask.unsqueeze(1)  # Unsqueeze attention mask for multi-head
            attn = attn.masked_fill(mask, -torch.finfo(attn.dtype).max)

        if self.allow_zero_attn:
            attn = softmax1(attn)
        else:
            attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
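

# Illustrative sketch (not part of the original module): the mask is a boolean tensor of
# shape (B, N, N) in which True entries are blocked; padding tokens can be masked out as below.
def _attention_mask_demo():
    attn = Attention(dim=64, num_heads=8)
    x = torch.randn(2, 10, 64)
    pad = torch.zeros(2, 10, dtype=torch.bool)
    pad[:, 8:] = True                          # pretend the last two tokens are padding
    mask = pad[:, None, :].expand(-1, 10, -1)  # (B, N, N): no query may attend to padding keys
    return attn(x, mask=mask).shape            # torch.Size([2, 10, 64])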


class CrossAttention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, proj_bias=True, attn_drop=0., proj_drop=0., allow_zero_attn=False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.allow_zero_attn = allow_zero_attn

        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, context, mask=None):
        B, N, C = x.shape
        _, M, _ = context.shape

        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        kv = self.kv(context).reshape(B, M, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        if mask is not None:
            mask = rearrange(mask, "b n m -> b 1 n m")  # Unsqueeze / reshape for multi-head
            attn = attn.masked_fill(mask, -torch.finfo(attn.dtype).max)

        if self.allow_zero_attn:
            attn = softmax1(attn)
        else:
            attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class NormAttention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, proj_bias=True, norm_layer=nn.LayerNorm, attn_drop=0., proj_drop=0., allow_zero_attn=False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.allow_zero_attn = allow_zero_attn

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)

        self.q_norm = norm_layer(head_dim)
        self.k_norm = norm_layer(head_dim)

    def forward(self, x, mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        # QK-norm: normalize queries and keys per head before computing attention logits
        q = self.q_norm(q)
        k = self.k_norm(k)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        if mask is not None:
            mask = mask.unsqueeze(1)  # Unsqueeze for multi-head
            attn = attn.masked_fill(mask, -torch.finfo(attn.dtype).max)

        if self.allow_zero_attn:
            attn = softmax1(attn)
        else:
            attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class NormCrossAttention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, proj_bias=True, norm_layer=nn.LayerNorm, attn_drop=0., proj_drop=0., allow_zero_attn=False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.allow_zero_attn = allow_zero_attn

        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)

        self.q_norm = norm_layer(head_dim)
        self.k_norm = norm_layer(head_dim)

    def forward(self, x, context, mask=None):
        B, N, C = x.shape
        _, M, _ = context.shape

        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        kv = self.kv(context).reshape(B, M, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        q = self.q_norm(q)
        k = self.k_norm(k)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        if mask is not None:
            mask = rearrange(mask, "b n m -> b 1 n m")  # Unsqueeze / reshape for multi-head
            attn = attn.masked_fill(mask, -torch.finfo(attn.dtype).max)

        if self.allow_zero_attn:
            attn = softmax1(attn)
        else:
            attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=True, proj_bias=True, mlp_bias=True, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, gated_mlp=False, qk_norm=False, allow_zero_attn=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        if not qk_norm:
            self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, attn_drop=attn_drop, proj_drop=drop, allow_zero_attn=allow_zero_attn)
        else:
            self.attn = NormAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, norm_layer=norm_layer, attn_drop=attn_drop, proj_drop=drop, allow_zero_attn=allow_zero_attn)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        if not gated_mlp:
            self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, bias=mlp_bias, drop=drop)
        else:
            self.mlp = GatedMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, bias=mlp_bias)

    def forward(self, x, mask=None):
        x = x + self.drop_path(self.attn(self.norm1(x), mask))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
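

# Illustrative sketch (not part of the original module): a pre-norm Transformer block whose
# QK-norm attention, gated MLP, and stochastic depth are all selected via constructor flags.
def _block_demo():
    block = Block(dim=64, num_heads=8, qk_norm=True, gated_mlp=True, drop_path=0.1)
    x = torch.randn(2, 10, 64)
    return block(x).shape  # torch.Size([2, 10, 64])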


class DecoderBlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=True, proj_bias=True, mlp_bias=True, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, gated_mlp=False, qk_norm=False, allow_zero_attn=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        if not qk_norm:
            self.self_attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, attn_drop=attn_drop, proj_drop=drop, allow_zero_attn=allow_zero_attn)
            self.cross_attn = CrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, attn_drop=attn_drop, proj_drop=drop, allow_zero_attn=allow_zero_attn)
        else:
            self.self_attn = NormAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, norm_layer=norm_layer, attn_drop=attn_drop, proj_drop=drop, allow_zero_attn=allow_zero_attn)
            self.cross_attn = NormCrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, norm_layer=norm_layer, attn_drop=attn_drop, proj_drop=drop, allow_zero_attn=allow_zero_attn)

        self.query_norm = norm_layer(dim)
        self.context_norm = norm_layer(dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        if not gated_mlp:
            self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, bias=mlp_bias, drop=drop)
        else:
            self.mlp = GatedMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, bias=mlp_bias)

    def forward(self, x, context, sa_mask=None, xa_mask=None):
        x = x + self.drop_path(self.self_attn(self.norm1(x), sa_mask))
        x = x + self.drop_path(self.cross_attn(self.query_norm(x), self.context_norm(context), xa_mask))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
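

# Illustrative sketch (not part of the original module): the decoder block runs self-attention
# over the queries, then cross-attention into an encoder context of a different length, then
# the MLP, each behind its own residual connection.
def _decoder_block_demo():
    block = DecoderBlock(dim=64, num_heads=8)
    queries = torch.randn(2, 10, 64)      # (B, N, D) decoder tokens
    context = torch.randn(2, 20, 64)      # (B, M, D) encoder tokens
    return block(queries, context).shape  # torch.Size([2, 10, 64])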


class CrossAttentionBlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, gated_mlp=False, allow_zero_attn=False):
        super().__init__()
        self.cross_attn = CrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, allow_zero_attn=allow_zero_attn)
        self.query_norm = norm_layer(dim)
        self.context_norm = norm_layer(dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        if not gated_mlp:
            self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        else:
            self.mlp = GatedMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer)

    def forward(self, x, context, xa_mask=None, **kwargs):
        x = x + self.drop_path(self.cross_attn(self.query_norm(x), self.context_norm(context), xa_mask))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x