# mmsegmentation-master/mmseg/models/decode_heads/fcn_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .decode_head import BaseDecodeHead
@HEADS.register_module()
class FCNHead(BaseDecodeHead):
"""Fully Convolution Networks for Semantic Segmentation.
This head is implemented of `FCNNet <https://arxiv.org/abs/1411.4038>`_.
Args:
num_convs (int): Number of convs in the head. Default: 2.
kernel_size (int): The kernel size for convs in the head. Default: 3.
        concat_input (bool): Whether to concatenate the input and output of
            convs before the classification layer. Default: True.
dilation (int): The dilation rate for convs in the head. Default: 1.
"""
def __init__(self,
num_convs=2,
kernel_size=3,
concat_input=True,
dilation=1,
**kwargs):
assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int)
self.num_convs = num_convs
self.concat_input = concat_input
self.kernel_size = kernel_size
super(FCNHead, self).__init__(**kwargs)
if num_convs == 0:
assert self.in_channels == self.channels
conv_padding = (kernel_size // 2) * dilation
convs = []
for i in range(num_convs):
_in_channels = self.in_channels if i == 0 else self.channels
convs.append(
ConvModule(
_in_channels,
self.channels,
kernel_size=kernel_size,
padding=conv_padding,
dilation=dilation,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
if len(convs) == 0:
self.convs = nn.Identity()
else:
self.convs = nn.Sequential(*convs)
if self.concat_input:
self.conv_cat = ConvModule(
self.in_channels + self.channels,
self.channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _forward_feature(self, inputs):
"""Forward function for feature maps before classifying each pixel with
``self.cls_seg`` fc.
Args:
inputs (list[Tensor]): List of multi-level img features.
Returns:
feats (Tensor): A tensor of shape (batch_size, self.channels,
H, W) which is feature map for last layer of decoder head.
"""
x = self._transform_inputs(inputs)
feats = self.convs(x)
if self.concat_input:
feats = self.conv_cat(torch.cat([x, feats], dim=1))
return feats
def forward(self, inputs):
"""Forward function."""
output = self._forward_feature(inputs)
output = self.cls_seg(output)
return output
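

# A minimal usage sketch (not part of the original file; the channel sizes
# and number of classes below are illustrative assumptions):
if __name__ == '__main__':
    head = FCNHead(
        num_convs=2,
        kernel_size=3,
        concat_input=True,
        in_channels=256,  # channels of the selected backbone stage
        channels=128,     # intermediate channels of the head
        num_classes=19)   # e.g. Cityscapes
    feats = [torch.rand(2, 256, 64, 64)]  # single-level feature list
    logits = head(feats)  # shape: (2, 19, 64, 64)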

# mmsegmentation-master/mmseg/models/decode_heads/fpn_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.ops import Upsample, resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
@HEADS.register_module()
class FPNHead(BaseDecodeHead):
"""Panoptic Feature Pyramid Networks.
This head is the implementation of `Semantic FPN
<https://arxiv.org/abs/1901.02446>`_.
Args:
        feature_strides (tuple[int]): The strides for input feature maps,
            used to stack lateral connections. All strides are supposed to
            be powers of 2, with the first one having the largest resolution.
"""
def __init__(self, feature_strides, **kwargs):
super(FPNHead, self).__init__(
input_transform='multiple_select', **kwargs)
assert len(feature_strides) == len(self.in_channels)
assert min(feature_strides) == feature_strides[0]
self.feature_strides = feature_strides
self.scale_heads = nn.ModuleList()
for i in range(len(feature_strides)):
head_length = max(
1,
int(np.log2(feature_strides[i]) - np.log2(feature_strides[0])))
scale_head = []
for k in range(head_length):
scale_head.append(
ConvModule(
self.in_channels[i] if k == 0 else self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
if feature_strides[i] != feature_strides[0]:
scale_head.append(
Upsample(
scale_factor=2,
mode='bilinear',
align_corners=self.align_corners))
self.scale_heads.append(nn.Sequential(*scale_head))
def forward(self, inputs):
x = self._transform_inputs(inputs)
output = self.scale_heads[0](x[0])
for i in range(1, len(self.feature_strides)):
# non inplace
output = output + resize(
self.scale_heads[i](x[i]),
size=output.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
output = self.cls_seg(output)
return output
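

# A hedged usage sketch (illustrative, assumed values): four feature levels
# with strides 4/8/16/32 are progressively upsampled and summed into a
# stride-4 prediction.
if __name__ == '__main__':
    import torch
    head = FPNHead(
        feature_strides=(4, 8, 16, 32),
        in_channels=[256, 256, 256, 256],
        in_index=[0, 1, 2, 3],
        channels=128,
        num_classes=19)
    feats = [torch.rand(2, 256, 512 // s, 512 // s) for s in (4, 8, 16, 32)]
    logits = head(feats)  # shape: (2, 19, 128, 128)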

# mmsegmentation-master/mmseg/models/decode_heads/gc_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ContextBlock
from ..builder import HEADS
from .fcn_head import FCNHead
@HEADS.register_module()
class GCHead(FCNHead):
"""GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond.
This head is the implementation of `GCNet
<https://arxiv.org/abs/1904.11492>`_.
Args:
ratio (float): Multiplier of channels ratio. Default: 1/4.
pooling_type (str): The pooling type of context aggregation.
            Options are 'att', 'avg'. Default: 'att'.
fusion_types (tuple[str]): The fusion type for feature fusion.
Options are 'channel_add', 'channel_mul'. Default: ('channel_add',)
"""
def __init__(self,
ratio=1 / 4.,
pooling_type='att',
fusion_types=('channel_add', ),
**kwargs):
super(GCHead, self).__init__(num_convs=2, **kwargs)
self.ratio = ratio
self.pooling_type = pooling_type
self.fusion_types = fusion_types
self.gc_block = ContextBlock(
in_channels=self.channels,
ratio=self.ratio,
pooling_type=self.pooling_type,
fusion_types=self.fusion_types)
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
output = self.convs[0](x)
output = self.gc_block(output)
output = self.convs[1](output)
if self.concat_input:
output = self.conv_cat(torch.cat([x, output], dim=1))
output = self.cls_seg(output)
return output
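

# A hedged usage sketch (illustrative values): the global context block is
# applied between the two convs inherited from FCNHead.
if __name__ == '__main__':
    head = GCHead(
        ratio=1 / 4.,
        pooling_type='att',
        in_channels=512,
        channels=256,
        num_classes=19)
    logits = head([torch.rand(2, 512, 64, 64)])  # shape: (2, 19, 64, 64)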

# mmsegmentation-master/mmseg/models/decode_heads/ham_head.py
# Copyright (c) OpenMMLab. All rights reserved.
# Originally from https://github.com/visual-attention-network/segnext
# Licensed under the Apache License, Version 2.0 (the "License")
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
class Matrix_Decomposition_2D_Base(nn.Module):
"""Base class of 2D Matrix Decomposition.
Args:
        MD_S (int): The number of spatial coefficients in
            Matrix Decomposition; it may be used to calculate
            the number of latent dimensions D in Matrix
            Decomposition. Defaults: 1.
MD_R (int): The number of latent dimension R in
Matrix Decomposition. Defaults: 64.
train_steps (int): The number of iteration steps in
Multiplicative Update (MU) rule to solve Non-negative
Matrix Factorization (NMF) in training. Defaults: 6.
eval_steps (int): The number of iteration steps in
Multiplicative Update (MU) rule to solve Non-negative
Matrix Factorization (NMF) in evaluation. Defaults: 7.
        inv_t (int): Inverse temperature factor that makes the
            coefficients smaller in the softmax. Defaults: 100.
rand_init (bool): Whether to initialize randomly.
Defaults: True.
"""
def __init__(self,
MD_S=1,
MD_R=64,
train_steps=6,
eval_steps=7,
inv_t=100,
rand_init=True):
super().__init__()
self.S = MD_S
self.R = MD_R
self.train_steps = train_steps
self.eval_steps = eval_steps
self.inv_t = inv_t
self.rand_init = rand_init
def _build_bases(self, B, S, D, R, cuda=False):
raise NotImplementedError
def local_step(self, x, bases, coef):
raise NotImplementedError
def local_inference(self, x, bases):
# (B * S, D, N)^T @ (B * S, D, R) -> (B * S, N, R)
coef = torch.bmm(x.transpose(1, 2), bases)
coef = F.softmax(self.inv_t * coef, dim=-1)
steps = self.train_steps if self.training else self.eval_steps
for _ in range(steps):
bases, coef = self.local_step(x, bases, coef)
return bases, coef
def compute_coef(self, x, bases, coef):
raise NotImplementedError
def forward(self, x, return_bases=False):
"""Forward Function."""
B, C, H, W = x.shape
# (B, C, H, W) -> (B * S, D, N)
D = C // self.S
N = H * W
x = x.view(B * self.S, D, N)
cuda = 'cuda' in str(x.device)
if not self.rand_init and not hasattr(self, 'bases'):
bases = self._build_bases(1, self.S, D, self.R, cuda=cuda)
self.register_buffer('bases', bases)
# (S, D, R) -> (B * S, D, R)
if self.rand_init:
bases = self._build_bases(B, self.S, D, self.R, cuda=cuda)
else:
bases = self.bases.repeat(B, 1, 1)
bases, coef = self.local_inference(x, bases)
# (B * S, N, R)
coef = self.compute_coef(x, bases, coef)
# (B * S, D, R) @ (B * S, N, R)^T -> (B * S, D, N)
x = torch.bmm(bases, coef.transpose(1, 2))
# (B * S, D, N) -> (B, C, H, W)
x = x.view(B, C, H, W)
return x
class NMF2D(Matrix_Decomposition_2D_Base):
"""Non-negative Matrix Factorization (NMF) module.
It is inherited from ``Matrix_Decomposition_2D_Base`` module.
"""
def __init__(self, args=dict()):
super().__init__(**args)
self.inv_t = 1
def _build_bases(self, B, S, D, R, cuda=False):
"""Build bases in initialization."""
if cuda:
bases = torch.rand((B * S, D, R)).cuda()
else:
bases = torch.rand((B * S, D, R))
bases = F.normalize(bases, dim=1)
return bases
def local_step(self, x, bases, coef):
"""Local step in iteration to renew bases and coefficient."""
# (B * S, D, N)^T @ (B * S, D, R) -> (B * S, N, R)
numerator = torch.bmm(x.transpose(1, 2), bases)
# (B * S, N, R) @ [(B * S, D, R)^T @ (B * S, D, R)] -> (B * S, N, R)
denominator = coef.bmm(bases.transpose(1, 2).bmm(bases))
# Multiplicative Update
coef = coef * numerator / (denominator + 1e-6)
# (B * S, D, N) @ (B * S, N, R) -> (B * S, D, R)
numerator = torch.bmm(x, coef)
# (B * S, D, R) @ [(B * S, N, R)^T @ (B * S, N, R)] -> (B * S, D, R)
denominator = bases.bmm(coef.transpose(1, 2).bmm(coef))
# Multiplicative Update
bases = bases * numerator / (denominator + 1e-6)
return bases, coef
def compute_coef(self, x, bases, coef):
"""Compute coefficient."""
# (B * S, D, N)^T @ (B * S, D, R) -> (B * S, N, R)
numerator = torch.bmm(x.transpose(1, 2), bases)
# (B * S, N, R) @ (B * S, D, R)^T @ (B * S, D, R) -> (B * S, N, R)
denominator = coef.bmm(bases.transpose(1, 2).bmm(bases))
# multiplication update
coef = coef * numerator / (denominator + 1e-6)
return coef
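

# A hedged usage sketch of NMF2D (illustrative sizes, not from the original
# file): factorize a (B, C, H, W) map with S=1 group and R=16 bases to get a
# low-rank reconstruction of the same shape.
#
#     >>> nmf = NMF2D(dict(MD_S=1, MD_R=16, train_steps=6, eval_steps=7))
#     >>> _ = nmf.eval()
#     >>> x = torch.rand(2, 64, 32, 32)  # torch.rand is non-negative
#     >>> nmf(x).shape
#     torch.Size([2, 64, 32, 32])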
class Hamburger(nn.Module):
"""Hamburger Module. It consists of one slice of "ham" (matrix
decomposition) and two slices of "bread" (linear transformation).
Args:
ham_channels (int): Input and output channels of feature.
ham_kwargs (dict): Config of matrix decomposition module.
norm_cfg (dict | None): Config of norm layers.
"""
def __init__(self,
ham_channels=512,
ham_kwargs=dict(),
norm_cfg=None,
**kwargs):
super().__init__()
self.ham_in = ConvModule(
ham_channels, ham_channels, 1, norm_cfg=None, act_cfg=None)
self.ham = NMF2D(ham_kwargs)
self.ham_out = ConvModule(
ham_channels, ham_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
def forward(self, x):
enjoy = self.ham_in(x)
enjoy = F.relu(enjoy, inplace=True)
enjoy = self.ham(enjoy)
enjoy = self.ham_out(enjoy)
ham = F.relu(x + enjoy, inplace=True)
return ham
@HEADS.register_module()
class LightHamHead(BaseDecodeHead):
"""SegNeXt decode head.
This decode head is the implementation of `SegNeXt: Rethinking
Convolutional Attention Design for Semantic
Segmentation <https://arxiv.org/abs/2209.08575>`_.
Inspiration from https://github.com/visual-attention-network/segnext.
Specifically, LightHamHead is inspired by HamNet from
`Is Attention Better Than Matrix Decomposition?
    <https://arxiv.org/abs/2109.04553>`_.
Args:
ham_channels (int): input channels for Hamburger.
Defaults: 512.
        ham_kwargs (dict): kwargs for the Hamburger module. Defaults: dict().
"""
def __init__(self, ham_channels=512, ham_kwargs=dict(), **kwargs):
super(LightHamHead, self).__init__(
input_transform='multiple_select', **kwargs)
self.ham_channels = ham_channels
self.squeeze = ConvModule(
sum(self.in_channels),
self.ham_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.hamburger = Hamburger(ham_channels, ham_kwargs, **kwargs)
self.align = ConvModule(
self.ham_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def forward(self, inputs):
"""Forward function."""
inputs = self._transform_inputs(inputs)
inputs = [
resize(
level,
size=inputs[0].shape[2:],
mode='bilinear',
align_corners=self.align_corners) for level in inputs
]
inputs = torch.cat(inputs, dim=1)
# apply a conv block to squeeze feature map
x = self.squeeze(inputs)
# apply hamburger module
x = self.hamburger(x)
# apply a conv block to align feature map
output = self.align(x)
output = self.cls_seg(output)
return output
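

# A hedged usage sketch (illustrative channel sizes, loosely following an
# MSCAN-style backbone): the selected feature levels are resized, fused and
# passed through the Hamburger module.
if __name__ == '__main__':
    head = LightHamHead(
        in_channels=[64, 160, 256],
        in_index=[0, 1, 2],
        channels=256,
        ham_channels=256,
        ham_kwargs=dict(MD_R=16),
        num_classes=150)
    feats = [
        torch.rand(2, 64, 32, 32),
        torch.rand(2, 160, 16, 16),
        torch.rand(2, 256, 8, 8),
    ]
    logits = head(feats)  # shape: (2, 150, 32, 32)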

# mmsegmentation-master/mmseg/models/decode_heads/isa_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from ..builder import HEADS
from ..utils import SelfAttentionBlock as _SelfAttentionBlock
from .decode_head import BaseDecodeHead
class SelfAttentionBlock(_SelfAttentionBlock):
"""Self-Attention Module.
Args:
in_channels (int): Input channels of key/query feature.
channels (int): Output channels of key/query transform.
conv_cfg (dict | None): Config of conv layers.
norm_cfg (dict | None): Config of norm layers.
act_cfg (dict | None): Config of activation layers.
"""
def __init__(self, in_channels, channels, conv_cfg, norm_cfg, act_cfg):
super(SelfAttentionBlock, self).__init__(
key_in_channels=in_channels,
query_in_channels=in_channels,
channels=channels,
out_channels=in_channels,
share_key_query=False,
query_downsample=None,
key_downsample=None,
key_query_num_convs=2,
key_query_norm=True,
value_out_num_convs=1,
value_out_norm=False,
matmul_norm=True,
with_out=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.output_project = self.build_project(
in_channels,
in_channels,
num_convs=1,
use_conv_module=True,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, x):
"""Forward function."""
context = super(SelfAttentionBlock, self).forward(x, x)
return self.output_project(context)
@HEADS.register_module()
class ISAHead(BaseDecodeHead):
"""Interlaced Sparse Self-Attention for Semantic Segmentation.
This head is the implementation of `ISA
<https://arxiv.org/abs/1907.12273>`_.
Args:
isa_channels (int): The channels of ISA Module.
        down_factor (tuple[int]): The local group size of ISA.
            Default: (8, 8).
"""
def __init__(self, isa_channels, down_factor=(8, 8), **kwargs):
super(ISAHead, self).__init__(**kwargs)
self.down_factor = down_factor
self.in_conv = ConvModule(
self.in_channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.global_relation = SelfAttentionBlock(
self.channels,
isa_channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.local_relation = SelfAttentionBlock(
self.channels,
isa_channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.out_conv = ConvModule(
self.channels * 2,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def forward(self, inputs):
"""Forward function."""
x_ = self._transform_inputs(inputs)
x = self.in_conv(x_)
residual = x
n, c, h, w = x.size()
loc_h, loc_w = self.down_factor # size of local group in H- and W-axes
glb_h, glb_w = math.ceil(h / loc_h), math.ceil(w / loc_w)
pad_h, pad_w = glb_h * loc_h - h, glb_w * loc_w - w
if pad_h > 0 or pad_w > 0: # pad if the size is not divisible
padding = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2)
x = F.pad(x, padding)
# global relation
x = x.view(n, c, glb_h, loc_h, glb_w, loc_w)
# do permutation to gather global group
x = x.permute(0, 3, 5, 1, 2, 4) # (n, loc_h, loc_w, c, glb_h, glb_w)
x = x.reshape(-1, c, glb_h, glb_w)
# apply attention within each global group
x = self.global_relation(x) # (n * loc_h * loc_w, c, glb_h, glb_w)
# local relation
x = x.view(n, loc_h, loc_w, c, glb_h, glb_w)
# do permutation to gather local group
x = x.permute(0, 4, 5, 3, 1, 2) # (n, glb_h, glb_w, c, loc_h, loc_w)
x = x.reshape(-1, c, loc_h, loc_w)
# apply attention within each local group
x = self.local_relation(x) # (n * glb_h * glb_w, c, loc_h, loc_w)
# permute each pixel back to its original position
x = x.view(n, glb_h, glb_w, c, loc_h, loc_w)
x = x.permute(0, 3, 1, 4, 2, 5) # (n, c, glb_h, loc_h, glb_w, loc_w)
x = x.reshape(n, c, glb_h * loc_h, glb_w * loc_w)
if pad_h > 0 or pad_w > 0: # remove padding
x = x[:, :, pad_h // 2:pad_h // 2 + h, pad_w // 2:pad_w // 2 + w]
x = self.out_conv(torch.cat([x, residual], dim=1))
out = self.cls_seg(x)
return out
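

# A hedged usage sketch (illustrative sizes). The 65x65 input is not
# divisible by the 8x8 local group size, so the padding branch above is
# exercised.
if __name__ == '__main__':
    head = ISAHead(
        isa_channels=256,
        down_factor=(8, 8),
        in_channels=512,
        channels=512,
        num_classes=19)
    logits = head([torch.rand(2, 512, 65, 65)])  # shape: (2, 19, 65, 65)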

# mmsegmentation-master/mmseg/models/decode_heads/knet_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import (FFN, TRANSFORMER_LAYER,
MultiheadAttention,
build_transformer_layer)
from mmseg.models.builder import HEADS, build_head
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
from mmseg.utils import get_root_logger
@TRANSFORMER_LAYER.register_module()
class KernelUpdator(nn.Module):
"""Dynamic Kernel Updator in Kernel Update Head.
Args:
in_channels (int): The number of channels of input feature map.
Default: 256.
feat_channels (int): The number of middle-stage channels in
the kernel updator. Default: 64.
out_channels (int): The number of output channels.
gate_sigmoid (bool): Whether use sigmoid function in gate
mechanism. Default: True.
gate_norm_act (bool): Whether add normalization and activation
layer in gate mechanism. Default: False.
        activate_out (bool): Whether to add activation after the gate
            mechanism. Default: False.
norm_cfg (dict | None): Config of norm layers.
Default: dict(type='LN').
act_cfg (dict): Config of activation layers.
Default: dict(type='ReLU').
"""
def __init__(
self,
in_channels=256,
feat_channels=64,
out_channels=None,
gate_sigmoid=True,
gate_norm_act=False,
activate_out=False,
norm_cfg=dict(type='LN'),
act_cfg=dict(type='ReLU', inplace=True),
):
super(KernelUpdator, self).__init__()
self.in_channels = in_channels
self.feat_channels = feat_channels
self.out_channels_raw = out_channels
self.gate_sigmoid = gate_sigmoid
self.gate_norm_act = gate_norm_act
self.activate_out = activate_out
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.out_channels = out_channels if out_channels else in_channels
self.num_params_in = self.feat_channels
self.num_params_out = self.feat_channels
self.dynamic_layer = nn.Linear(
self.in_channels, self.num_params_in + self.num_params_out)
self.input_layer = nn.Linear(self.in_channels,
self.num_params_in + self.num_params_out,
1)
self.input_gate = nn.Linear(self.in_channels, self.feat_channels, 1)
self.update_gate = nn.Linear(self.in_channels, self.feat_channels, 1)
if self.gate_norm_act:
self.gate_norm = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.input_norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.input_norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.activation = build_activation_layer(act_cfg)
self.fc_layer = nn.Linear(self.feat_channels, self.out_channels, 1)
self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]
def forward(self, update_feature, input_feature):
"""Forward function of KernelUpdator.
Args:
update_feature (torch.Tensor): Feature map assembled from
each group. It would be reshaped with last dimension
shape: `self.in_channels`.
input_feature (torch.Tensor): Intermediate feature
with shape: (N, num_classes, conv_kernel_size**2, channels).
Returns:
Tensor: The output tensor of shape (N*C1/C2, K*K, C2), where N is
the number of classes, C1 and C2 are the feature map channels of
KernelUpdateHead and KernelUpdator, respectively.
"""
update_feature = update_feature.reshape(-1, self.in_channels)
num_proposals = update_feature.size(0)
# dynamic_layer works for
# phi_1 and psi_3 in Eq.(4) and (5) of K-Net paper
parameters = self.dynamic_layer(update_feature)
param_in = parameters[:, :self.num_params_in].view(
-1, self.feat_channels)
param_out = parameters[:, -self.num_params_out:].view(
-1, self.feat_channels)
# input_layer works for
# phi_2 and psi_4 in Eq.(4) and (5) of K-Net paper
input_feats = self.input_layer(
input_feature.reshape(num_proposals, -1, self.feat_channels))
input_in = input_feats[..., :self.num_params_in]
input_out = input_feats[..., -self.num_params_out:]
# `gate_feats` is F^G in K-Net paper
gate_feats = input_in * param_in.unsqueeze(-2)
if self.gate_norm_act:
gate_feats = self.activation(self.gate_norm(gate_feats))
input_gate = self.input_norm_in(self.input_gate(gate_feats))
update_gate = self.norm_in(self.update_gate(gate_feats))
if self.gate_sigmoid:
input_gate = input_gate.sigmoid()
update_gate = update_gate.sigmoid()
param_out = self.norm_out(param_out)
input_out = self.input_norm_out(input_out)
if self.activate_out:
param_out = self.activation(param_out)
input_out = self.activation(input_out)
# Gate mechanism. Eq.(5) in original paper.
# param_out has shape (batch_size, feat_channels, out_channels)
features = update_gate * param_out.unsqueeze(
-2) + input_gate * input_out
features = self.fc_layer(features)
features = self.fc_norm(features)
features = self.activation(features)
return features
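

# A hedged usage sketch of KernelUpdator (illustrative shapes; note that the
# reshape in ``forward`` effectively assumes ``feat_channels == in_channels``,
# which is the case in the reference K-Net configs):
#
#     >>> upd = KernelUpdator(in_channels=256, feat_channels=256,
#     ...                     out_channels=256)
#     >>> x_feat = torch.rand(2, 150, 256)      # assembled group features
#     >>> kernels = torch.rand(2, 150, 1, 256)  # K*K = 1 kernel per class
#     >>> upd(x_feat, kernels).shape
#     torch.Size([300, 1, 256])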
@HEADS.register_module()
class KernelUpdateHead(nn.Module):
"""Kernel Update Head in K-Net.
Args:
num_classes (int): Number of classes. Default: 150.
num_ffn_fcs (int): The number of fully-connected layers in
FFNs. Default: 2.
num_heads (int): The number of parallel attention heads.
Default: 8.
num_mask_fcs (int): The number of fully connected layers for
mask prediction. Default: 3.
feedforward_channels (int): The hidden dimension of FFNs.
Defaults: 2048.
in_channels (int): The number of channels of input feature map.
Default: 256.
out_channels (int): The number of output channels.
Default: 256.
dropout (float): The Probability of an element to be
zeroed in MultiheadAttention and FFN. Default 0.0.
act_cfg (dict): Config of activation layers.
Default: dict(type='ReLU').
ffn_act_cfg (dict): Config of activation layers in FFN.
Default: dict(type='ReLU').
conv_kernel_size (int): The kernel size of convolution in
            Kernel Update Head for dynamic kernel update.
Default: 1.
feat_transform_cfg (dict | None): Config of feature transform.
Default: None.
kernel_init (bool): Whether initiate mask kernel in mask head.
Default: False.
with_ffn (bool): Whether add FFN in kernel update head.
Default: True.
feat_gather_stride (int): Stride of convolution in feature transform.
Default: 1.
mask_transform_stride (int): Stride of mask transform.
Default: 1.
kernel_updator_cfg (dict): Config of kernel updator.
Default: dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')).
"""
def __init__(self,
num_classes=150,
num_ffn_fcs=2,
num_heads=8,
num_mask_fcs=3,
feedforward_channels=2048,
in_channels=256,
out_channels=256,
dropout=0.0,
act_cfg=dict(type='ReLU', inplace=True),
ffn_act_cfg=dict(type='ReLU', inplace=True),
conv_kernel_size=1,
feat_transform_cfg=None,
kernel_init=False,
with_ffn=True,
feat_gather_stride=1,
mask_transform_stride=1,
kernel_updator_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'))):
super(KernelUpdateHead, self).__init__()
self.num_classes = num_classes
self.in_channels = in_channels
self.out_channels = out_channels
self.fp16_enabled = False
self.dropout = dropout
self.num_heads = num_heads
self.kernel_init = kernel_init
self.with_ffn = with_ffn
self.conv_kernel_size = conv_kernel_size
self.feat_gather_stride = feat_gather_stride
self.mask_transform_stride = mask_transform_stride
self.attention = MultiheadAttention(in_channels * conv_kernel_size**2,
num_heads, dropout)
self.attention_norm = build_norm_layer(
dict(type='LN'), in_channels * conv_kernel_size**2)[1]
self.kernel_update_conv = build_transformer_layer(kernel_updator_cfg)
if feat_transform_cfg is not None:
kernel_size = feat_transform_cfg.pop('kernel_size', 1)
transform_channels = in_channels
self.feat_transform = ConvModule(
transform_channels,
in_channels,
kernel_size,
stride=feat_gather_stride,
padding=int(feat_gather_stride // 2),
**feat_transform_cfg)
else:
self.feat_transform = None
if self.with_ffn:
self.ffn = FFN(
in_channels,
feedforward_channels,
num_ffn_fcs,
act_cfg=ffn_act_cfg,
dropout=dropout)
self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
self.mask_fcs = nn.ModuleList()
for _ in range(num_mask_fcs):
self.mask_fcs.append(
nn.Linear(in_channels, in_channels, bias=False))
self.mask_fcs.append(
build_norm_layer(dict(type='LN'), in_channels)[1])
self.mask_fcs.append(build_activation_layer(act_cfg))
self.fc_mask = nn.Linear(in_channels, out_channels)
def init_weights(self):
"""Use xavier initialization for all weight parameter and set
classification head bias as a specific value when use focal loss."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
else:
# adopt the default initialization for
# the weight and bias of the layer norm
pass
if self.kernel_init:
logger = get_root_logger()
logger.info(
'mask kernel in mask head is normal initialized by std 0.01')
nn.init.normal_(self.fc_mask.weight, mean=0, std=0.01)
def forward(self, x, proposal_feat, mask_preds, mask_shape=None):
"""Forward function of Dynamic Instance Interactive Head.
Args:
x (Tensor): Feature map from FPN with shape
(batch_size, feature_dimensions, H , W).
proposal_feat (Tensor): Intermediate feature get from
diihead in last stage, has shape
(batch_size, num_proposals, feature_dimensions)
mask_preds (Tensor): mask prediction from the former stage in shape
(batch_size, num_proposals, H, W).
Returns:
Tuple: The first tensor is predicted mask with shape
(N, num_classes, H, W), the second tensor is dynamic kernel
with shape (N, num_classes, channels, K, K).
"""
N, num_proposals = proposal_feat.shape[:2]
if self.feat_transform is not None:
x = self.feat_transform(x)
C, H, W = x.shape[-3:]
mask_h, mask_w = mask_preds.shape[-2:]
if mask_h != H or mask_w != W:
gather_mask = F.interpolate(
mask_preds, (H, W), align_corners=False, mode='bilinear')
else:
gather_mask = mask_preds
sigmoid_masks = gather_mask.softmax(dim=1)
# Group Feature Assembling. Eq.(3) in original paper.
# einsum is faster than bmm by 30%
x_feat = torch.einsum('bnhw,bchw->bnc', sigmoid_masks, x)
# obj_feat in shape [B, N, C, K, K] -> [B, N, C, K*K] -> [B, N, K*K, C]
proposal_feat = proposal_feat.reshape(N, num_proposals,
self.in_channels,
-1).permute(0, 1, 3, 2)
obj_feat = self.kernel_update_conv(x_feat, proposal_feat)
# [B, N, K*K, C] -> [B, N, K*K*C] -> [N, B, K*K*C]
obj_feat = obj_feat.reshape(N, num_proposals, -1).permute(1, 0, 2)
obj_feat = self.attention_norm(self.attention(obj_feat))
# [N, B, K*K*C] -> [B, N, K*K*C]
obj_feat = obj_feat.permute(1, 0, 2)
# obj_feat in shape [B, N, K*K*C] -> [B, N, K*K, C]
obj_feat = obj_feat.reshape(N, num_proposals, -1, self.in_channels)
# FFN
if self.with_ffn:
obj_feat = self.ffn_norm(self.ffn(obj_feat))
mask_feat = obj_feat
for reg_layer in self.mask_fcs:
mask_feat = reg_layer(mask_feat)
# [B, N, K*K, C] -> [B, N, C, K*K]
mask_feat = self.fc_mask(mask_feat).permute(0, 1, 3, 2)
if (self.mask_transform_stride == 2 and self.feat_gather_stride == 1):
mask_x = F.interpolate(
x, scale_factor=0.5, mode='bilinear', align_corners=False)
H, W = mask_x.shape[-2:]
else:
mask_x = x
# group conv is 5x faster than unfold and uses about 1/5 memory
# Group conv vs. unfold vs. concat batch, 2.9ms :13.5ms :3.8ms
# Group conv vs. unfold vs. concat batch, 278 : 1420 : 369
# but in real training group conv is slower than concat batch
# so we keep using concat batch.
# fold_x = F.unfold(
# mask_x,
# self.conv_kernel_size,
# padding=int(self.conv_kernel_size // 2))
# mask_feat = mask_feat.reshape(N, num_proposals, -1)
# new_mask_preds = torch.einsum('bnc,bcl->bnl', mask_feat, fold_x)
# [B, N, C, K*K] -> [B*N, C, K, K]
mask_feat = mask_feat.reshape(N, num_proposals, C,
self.conv_kernel_size,
self.conv_kernel_size)
# [B, C, H, W] -> [1, B*C, H, W]
new_mask_preds = []
for i in range(N):
new_mask_preds.append(
F.conv2d(
mask_x[i:i + 1],
mask_feat[i],
padding=int(self.conv_kernel_size // 2)))
new_mask_preds = torch.cat(new_mask_preds, dim=0)
new_mask_preds = new_mask_preds.reshape(N, num_proposals, H, W)
if self.mask_transform_stride == 2:
new_mask_preds = F.interpolate(
new_mask_preds,
scale_factor=2,
mode='bilinear',
align_corners=False)
if mask_shape is not None and mask_shape[0] != H:
new_mask_preds = F.interpolate(
new_mask_preds,
mask_shape,
align_corners=False,
mode='bilinear')
return new_mask_preds, obj_feat.permute(0, 1, 3, 2).reshape(
N, num_proposals, self.in_channels, self.conv_kernel_size,
self.conv_kernel_size)
@HEADS.register_module()
class IterativeDecodeHead(BaseDecodeHead):
"""K-Net: Towards Unified Image Segmentation.
This head is the implementation of
`K-Net: <https://arxiv.org/abs/2106.14855>`_.
Args:
num_stages (int): The number of stages (kernel update heads)
in IterativeDecodeHead. Default: 3.
kernel_generate_head:(dict): Config of kernel generate head which
generate mask predictions, dynamic kernels and class predictions
for next kernel update heads.
kernel_update_head (dict): Config of kernel update head which refine
dynamic kernels and class predictions iteratively.
"""
def __init__(self, num_stages, kernel_generate_head, kernel_update_head,
**kwargs):
# ``IterativeDecodeHead`` would skip initialization of
# ``BaseDecodeHead`` which would be called when building
# ``self.kernel_generate_head``.
super(BaseDecodeHead, self).__init__(**kwargs)
assert num_stages == len(kernel_update_head)
self.num_stages = num_stages
self.kernel_generate_head = build_head(kernel_generate_head)
self.kernel_update_head = nn.ModuleList()
self.align_corners = self.kernel_generate_head.align_corners
self.num_classes = self.kernel_generate_head.num_classes
self.input_transform = self.kernel_generate_head.input_transform
self.ignore_index = self.kernel_generate_head.ignore_index
self.out_channels = self.num_classes
for head_cfg in kernel_update_head:
self.kernel_update_head.append(build_head(head_cfg))
def forward(self, inputs):
"""Forward function."""
feats = self.kernel_generate_head._forward_feature(inputs)
sem_seg = self.kernel_generate_head.cls_seg(feats)
seg_kernels = self.kernel_generate_head.conv_seg.weight.clone()
seg_kernels = seg_kernels[None].expand(
feats.size(0), *seg_kernels.size())
stage_segs = [sem_seg]
for i in range(self.num_stages):
sem_seg, seg_kernels = self.kernel_update_head[i](feats,
seg_kernels,
sem_seg)
stage_segs.append(sem_seg)
if self.training:
return stage_segs
# only return the prediction of the last stage during testing
return stage_segs[-1]
def losses(self, seg_logit, seg_label):
losses = dict()
for i, logit in enumerate(seg_logit):
loss = self.kernel_generate_head.losses(logit, seg_label)
for k, v in loss.items():
losses[f'{k}.s{i}'] = v
return losses

# mmsegmentation-master/mmseg/models/decode_heads/lraspp_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv import is_tuple_of
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
@HEADS.register_module()
class LRASPPHead(BaseDecodeHead):
"""Lite R-ASPP (LRASPP) head is proposed in Searching for MobileNetV3.
This head is the improved implementation of `Searching for MobileNetV3
<https://ieeexplore.ieee.org/document/9008835>`_.
Args:
        branch_channels (tuple[int]): The number of output channels of
            each branch. Default: (32, 64).
"""
def __init__(self, branch_channels=(32, 64), **kwargs):
super(LRASPPHead, self).__init__(**kwargs)
if self.input_transform != 'multiple_select':
raise ValueError('in Lite R-ASPP (LRASPP) head, input_transform '
f'must be \'multiple_select\'. But received '
f'\'{self.input_transform}\'')
assert is_tuple_of(branch_channels, int)
assert len(branch_channels) == len(self.in_channels) - 1
self.branch_channels = branch_channels
self.convs = nn.Sequential()
self.conv_ups = nn.Sequential()
for i in range(len(branch_channels)):
self.convs.add_module(
f'conv{i}',
nn.Conv2d(
self.in_channels[i], branch_channels[i], 1, bias=False))
self.conv_ups.add_module(
f'conv_up{i}',
ConvModule(
self.channels + branch_channels[i],
self.channels,
1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
bias=False))
self.conv_up_input = nn.Conv2d(self.channels, self.channels, 1)
self.aspp_conv = ConvModule(
self.in_channels[-1],
self.channels,
1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
bias=False)
self.image_pool = nn.Sequential(
nn.AvgPool2d(kernel_size=49, stride=(16, 20)),
ConvModule(
self.in_channels[2],
self.channels,
1,
act_cfg=dict(type='Sigmoid'),
bias=False))
def forward(self, inputs):
"""Forward function."""
inputs = self._transform_inputs(inputs)
x = inputs[-1]
x = self.aspp_conv(x) * resize(
self.image_pool(x),
size=x.size()[2:],
mode='bilinear',
align_corners=self.align_corners)
x = self.conv_up_input(x)
for i in range(len(self.branch_channels) - 1, -1, -1):
x = resize(
x,
size=inputs[i].size()[2:],
mode='bilinear',
align_corners=self.align_corners)
x = torch.cat([x, self.convs[i](inputs[i])], 1)
x = self.conv_ups[i](x)
return self.cls_seg(x)
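

# A hedged usage sketch with MobileNetV3-like channels (illustrative). The
# AvgPool2d(49, ...) in ``image_pool`` requires the last feature map to be
# at least 49x49, e.g. a 512x1024 image at stride 8.
if __name__ == '__main__':
    head = LRASPPHead(
        branch_channels=(32, 64),
        in_channels=(16, 24, 576),
        in_index=(0, 1, 2),
        input_transform='multiple_select',
        channels=128,
        num_classes=19)
    feats = [
        torch.rand(2, 16, 128, 256),
        torch.rand(2, 24, 64, 128),
        torch.rand(2, 576, 64, 128),
    ]
    logits = head(feats)  # shape: (2, 19, 128, 256)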

# mmsegmentation-master/mmseg/models/decode_heads/nl_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import NonLocal2d
from ..builder import HEADS
from .fcn_head import FCNHead
@HEADS.register_module()
class NLHead(FCNHead):
"""Non-local Neural Networks.
This head is the implementation of `NLNet
<https://arxiv.org/abs/1711.07971>`_.
Args:
reduction (int): Reduction factor of projection transform. Default: 2.
use_scale (bool): Whether to scale pairwise_weight by
sqrt(1/inter_channels). Default: True.
mode (str): The nonlocal mode. Options are 'embedded_gaussian',
            'dot_product'. Default: 'embedded_gaussian'.
"""
def __init__(self,
reduction=2,
use_scale=True,
mode='embedded_gaussian',
**kwargs):
super(NLHead, self).__init__(num_convs=2, **kwargs)
self.reduction = reduction
self.use_scale = use_scale
self.mode = mode
self.nl_block = NonLocal2d(
in_channels=self.channels,
reduction=self.reduction,
use_scale=self.use_scale,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
mode=self.mode)
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
output = self.convs[0](x)
output = self.nl_block(output)
output = self.convs[1](output)
if self.concat_input:
output = self.conv_cat(torch.cat([x, output], dim=1))
output = self.cls_seg(output)
return output
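

# A hedged usage sketch (illustrative values), mirroring the FCNHead example
# but with a non-local block between the two convs.
if __name__ == '__main__':
    head = NLHead(
        reduction=2,
        use_scale=True,
        in_channels=512,
        channels=256,
        num_classes=19)
    logits = head([torch.rand(2, 512, 64, 64)])  # shape: (2, 19, 64, 64)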

# mmsegmentation-master/mmseg/models/decode_heads/ocr_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from ..builder import HEADS
from ..utils import SelfAttentionBlock as _SelfAttentionBlock
from .cascade_decode_head import BaseCascadeDecodeHead
class SpatialGatherModule(nn.Module):
"""Aggregate the context features according to the initial predicted
probability distribution.
Employ the soft-weighted method to aggregate the context.
"""
def __init__(self, scale):
super(SpatialGatherModule, self).__init__()
self.scale = scale
def forward(self, feats, probs):
"""Forward function."""
batch_size, num_classes, height, width = probs.size()
channels = feats.size(1)
probs = probs.view(batch_size, num_classes, -1)
feats = feats.view(batch_size, channels, -1)
        # [batch_size, height*width, channels]
        feats = feats.permute(0, 2, 1)
        # [batch_size, num_classes, height*width]
        probs = F.softmax(self.scale * probs, dim=2)
        # [batch_size, num_classes, channels]
        ocr_context = torch.matmul(probs, feats)
ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3)
return ocr_context
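

# A hedged usage sketch of SpatialGatherModule (illustrative shapes): one
# context vector per class, soft-weighted by the coarse prediction.
#
#     >>> gather = SpatialGatherModule(scale=1)
#     >>> feats = torch.rand(2, 512, 32, 32)  # pixel features
#     >>> probs = torch.rand(2, 19, 32, 32)   # coarse logits
#     >>> gather(feats, probs).shape
#     torch.Size([2, 512, 19, 1])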
class ObjectAttentionBlock(_SelfAttentionBlock):
"""Make a OCR used SelfAttentionBlock."""
def __init__(self, in_channels, channels, scale, conv_cfg, norm_cfg,
act_cfg):
if scale > 1:
query_downsample = nn.MaxPool2d(kernel_size=scale)
else:
query_downsample = None
super(ObjectAttentionBlock, self).__init__(
key_in_channels=in_channels,
query_in_channels=in_channels,
channels=channels,
out_channels=in_channels,
share_key_query=False,
query_downsample=query_downsample,
key_downsample=None,
key_query_num_convs=2,
key_query_norm=True,
value_out_num_convs=1,
value_out_norm=True,
matmul_norm=True,
with_out=True,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.bottleneck = ConvModule(
in_channels * 2,
in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
    def forward(self, query_feats, key_feats):
        """Forward function."""
        context = super(ObjectAttentionBlock,
                        self).forward(query_feats, key_feats)
        if self.query_downsample is not None:
            # the query was downsampled inside the attention block, so the
            # context has to be upsampled back to the query resolution
            # before concatenation
            context = resize(
                context,
                size=query_feats.shape[2:],
                mode='bilinear',
                align_corners=False)
        output = self.bottleneck(torch.cat([context, query_feats], dim=1))
        return output
@HEADS.register_module()
class OCRHead(BaseCascadeDecodeHead):
"""Object-Contextual Representations for Semantic Segmentation.
This head is the implementation of `OCRNet
<https://arxiv.org/abs/1909.11065>`_.
Args:
ocr_channels (int): The intermediate channels of OCR block.
        scale (int): The scale of probability map in SpatialGatherModule.
            Default: 1.
"""
def __init__(self, ocr_channels, scale=1, **kwargs):
super(OCRHead, self).__init__(**kwargs)
self.ocr_channels = ocr_channels
self.scale = scale
self.object_context_block = ObjectAttentionBlock(
self.channels,
self.ocr_channels,
self.scale,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.spatial_gather_module = SpatialGatherModule(self.scale)
self.bottleneck = ConvModule(
self.in_channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def forward(self, inputs, prev_output):
"""Forward function."""
x = self._transform_inputs(inputs)
feats = self.bottleneck(x)
context = self.spatial_gather_module(feats, prev_output)
object_context = self.object_context_block(feats, context)
output = self.cls_seg(object_context)
return output
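

# A hedged usage sketch (illustrative sizes). As a cascade head, OCRHead
# additionally takes the coarse prediction of a previous decode head.
if __name__ == '__main__':
    head = OCRHead(
        ocr_channels=256,
        in_channels=2048,
        channels=512,
        num_classes=19)
    feats = [torch.rand(2, 2048, 64, 64)]
    prev_output = torch.rand(2, 19, 64, 64)  # e.g. from an auxiliary FCN head
    logits = head(feats, prev_output)  # shape: (2, 19, 64, 64)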

# mmsegmentation-master/mmseg/models/decode_heads/point_head.py
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
try:
from mmcv.ops import point_sample
except ModuleNotFoundError:
point_sample = None
from mmseg.models.builder import HEADS
from mmseg.ops import resize
from ..losses import accuracy
from .cascade_decode_head import BaseCascadeDecodeHead
def calculate_uncertainty(seg_logits):
"""Estimate uncertainty based on seg logits.
For each location of the prediction ``seg_logits`` we estimate
uncertainty as the difference between top first and top second
predicted logits.
Args:
seg_logits (Tensor): Semantic segmentation logits,
shape (batch_size, num_classes, height, width).
Returns:
        scores (Tensor): Uncertainty scores, with the most uncertain
            locations having the highest uncertainty score, shape (
            batch_size, 1, height, width)
"""
top2_scores = torch.topk(seg_logits, k=2, dim=1)[0]
return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1)
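

# A small concrete check (illustrative values): a pixel whose top-2 logits
# are far apart gets a very negative, i.e. low, uncertainty score.
#
#     >>> logits = torch.tensor([[[[4.0]], [[1.0]], [[0.5]]]])  # (1, 3, 1, 1)
#     >>> calculate_uncertainty(logits)
#     tensor([[[[-3.]]]])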
@HEADS.register_module()
class PointHead(BaseCascadeDecodeHead):
"""A mask point head use in PointRend.
This head is implemented of `PointRend: Image Segmentation as
Rendering <https://arxiv.org/abs/1912.08193>`_.
    ``PointHead`` uses a shared multi-layer perceptron (equivalent to
    nn.Conv1d) to predict the logits of input points. The fine-grained
    feature and coarse feature will be concatenated together for prediction.
Args:
num_fcs (int): Number of fc layers in the head. Default: 3.
in_channels (int): Number of input channels. Default: 256.
fc_channels (int): Number of fc channels. Default: 256.
num_classes (int): Number of classes for logits. Default: 80.
class_agnostic (bool): Whether use class agnostic classification.
If so, the output channels of logits will be 1. Default: False.
coarse_pred_each_layer (bool): Whether concatenate coarse feature with
the output of each fc layer. Default: True.
conv_cfg (dict|None): Dictionary to construct and config conv layer.
Default: dict(type='Conv1d'))
norm_cfg (dict|None): Dictionary to construct and config norm layer.
Default: None.
loss_point (dict): Dictionary to construct and config loss layer of
point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
loss_weight=1.0).
"""
def __init__(self,
num_fcs=3,
coarse_pred_each_layer=True,
conv_cfg=dict(type='Conv1d'),
norm_cfg=None,
act_cfg=dict(type='ReLU', inplace=False),
**kwargs):
super(PointHead, self).__init__(
input_transform='multiple_select',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc_seg')),
**kwargs)
if point_sample is None:
raise RuntimeError('Please install mmcv-full for '
'point_sample ops')
self.num_fcs = num_fcs
self.coarse_pred_each_layer = coarse_pred_each_layer
fc_in_channels = sum(self.in_channels) + self.num_classes
fc_channels = self.channels
self.fcs = nn.ModuleList()
for k in range(num_fcs):
fc = ConvModule(
fc_in_channels,
fc_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.fcs.append(fc)
fc_in_channels = fc_channels
fc_in_channels += self.num_classes if self.coarse_pred_each_layer \
else 0
self.fc_seg = nn.Conv1d(
fc_in_channels,
self.num_classes,
kernel_size=1,
stride=1,
padding=0)
if self.dropout_ratio > 0:
self.dropout = nn.Dropout(self.dropout_ratio)
delattr(self, 'conv_seg')
def cls_seg(self, feat):
"""Classify each pixel with fc."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.fc_seg(feat)
return output
def forward(self, fine_grained_point_feats, coarse_point_feats):
x = torch.cat([fine_grained_point_feats, coarse_point_feats], dim=1)
for fc in self.fcs:
x = fc(x)
if self.coarse_pred_each_layer:
x = torch.cat((x, coarse_point_feats), dim=1)
return self.cls_seg(x)
def _get_fine_grained_point_feats(self, x, points):
"""Sample from fine grained features.
Args:
x (list[Tensor]): Feature pyramid from by neck or backbone.
points (Tensor): Point coordinates, shape (batch_size,
num_points, 2).
Returns:
fine_grained_feats (Tensor): Sampled fine grained feature,
shape (batch_size, sum(channels of x), num_points).
"""
fine_grained_feats_list = [
point_sample(_, points, align_corners=self.align_corners)
for _ in x
]
if len(fine_grained_feats_list) > 1:
fine_grained_feats = torch.cat(fine_grained_feats_list, dim=1)
else:
fine_grained_feats = fine_grained_feats_list[0]
return fine_grained_feats
def _get_coarse_point_feats(self, prev_output, points):
"""Sample from fine grained features.
Args:
prev_output (list[Tensor]): Prediction of previous decode head.
points (Tensor): Point coordinates, shape (batch_size,
num_points, 2).
Returns:
coarse_feats (Tensor): Sampled coarse feature, shape (batch_size,
num_classes, num_points).
"""
coarse_feats = point_sample(
prev_output, points, align_corners=self.align_corners)
return coarse_feats
def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg,
train_cfg):
"""Forward function for training.
Args:
inputs (list[Tensor]): List of multi-level img features.
prev_output (Tensor): The output of previous decode head.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
train_cfg (dict): The training config.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self._transform_inputs(inputs)
with torch.no_grad():
points = self.get_points_train(
prev_output, calculate_uncertainty, cfg=train_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, points)
coarse_point_feats = self._get_coarse_point_feats(prev_output, points)
point_logits = self.forward(fine_grained_point_feats,
coarse_point_feats)
point_label = point_sample(
gt_semantic_seg.float(),
points,
mode='nearest',
align_corners=self.align_corners)
point_label = point_label.squeeze(1).long()
losses = self.losses(point_logits, point_label)
return losses
def forward_test(self, inputs, prev_output, img_metas, test_cfg):
"""Forward function for testing.
Args:
inputs (list[Tensor]): List of multi-level img features.
prev_output (Tensor): The output of previous decode head.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
test_cfg (dict): The testing config.
Returns:
Tensor: Output segmentation map.
"""
x = self._transform_inputs(inputs)
refined_seg_logits = prev_output.clone()
for _ in range(test_cfg.subdivision_steps):
refined_seg_logits = resize(
refined_seg_logits,
scale_factor=test_cfg.scale_factor,
mode='bilinear',
align_corners=self.align_corners)
batch_size, channels, height, width = refined_seg_logits.shape
point_indices, points = self.get_points_test(
refined_seg_logits, calculate_uncertainty, cfg=test_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, points)
coarse_point_feats = self._get_coarse_point_feats(
prev_output, points)
point_logits = self.forward(fine_grained_point_feats,
coarse_point_feats)
point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
refined_seg_logits = refined_seg_logits.reshape(
batch_size, channels, height * width)
refined_seg_logits = refined_seg_logits.scatter_(
2, point_indices, point_logits)
refined_seg_logits = refined_seg_logits.view(
batch_size, channels, height, width)
return refined_seg_logits
def losses(self, point_logits, point_label):
"""Compute segmentation loss."""
loss = dict()
if not isinstance(self.loss_decode, nn.ModuleList):
losses_decode = [self.loss_decode]
else:
losses_decode = self.loss_decode
for loss_module in losses_decode:
loss['point' + loss_module.loss_name] = loss_module(
point_logits, point_label, ignore_index=self.ignore_index)
loss['acc_point'] = accuracy(
point_logits, point_label, ignore_index=self.ignore_index)
return loss
def get_points_train(self, seg_logits, uncertainty_func, cfg):
"""Sample points for training.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
'uncertainty_func' function that takes point's logit prediction as
input.
Args:
seg_logits (Tensor): Semantic segmentation logits, shape (
batch_size, num_classes, height, width).
uncertainty_func (func): uncertainty calculation function.
cfg (dict): Training config of point head.
Returns:
point_coords (Tensor): A tensor of shape (batch_size, num_points,
2) that contains the coordinates of ``num_points`` sampled
points.
"""
num_points = cfg.num_points
oversample_ratio = cfg.oversample_ratio
importance_sample_ratio = cfg.importance_sample_ratio
assert oversample_ratio >= 1
assert 0 <= importance_sample_ratio <= 1
batch_size = seg_logits.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(
batch_size, num_sampled, 2, device=seg_logits.device)
point_logits = point_sample(seg_logits, point_coords)
# It is crucial to calculate uncertainty based on the sampled
# prediction value for the points. Calculating uncertainties of the
# coarse predictions first and sampling them for points leads to
# incorrect results. To illustrate this: assume uncertainty func(
# logits)=-abs(logits), a sampled point between two coarse
# predictions with -1 and 1 logits has 0 logits, and therefore 0
# uncertainty value. However, if we calculate uncertainties for the
# coarse predictions first, both will have -1 uncertainty,
# and sampled point will get -1 uncertainty.
point_uncertainties = uncertainty_func(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(
batch_size, dtype=torch.long, device=seg_logits.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
batch_size, num_uncertain_points, 2)
if num_random_points > 0:
rand_point_coords = torch.rand(
batch_size, num_random_points, 2, device=seg_logits.device)
point_coords = torch.cat((point_coords, rand_point_coords), dim=1)
return point_coords
def get_points_test(self, seg_logits, uncertainty_func, cfg):
"""Sample points for testing.
Find ``num_points`` most uncertain points from ``uncertainty_map``.
Args:
seg_logits (Tensor): A tensor of shape (batch_size, num_classes,
height, width) for class-specific or class-agnostic prediction.
uncertainty_func (func): uncertainty calculation function.
cfg (dict): Testing config of point head.
Returns:
point_indices (Tensor): A tensor of shape (batch_size, num_points)
that contains indices from [0, height x width) of the most
uncertain points.
point_coords (Tensor): A tensor of shape (batch_size, num_points,
2) that contains [0, 1] x [0, 1] normalized coordinates of the
                most uncertain points from the ``height x width`` grid.
"""
num_points = cfg.subdivision_num_points
uncertainty_map = uncertainty_func(seg_logits)
batch_size, _, height, width = uncertainty_map.shape
h_step = 1.0 / height
w_step = 1.0 / width
uncertainty_map = uncertainty_map.view(batch_size, height * width)
num_points = min(height * width, num_points)
point_indices = uncertainty_map.topk(num_points, dim=1)[1]
point_coords = torch.zeros(
batch_size,
num_points,
2,
dtype=torch.float,
device=seg_logits.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices %
width).float() * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices //
width).float() * h_step
return point_indices, point_coords
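

# A hedged sketch of the test-time point sampling (illustrative values);
# ``cfg`` only needs the fields accessed above, and mmcv.Config provides the
# attribute-style access used by this head.
if __name__ == '__main__':
    from mmcv import Config
    head = PointHead(
        in_channels=[256], in_index=[0], channels=256, num_classes=19)
    seg_logits = torch.rand(2, 19, 32, 32)
    cfg = Config(dict(subdivision_num_points=64))
    point_indices, point_coords = head.get_points_test(
        seg_logits, calculate_uncertainty, cfg)
    # point_indices: (2, 64) flat indices; point_coords: (2, 64, 2) in [0, 1]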

# mmsegmentation-master/mmseg/models/decode_heads/psa_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
try:
from mmcv.ops import PSAMask
except ModuleNotFoundError:
PSAMask = None
@HEADS.register_module()
class PSAHead(BaseDecodeHead):
"""Point-wise Spatial Attention Network for Scene Parsing.
This head is the implementation of `PSANet
<https://hszhao.github.io/papers/eccv18_psanet.pdf>`_.
Args:
mask_size (tuple[int]): The PSA mask size. It usually equals input
size.
psa_type (str): The type of psa module. Options are 'collect',
'distribute', 'bi-direction'. Default: 'bi-direction'
        compact (bool): Whether to use compact map for 'collect' mode.
            Default: False.
        shrink_factor (int): The downsample factor of the psa mask.
            Default: 2.
        normalization_factor (float): The normalization factor of attention.
            Default: 1.0.
        psa_softmax (bool): Whether to use softmax for attention.
            Default: True.
"""
def __init__(self,
mask_size,
psa_type='bi-direction',
compact=False,
shrink_factor=2,
normalization_factor=1.0,
psa_softmax=True,
**kwargs):
if PSAMask is None:
raise RuntimeError('Please install mmcv-full for PSAMask ops')
super(PSAHead, self).__init__(**kwargs)
assert psa_type in ['collect', 'distribute', 'bi-direction']
self.psa_type = psa_type
self.compact = compact
self.shrink_factor = shrink_factor
self.mask_size = mask_size
mask_h, mask_w = mask_size
self.psa_softmax = psa_softmax
if normalization_factor is None:
normalization_factor = mask_h * mask_w
self.normalization_factor = normalization_factor
self.reduce = ConvModule(
self.in_channels,
self.channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.attention = nn.Sequential(
ConvModule(
self.channels,
self.channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
nn.Conv2d(
self.channels, mask_h * mask_w, kernel_size=1, bias=False))
if psa_type == 'bi-direction':
self.reduce_p = ConvModule(
self.in_channels,
self.channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.attention_p = nn.Sequential(
ConvModule(
self.channels,
self.channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
nn.Conv2d(
self.channels, mask_h * mask_w, kernel_size=1, bias=False))
self.psamask_collect = PSAMask('collect', mask_size)
self.psamask_distribute = PSAMask('distribute', mask_size)
else:
self.psamask = PSAMask(psa_type, mask_size)
self.proj = ConvModule(
self.channels * (2 if psa_type == 'bi-direction' else 1),
self.in_channels,
kernel_size=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.bottleneck = ConvModule(
self.in_channels * 2,
self.channels,
kernel_size=3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
identity = x
align_corners = self.align_corners
if self.psa_type in ['collect', 'distribute']:
out = self.reduce(x)
n, c, h, w = out.size()
if self.shrink_factor != 1:
if h % self.shrink_factor and w % self.shrink_factor:
h = (h - 1) // self.shrink_factor + 1
w = (w - 1) // self.shrink_factor + 1
align_corners = True
else:
h = h // self.shrink_factor
w = w // self.shrink_factor
align_corners = False
out = resize(
out,
size=(h, w),
mode='bilinear',
align_corners=align_corners)
y = self.attention(out)
if self.compact:
if self.psa_type == 'collect':
y = y.view(n, h * w,
h * w).transpose(1, 2).view(n, h * w, h, w)
else:
y = self.psamask(y)
if self.psa_softmax:
y = F.softmax(y, dim=1)
out = torch.bmm(
out.view(n, c, h * w), y.view(n, h * w, h * w)).view(
n, c, h, w) * (1.0 / self.normalization_factor)
else:
x_col = self.reduce(x)
x_dis = self.reduce_p(x)
n, c, h, w = x_col.size()
if self.shrink_factor != 1:
if h % self.shrink_factor and w % self.shrink_factor:
h = (h - 1) // self.shrink_factor + 1
w = (w - 1) // self.shrink_factor + 1
align_corners = True
else:
h = h // self.shrink_factor
w = w // self.shrink_factor
align_corners = False
x_col = resize(
x_col,
size=(h, w),
mode='bilinear',
align_corners=align_corners)
x_dis = resize(
x_dis,
size=(h, w),
mode='bilinear',
align_corners=align_corners)
y_col = self.attention(x_col)
y_dis = self.attention_p(x_dis)
if self.compact:
y_dis = y_dis.view(n, h * w,
h * w).transpose(1, 2).view(n, h * w, h, w)
else:
y_col = self.psamask_collect(y_col)
y_dis = self.psamask_distribute(y_dis)
if self.psa_softmax:
y_col = F.softmax(y_col, dim=1)
y_dis = F.softmax(y_dis, dim=1)
x_col = torch.bmm(
x_col.view(n, c, h * w), y_col.view(n, h * w, h * w)).view(
n, c, h, w) * (1.0 / self.normalization_factor)
x_dis = torch.bmm(
x_dis.view(n, c, h * w), y_dis.view(n, h * w, h * w)).view(
n, c, h, w) * (1.0 / self.normalization_factor)
out = torch.cat([x_col, x_dis], 1)
out = self.proj(out)
out = resize(
out,
size=identity.shape[2:],
mode='bilinear',
align_corners=align_corners)
out = self.bottleneck(torch.cat((identity, out), dim=1))
out = self.cls_seg(out)
return out
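

# A hedged usage sketch (illustrative sizes; requires the PSAMask op from
# mmcv-full). With shrink_factor=2, the 64x64 feature map is shrunk to the
# 32x32 mask_size before the point-wise attention is applied.
if __name__ == '__main__':
    head = PSAHead(
        mask_size=(32, 32),
        psa_type='bi-direction',
        shrink_factor=2,
        in_channels=512,
        channels=256,
        num_classes=19)
    logits = head([torch.rand(2, 512, 64, 64)])  # shape: (2, 19, 64, 64)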

# mmsegmentation-master/mmseg/models/decode_heads/psp_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
class PPM(nn.ModuleList):
"""Pooling Pyramid Module used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict): Config of activation layers.
align_corners (bool): align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg,
act_cfg, align_corners, **kwargs):
super(PPM, self).__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
for pool_scale in pool_scales:
self.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(pool_scale),
ConvModule(
self.in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
**kwargs)))
def forward(self, x):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(x)
upsampled_ppm_out = resize(
ppm_out,
size=x.size()[2:],
mode='bilinear',
align_corners=self.align_corners)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
@HEADS.register_module()
class PSPHead(BaseDecodeHead):
"""Pyramid Scene Parsing Network.
This head is the implementation of
`PSPNet <https://arxiv.org/abs/1612.01105>`_.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module. Default: (1, 2, 3, 6).
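    Example:
        A minimal forward sketch with assumed channel sizes (illustrative
        only, independent of any specific backbone):
        >>> import torch
        >>> head = PSPHead(in_channels=64, channels=16, num_classes=19)
        >>> seg_logit = head([torch.rand(1, 64, 32, 32)])
        >>> tuple(seg_logit.shape)
        (1, 19, 32, 32)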
"""
def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
super(PSPHead, self).__init__(**kwargs)
assert isinstance(pool_scales, (list, tuple))
self.pool_scales = pool_scales
self.psp_modules = PPM(
self.pool_scales,
self.in_channels,
self.channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
align_corners=self.align_corners)
self.bottleneck = ConvModule(
self.in_channels + len(pool_scales) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _forward_feature(self, inputs):
"""Forward function for feature maps before classifying each pixel with
``self.cls_seg`` fc.
Args:
inputs (list[Tensor]): List of multi-level img features.
Returns:
feats (Tensor): A tensor of shape (batch_size, self.channels,
H, W) which is feature map for last layer of decoder head.
"""
x = self._transform_inputs(inputs)
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
feats = self.bottleneck(psp_outs)
return feats
def forward(self, inputs):
"""Forward function."""
output = self._forward_feature(inputs)
output = self.cls_seg(output)
return output
| 3,884 | 31.923729 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/decode_heads/segformer_head.py | # Modified from
# https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/decode_heads/segformer_head.py
#
# This work is licensed under the NVIDIA Source Code License.
#
# Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator
# Augmentation (ADA)
#
# 1. Definitions
# "Licensor" means any person or entity that distributes its Work.
# "Software" means the original work of authorship made available under
# this License.
# "Work" means the Software and any additions to or derivative works of
# the Software that are made available under this License.
# The terms "reproduce," "reproduction," "derivative works," and
# "distribution" have the meaning as provided under U.S. copyright law;
# provided, however, that for the purposes of this License, derivative
# works shall not include works that remain separable from, or merely
# link (or bind by name) to the interfaces of, the Work.
# Works, including the Software, are "made available" under this License
# by including in or with the Work either (a) a copyright notice
# referencing the applicability of this License to the Work, or (b) a
# copy of this License.
# 2. License Grants
# 2.1 Copyright Grant. Subject to the terms and conditions of this
# License, each Licensor grants to you a perpetual, worldwide,
# non-exclusive, royalty-free, copyright license to reproduce,
# prepare derivative works of, publicly display, publicly perform,
# sublicense and distribute its Work and any resulting derivative
# works in any form.
# 3. Limitations
# 3.1 Redistribution. You may reproduce or distribute the Work only
# if (a) you do so under this License, (b) you include a complete
# copy of this License with your distribution, and (c) you retain
# without modification any copyright, patent, trademark, or
# attribution notices that are present in the Work.
# 3.2 Derivative Works. You may specify that additional or different
# terms apply to the use, reproduction, and distribution of your
# derivative works of the Work ("Your Terms") only if (a) Your Terms
# provide that the use limitation in Section 3.3 applies to your
# derivative works, and (b) you identify the specific derivative
# works that are subject to Your Terms. Notwithstanding Your Terms,
# this License (including the redistribution requirements in Section
# 3.1) will continue to apply to the Work itself.
# 3.3 Use Limitation. The Work and any derivative works thereof only
# may be used or intended for use non-commercially. Notwithstanding
# the foregoing, NVIDIA and its affiliates may use the Work and any
# derivative works commercially. As used herein, "non-commercially"
# means for research or evaluation purposes only.
# 3.4 Patent Claims. If you bring or threaten to bring a patent claim
# against any Licensor (including any claim, cross-claim or
# counterclaim in a lawsuit) to enforce any patents that you allege
# are infringed by any Work, then your rights under this License from
# such Licensor (including the grant in Section 2.1) will terminate
# immediately.
# 3.5 Trademarks. This License does not grant any rights to use any
# Licensor’s or its affiliates’ names, logos, or trademarks, except
# as necessary to reproduce the notices described in this License.
# 3.6 Termination. If you violate any term of this License, then your
# rights under this License (including the grant in Section 2.1) will
# terminate immediately.
# 4. Disclaimer of Warranty.
# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
# THIS LICENSE.
# 5. Limitation of Liability.
# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGES.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.models.builder import HEADS
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
from mmseg.ops import resize
@HEADS.register_module()
class SegformerHead(BaseDecodeHead):
"""The all mlp Head of segformer.
This head is the implementation of
    `SegFormer <https://arxiv.org/abs/2105.15203>`_.
Args:
        interpolate_mode (str): The interpolate mode of MLP head upsample
            operation. Default: 'bilinear'.
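    Example:
        A minimal forward sketch assuming four MiT-like feature maps
        (channel sizes and resolutions below are illustrative):
        >>> import torch
        >>> head = SegformerHead(
        ...     in_channels=[32, 64, 160, 256],
        ...     in_index=[0, 1, 2, 3],
        ...     channels=128,
        ...     num_classes=19)
        >>> inputs = [torch.rand(1, c, 64 // 2**i, 64 // 2**i)
        ...           for i, c in enumerate([32, 64, 160, 256])]
        >>> tuple(head(inputs).shape)
        (1, 19, 64, 64)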
"""
def __init__(self, interpolate_mode='bilinear', **kwargs):
super().__init__(input_transform='multiple_select', **kwargs)
self.interpolate_mode = interpolate_mode
num_inputs = len(self.in_channels)
assert num_inputs == len(self.in_index)
self.convs = nn.ModuleList()
for i in range(num_inputs):
self.convs.append(
ConvModule(
in_channels=self.in_channels[i],
out_channels=self.channels,
kernel_size=1,
stride=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
self.fusion_conv = ConvModule(
in_channels=self.channels * num_inputs,
out_channels=self.channels,
kernel_size=1,
norm_cfg=self.norm_cfg)
def forward(self, inputs):
# Receive 4 stage backbone feature map: 1/4, 1/8, 1/16, 1/32
inputs = self._transform_inputs(inputs)
outs = []
for idx in range(len(inputs)):
x = inputs[idx]
conv = self.convs[idx]
outs.append(
resize(
input=conv(x),
size=inputs[0].shape[2:],
mode=self.interpolate_mode,
align_corners=self.align_corners))
out = self.fusion_conv(torch.cat(outs, dim=1))
out = self.cls_seg(out)
return out
| 6,587 | 44.434483 | 93 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/decode_heads/segmenter_mask_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_norm_layer
from mmcv.cnn.utils.weight_init import (constant_init, trunc_normal_,
trunc_normal_init)
from mmcv.runner import ModuleList
from mmseg.models.backbones.vit import TransformerEncoderLayer
from ..builder import HEADS
from .decode_head import BaseDecodeHead
@HEADS.register_module()
class SegmenterMaskTransformerHead(BaseDecodeHead):
"""Segmenter: Transformer for Semantic Segmentation.
This head is the implementation of
    `Segmenter <https://arxiv.org/abs/2105.05633>`_.
Args:
        in_channels (int): The number of channels of the input feature map.
num_layers (int): The depth of transformer.
num_heads (int): The number of attention heads.
embed_dims (int): The number of embedding dimension.
mlp_ratio (int): ratio of mlp hidden dim to embedding dim.
Default: 4.
drop_path_rate (float): stochastic depth rate. Default 0.1.
drop_rate (float): Probability of an element to be zeroed.
Default 0.0
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
qkv_bias (bool): Enable bias for qkv if True. Default: True.
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN')
init_std (float): The value of std in weight initialization.
Default: 0.02.
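    Example:
        A minimal forward sketch with assumed sizes (``num_heads`` must
        divide ``embed_dims``):
        >>> import torch
        >>> head = SegmenterMaskTransformerHead(
        ...     in_channels=192, channels=192, num_classes=19,
        ...     num_layers=2, num_heads=3, embed_dims=192)
        >>> tuple(head([torch.rand(1, 192, 32, 32)]).shape)
        (1, 19, 32, 32)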
"""
def __init__(
self,
in_channels,
num_layers,
num_heads,
embed_dims,
mlp_ratio=4,
drop_path_rate=0.1,
drop_rate=0.0,
attn_drop_rate=0.0,
num_fcs=2,
qkv_bias=True,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
init_std=0.02,
**kwargs,
):
super(SegmenterMaskTransformerHead, self).__init__(
in_channels=in_channels, **kwargs)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_layers)]
self.layers = ModuleList()
for i in range(num_layers):
self.layers.append(
TransformerEncoderLayer(
embed_dims=embed_dims,
num_heads=num_heads,
feedforward_channels=mlp_ratio * embed_dims,
attn_drop_rate=attn_drop_rate,
drop_rate=drop_rate,
drop_path_rate=dpr[i],
num_fcs=num_fcs,
qkv_bias=qkv_bias,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
batch_first=True,
))
self.dec_proj = nn.Linear(in_channels, embed_dims)
self.cls_emb = nn.Parameter(
torch.randn(1, self.num_classes, embed_dims))
self.patch_proj = nn.Linear(embed_dims, embed_dims, bias=False)
self.classes_proj = nn.Linear(embed_dims, embed_dims, bias=False)
self.decoder_norm = build_norm_layer(
norm_cfg, embed_dims, postfix=1)[1]
self.mask_norm = build_norm_layer(
norm_cfg, self.num_classes, postfix=2)[1]
self.init_std = init_std
delattr(self, 'conv_seg')
def init_weights(self):
trunc_normal_(self.cls_emb, std=self.init_std)
trunc_normal_init(self.patch_proj, std=self.init_std)
trunc_normal_init(self.classes_proj, std=self.init_std)
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=self.init_std, bias=0)
elif isinstance(m, nn.LayerNorm):
constant_init(m, val=1.0, bias=0.0)
def forward(self, inputs):
x = self._transform_inputs(inputs)
b, c, h, w = x.shape
x = x.permute(0, 2, 3, 1).contiguous().view(b, -1, c)
x = self.dec_proj(x)
cls_emb = self.cls_emb.expand(x.size(0), -1, -1)
x = torch.cat((x, cls_emb), 1)
for layer in self.layers:
x = layer(x)
x = self.decoder_norm(x)
patches = self.patch_proj(x[:, :-self.num_classes])
cls_seg_feat = self.classes_proj(x[:, -self.num_classes:])
patches = F.normalize(patches, dim=2, p=2)
cls_seg_feat = F.normalize(cls_seg_feat, dim=2, p=2)
masks = patches @ cls_seg_feat.transpose(1, 2)
masks = self.mask_norm(masks)
masks = masks.permute(0, 2, 1).contiguous().view(b, -1, h, w)
return masks
| 4,895 | 35.537313 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/decode_heads/sep_aspp_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .aspp_head import ASPPHead, ASPPModule
class DepthwiseSeparableASPPModule(ASPPModule):
"""Atrous Spatial Pyramid Pooling (ASPP) Module with depthwise separable
conv."""
def __init__(self, **kwargs):
super(DepthwiseSeparableASPPModule, self).__init__(**kwargs)
for i, dilation in enumerate(self.dilations):
if dilation > 1:
self[i] = DepthwiseSeparableConvModule(
self.in_channels,
self.channels,
3,
dilation=dilation,
padding=dilation,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
@HEADS.register_module()
class DepthwiseSeparableASPPHead(ASPPHead):
"""Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation.
This head is the implementation of `DeepLabV3+
<https://arxiv.org/abs/1802.02611>`_.
Args:
        c1_in_channels (int): The input channels of c1 decoder. If it is 0,
            no c1 decoder will be used.
c1_channels (int): The intermediate channels of c1 decoder.
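    Example:
        A minimal forward sketch with assumed channel sizes; ``inputs[0]``
        feeds the c1 decoder and the last feature map feeds the ASPP branch:
        >>> import torch
        >>> head = DepthwiseSeparableASPPHead(
        ...     c1_in_channels=16, c1_channels=8,
        ...     in_channels=64, channels=32, num_classes=19)
        >>> inputs = [torch.rand(1, 16, 64, 64), torch.rand(1, 64, 16, 16)]
        >>> tuple(head(inputs).shape)
        (1, 19, 64, 64)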
"""
def __init__(self, c1_in_channels, c1_channels, **kwargs):
super(DepthwiseSeparableASPPHead, self).__init__(**kwargs)
assert c1_in_channels >= 0
self.aspp_modules = DepthwiseSeparableASPPModule(
dilations=self.dilations,
in_channels=self.in_channels,
channels=self.channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
if c1_in_channels > 0:
self.c1_bottleneck = ConvModule(
c1_in_channels,
c1_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
else:
self.c1_bottleneck = None
self.sep_bottleneck = nn.Sequential(
DepthwiseSeparableConvModule(
self.channels + c1_channels,
self.channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
DepthwiseSeparableConvModule(
self.channels,
self.channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
aspp_outs = [
resize(
self.image_pool(x),
size=x.size()[2:],
mode='bilinear',
align_corners=self.align_corners)
]
aspp_outs.extend(self.aspp_modules(x))
aspp_outs = torch.cat(aspp_outs, dim=1)
output = self.bottleneck(aspp_outs)
if self.c1_bottleneck is not None:
c1_output = self.c1_bottleneck(inputs[0])
output = resize(
input=output,
size=c1_output.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
output = torch.cat([output, c1_output], dim=1)
output = self.sep_bottleneck(output)
output = self.cls_seg(output)
return output
| 3,535 | 33.330097 | 76 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/decode_heads/sep_fcn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import DepthwiseSeparableConvModule
from ..builder import HEADS
from .fcn_head import FCNHead
@HEADS.register_module()
class DepthwiseSeparableFCNHead(FCNHead):
"""Depthwise-Separable Fully Convolutional Network for Semantic
Segmentation.
This head is implemented according to `Fast-SCNN: Fast Semantic
Segmentation Network <https://arxiv.org/abs/1902.04502>`_.
Args:
        in_channels (int): Number of output channels of FFM.
        channels (int): Number of middle-stage channels in the decode head.
        concat_input (bool): Whether to concatenate original decode input into
            the result of several consecutive convolution layers.
            Default: True.
        num_classes (int): Used to determine the dimension of
            final prediction tensor.
        in_index (int): Correspond with 'out_indices' in FastSCNN backbone.
        norm_cfg (dict | None): Config of norm layers.
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False.
        loss_decode (dict): Config of loss type and some
            relevant additional options.
        dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
            'default', it will be the same as `act_cfg`. Default: None.
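    Example:
        A minimal forward sketch with assumed channel sizes:
        >>> import torch
        >>> head = DepthwiseSeparableFCNHead(
        ...     in_channels=64, channels=32, num_classes=19,
        ...     concat_input=False)
        >>> tuple(head([torch.rand(1, 64, 16, 16)]).shape)
        (1, 19, 16, 16)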
"""
def __init__(self, dw_act_cfg=None, **kwargs):
super(DepthwiseSeparableFCNHead, self).__init__(**kwargs)
self.convs[0] = DepthwiseSeparableConvModule(
self.in_channels,
self.channels,
kernel_size=self.kernel_size,
padding=self.kernel_size // 2,
norm_cfg=self.norm_cfg,
dw_act_cfg=dw_act_cfg)
for i in range(1, self.num_convs):
self.convs[i] = DepthwiseSeparableConvModule(
self.channels,
self.channels,
kernel_size=self.kernel_size,
padding=self.kernel_size // 2,
norm_cfg=self.norm_cfg,
dw_act_cfg=dw_act_cfg)
if self.concat_input:
self.conv_cat = DepthwiseSeparableConvModule(
self.in_channels + self.channels,
self.channels,
kernel_size=self.kernel_size,
padding=self.kernel_size // 2,
norm_cfg=self.norm_cfg,
dw_act_cfg=dw_act_cfg)
| 2,406 | 38.459016 | 77 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/decode_heads/setr_mla_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.ops import Upsample
from ..builder import HEADS
from .decode_head import BaseDecodeHead
@HEADS.register_module()
class SETRMLAHead(BaseDecodeHead):
"""Multi level feature aggretation head of SETR.
MLA head of `SETR <https://arxiv.org/pdf/2012.15840.pdf>`_.
Args:
        mla_channels (int): Channels of conv-conv-4x of multi-level feature
            aggregation. Default: 128.
        up_scale (int): The scale factor of interpolate. Default: 4.
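    Example:
        A minimal forward sketch with assumed sizes; note that ``channels``
        must equal ``len(in_channels) * mla_channels``:
        >>> import torch
        >>> head = SETRMLAHead(
        ...     in_channels=[64, 64, 64, 64], in_index=[0, 1, 2, 3],
        ...     channels=64, mla_channels=16, num_classes=19)
        >>> inputs = [torch.rand(1, 64, 8, 8) for _ in range(4)]
        >>> tuple(head(inputs).shape)
        (1, 19, 32, 32)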
"""
def __init__(self, mla_channels=128, up_scale=4, **kwargs):
super(SETRMLAHead, self).__init__(
input_transform='multiple_select', **kwargs)
self.mla_channels = mla_channels
num_inputs = len(self.in_channels)
# Refer to self.cls_seg settings of BaseDecodeHead
assert self.channels == num_inputs * mla_channels
self.up_convs = nn.ModuleList()
for i in range(num_inputs):
self.up_convs.append(
nn.Sequential(
ConvModule(
in_channels=self.in_channels[i],
out_channels=mla_channels,
kernel_size=3,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
ConvModule(
in_channels=mla_channels,
out_channels=mla_channels,
kernel_size=3,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
Upsample(
scale_factor=up_scale,
mode='bilinear',
align_corners=self.align_corners)))
def forward(self, inputs):
inputs = self._transform_inputs(inputs)
outs = []
for x, up_conv in zip(inputs, self.up_convs):
outs.append(up_conv(x))
out = torch.cat(outs, dim=1)
out = self.cls_seg(out)
return out
| 2,177 | 33.03125 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/decode_heads/setr_up_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, build_norm_layer
from mmseg.ops import Upsample
from ..builder import HEADS
from .decode_head import BaseDecodeHead
@HEADS.register_module()
class SETRUPHead(BaseDecodeHead):
"""Naive upsampling head and Progressive upsampling head of SETR.
Naive or PUP head of `SETR <https://arxiv.org/pdf/2012.15840.pdf>`_.
Args:
norm_layer (dict): Config dict for input normalization.
Default: norm_layer=dict(type='LN', eps=1e-6, requires_grad=True).
num_convs (int): Number of decoder convolutions. Default: 1.
        up_scale (int): The scale factor of interpolate. Default: 4.
kernel_size (int): The kernel size of convolution when decoding
feature information from backbone. Default: 3.
init_cfg (dict | list[dict] | None): Initialization config dict.
Default: dict(
type='Constant', val=1.0, bias=0, layer='LayerNorm').
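    Example:
        A minimal forward sketch with assumed sizes (one conv-upsample
        stage, so the output is ``up_scale`` times the input resolution):
        >>> import torch
        >>> head = SETRUPHead(in_channels=32, channels=16, num_classes=19,
        ...                   num_convs=1, up_scale=4)
        >>> tuple(head([torch.rand(1, 32, 8, 8)]).shape)
        (1, 19, 32, 32)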
"""
def __init__(self,
norm_layer=dict(type='LN', eps=1e-6, requires_grad=True),
num_convs=1,
up_scale=4,
kernel_size=3,
init_cfg=[
dict(type='Constant', val=1.0, bias=0, layer='LayerNorm'),
dict(
type='Normal',
std=0.01,
override=dict(name='conv_seg'))
],
**kwargs):
assert kernel_size in [1, 3], 'kernel_size must be 1 or 3.'
super(SETRUPHead, self).__init__(init_cfg=init_cfg, **kwargs)
assert isinstance(self.in_channels, int)
_, self.norm = build_norm_layer(norm_layer, self.in_channels)
self.up_convs = nn.ModuleList()
in_channels = self.in_channels
out_channels = self.channels
for _ in range(num_convs):
self.up_convs.append(
nn.Sequential(
ConvModule(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
padding=int(kernel_size - 1) // 2,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
Upsample(
scale_factor=up_scale,
mode='bilinear',
align_corners=self.align_corners)))
in_channels = out_channels
def forward(self, x):
x = self._transform_inputs(x)
n, c, h, w = x.shape
x = x.reshape(n, c, h * w).transpose(2, 1).contiguous()
x = self.norm(x)
x = x.transpose(1, 2).reshape(n, c, h, w).contiguous()
for up_conv in self.up_convs:
x = up_conv(x)
out = self.cls_seg(x)
return out
| 2,962 | 35.134146 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/decode_heads/stdc_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from ..builder import HEADS
from .fcn_head import FCNHead
@HEADS.register_module()
class STDCHead(FCNHead):
"""This head is the implementation of `Rethinking BiSeNet For Real-time
Semantic Segmentation <https://arxiv.org/abs/2104.13188>`_.
Args:
boundary_threshold (float): The threshold of calculating boundary.
Default: 0.1.
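    Example:
        A standalone sketch of the boundary-target extraction idea used in
        ``losses`` (the label map below is assumed for illustration):
        >>> import torch
        >>> import torch.nn.functional as F
        >>> kernel = torch.tensor([-1, -1, -1, -1, 8, -1, -1, -1, -1],
        ...                       dtype=torch.float32).reshape(1, 1, 3, 3)
        >>> label = torch.zeros(1, 1, 8, 8)
        >>> label[..., 2:6, 2:6] = 1
        >>> edges = F.conv2d(label, kernel, padding=1).clamp(min=0)
        >>> boundary = (edges > 0.1).float()  # binary boundary target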
"""
def __init__(self, boundary_threshold=0.1, **kwargs):
super(STDCHead, self).__init__(**kwargs)
self.boundary_threshold = boundary_threshold
# Using register buffer to make laplacian kernel on the same
# device of `seg_label`.
self.register_buffer(
'laplacian_kernel',
torch.tensor([-1, -1, -1, -1, 8, -1, -1, -1, -1],
dtype=torch.float32,
requires_grad=False).reshape((1, 1, 3, 3)))
self.fusion_kernel = torch.nn.Parameter(
torch.tensor([[6. / 10], [3. / 10], [1. / 10]],
dtype=torch.float32).reshape(1, 3, 1, 1),
requires_grad=False)
def losses(self, seg_logit, seg_label):
"""Compute Detail Aggregation Loss."""
        # Note: The paper claims `fusion_kernel` is a trainable 1x1 conv
        # parameter. However, it is a constant in the original repo and other
        # codebases because it would not be added into the computation graph
        # after the threshold operation.
seg_label = seg_label.to(self.laplacian_kernel)
boundary_targets = F.conv2d(
seg_label, self.laplacian_kernel, padding=1)
boundary_targets = boundary_targets.clamp(min=0)
boundary_targets[boundary_targets > self.boundary_threshold] = 1
boundary_targets[boundary_targets <= self.boundary_threshold] = 0
boundary_targets_x2 = F.conv2d(
seg_label, self.laplacian_kernel, stride=2, padding=1)
boundary_targets_x2 = boundary_targets_x2.clamp(min=0)
boundary_targets_x4 = F.conv2d(
seg_label, self.laplacian_kernel, stride=4, padding=1)
boundary_targets_x4 = boundary_targets_x4.clamp(min=0)
boundary_targets_x4_up = F.interpolate(
boundary_targets_x4, boundary_targets.shape[2:], mode='nearest')
boundary_targets_x2_up = F.interpolate(
boundary_targets_x2, boundary_targets.shape[2:], mode='nearest')
boundary_targets_x2_up[
boundary_targets_x2_up > self.boundary_threshold] = 1
boundary_targets_x2_up[
boundary_targets_x2_up <= self.boundary_threshold] = 0
boundary_targets_x4_up[
boundary_targets_x4_up > self.boundary_threshold] = 1
boundary_targets_x4_up[
boundary_targets_x4_up <= self.boundary_threshold] = 0
boundary_targets_pyramids = torch.stack(
(boundary_targets, boundary_targets_x2_up, boundary_targets_x4_up),
dim=1)
boundary_targets_pyramids = boundary_targets_pyramids.squeeze(2)
boundary_targets_pyramid = F.conv2d(boundary_targets_pyramids,
self.fusion_kernel)
boundary_targets_pyramid[
boundary_targets_pyramid > self.boundary_threshold] = 1
boundary_targets_pyramid[
boundary_targets_pyramid <= self.boundary_threshold] = 0
loss = super(STDCHead, self).losses(seg_logit,
boundary_targets_pyramid.long())
return loss
| 3,584 | 40.686047 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/decode_heads/uper_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
from .psp_head import PPM
@HEADS.register_module()
class UPerHead(BaseDecodeHead):
"""Unified Perceptual Parsing for Scene Understanding.
This head is the implementation of `UPerNet
<https://arxiv.org/abs/1807.10221>`_.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module applied on the last feature. Default: (1, 2, 3, 6).
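    Example:
        A minimal forward sketch over four assumed pyramid levels:
        >>> import torch
        >>> head = UPerHead(
        ...     in_channels=[16, 32, 64, 128], in_index=[0, 1, 2, 3],
        ...     channels=32, num_classes=19)
        >>> inputs = [torch.rand(1, c, 64 // 2**i, 64 // 2**i)
        ...           for i, c in enumerate([16, 32, 64, 128])]
        >>> tuple(head(inputs).shape)
        (1, 19, 64, 64)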
"""
def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
super(UPerHead, self).__init__(
input_transform='multiple_select', **kwargs)
# PSP Module
self.psp_modules = PPM(
pool_scales,
self.in_channels[-1],
self.channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
align_corners=self.align_corners)
self.bottleneck = ConvModule(
self.in_channels[-1] + len(pool_scales) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = ConvModule(
in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
inplace=False)
fpn_conv = ConvModule(
self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = ConvModule(
len(self.in_channels) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def psp_forward(self, inputs):
"""Forward function of PSP module."""
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
output = self.bottleneck(psp_outs)
return output
def _forward_feature(self, inputs):
"""Forward function for feature maps before classifying each pixel with
``self.cls_seg`` fc.
Args:
inputs (list[Tensor]): List of multi-level img features.
Returns:
feats (Tensor): A tensor of shape (batch_size, self.channels,
H, W) which is feature map for last layer of decoder head.
"""
inputs = self._transform_inputs(inputs)
# build laterals
laterals = [
lateral_conv(inputs[i])
for i, lateral_conv in enumerate(self.lateral_convs)
]
laterals.append(self.psp_forward(inputs))
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + resize(
laterals[i],
size=prev_shape,
mode='bilinear',
align_corners=self.align_corners)
# build outputs
fpn_outs = [
self.fpn_convs[i](laterals[i])
for i in range(used_backbone_levels - 1)
]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = resize(
fpn_outs[i],
size=fpn_outs[0].shape[2:],
mode='bilinear',
align_corners=self.align_corners)
fpn_outs = torch.cat(fpn_outs, dim=1)
feats = self.fpn_bottleneck(fpn_outs)
return feats
def forward(self, inputs):
"""Forward function."""
output = self._forward_feature(inputs)
output = self.cls_seg(output)
return output
| 4,516 | 31.035461 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/losses/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .focal_loss import FocalLoss
from .lovasz_loss import LovaszLoss
from .tversky_loss import TverskyLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss',
'FocalLoss', 'TverskyLoss'
]
| 681 | 39.117647 | 72 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/losses/accuracy.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
def accuracy(pred, target, topk=1, thresh=None, ignore_index=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
        target (torch.Tensor): The target of each prediction, shape (N, ...)
ignore_index (int | None): The label index to be ignored. Default: None
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
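    Example:
        A toy top-1 check with assumed values:
        >>> import torch
        >>> pred = torch.tensor([[0.2, 0.8], [0.9, 0.1]])
        >>> target = torch.tensor([1, 0])
        >>> acc = accuracy(pred, target)  # both correct -> tensor([100.])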
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
        accu = [pred.new_tensor(0.) for _ in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == target.ndim + 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
# transpose to shape (maxk, N, ...)
pred_label = pred_label.transpose(0, 1)
correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
if ignore_index is not None:
correct = correct[:, target != ignore_index]
res = []
eps = torch.finfo(torch.float32).eps
for k in topk:
# Avoid causing ZeroDivisionError when all pixels
# of an image are ignored
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + eps
if ignore_index is not None:
total_num = target[target != ignore_index].numel() + eps
else:
total_num = target.numel() + eps
res.append(correct_k.mul_(100.0 / total_num))
return res[0] if return_single else res
class Accuracy(nn.Module):
"""Accuracy calculation module."""
def __init__(self, topk=(1, ), thresh=None, ignore_index=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Default to None.
            ignore_index (int | None, optional): The label index to be
                ignored. Default: None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
self.ignore_index = ignore_index
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh,
self.ignore_index)
| 3,618 | 37.913978 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/losses/cross_entropy_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=-100,
avg_non_ignore=False):
"""cross_entropy. The wrapper function for :func:`F.cross_entropy`
Args:
        pred (torch.Tensor): The prediction with shape (N, C, ...), where
            C is the number of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
Default: None.
class_weight (list[float], optional): The weight for each class.
Default: None.
reduction (str, optional): The method used to reduce the loss.
Options are 'none', 'mean' and 'sum'. Default: 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Default: None.
ignore_index (int): Specifies a target value that is ignored and
does not contribute to the input gradients. When
            ``avg_non_ignore`` is ``True`` and the ``reduction`` is
            ``'mean'``, the loss is averaged over non-ignored targets.
            Default: -100.
        avg_non_ignore (bool): Whether the loss is only averaged over
            non-ignored targets. Default: False.
`New in version 0.23.0.`
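    Example:
        A minimal call with assumed random logits and labels:
        >>> import torch
        >>> pred = torch.rand(2, 4, 8, 8)   # (N, C, H, W) logits
        >>> label = torch.randint(0, 4, (2, 8, 8))
        >>> loss = cross_entropy(pred, label)  # scalar mean loss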
"""
    # class_weight is a manual rescaling weight given to each class.
    # If given, it has to be a Tensor of size C.
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# apply weights and do the reduction
# average loss over non-ignored elements
# pytorch's official cross_entropy average loss over non-ignored elements
# refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa
if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
avg_factor = label.numel() - (label == ignore_index).sum().item()
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_shape)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(valid_mask, as_tuple=True)
if inds[0].numel() > 0:
if labels.dim() == 3:
bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1
else:
bin_labels[inds[0], labels[valid_mask]] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights = bin_label_weights * valid_mask
return bin_labels, bin_label_weights, valid_mask
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100,
avg_non_ignore=False,
**kwargs):
"""Calculate the binary CrossEntropy loss.
Args:
        pred (torch.Tensor): The prediction with shape (N, C) or
            (N, C, H, W). For binary class segmentation, C may be 1.
label (torch.Tensor): The learning label of the prediction.
Note: In bce loss, label < 0 is invalid.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int): The label index to be ignored. Default: -100.
        avg_non_ignore (bool): Whether the loss is only averaged over
            non-ignored targets. Default: False.
`New in version 0.23.0.`
Returns:
torch.Tensor: The calculated loss
"""
if pred.size(1) == 1:
# For binary class segmentation, the shape of pred is
# [N, 1, H, W] and that of label is [N, H, W].
# As the ignore_index often set as 255, so the
# binary class label check should mask out
# ignore_index
assert label[label != ignore_index].max() <= 1, \
'For pred with shape [N, 1, H, W], its label must have at ' \
'most 2 classes'
pred = pred.squeeze(1)
if pred.dim() != label.dim():
assert (pred.dim() == 2 and label.dim() == 1) or (
pred.dim() == 4 and label.dim() == 3), \
'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \
'H, W], label shape [N, H, W] are supported'
# `weight` returned from `_expand_onehot_labels`
# has been treated for valid (non-ignore) pixels
label, weight, valid_mask = _expand_onehot_labels(
label, weight, pred.shape, ignore_index)
else:
# should mask out the ignored elements
valid_mask = ((label >= 0) & (label != ignore_index)).float()
if weight is not None:
weight = weight * valid_mask
else:
weight = valid_mask
# average loss over non-ignored and valid elements
if reduction == 'mean' and avg_factor is None and avg_non_ignore:
avg_factor = valid_mask.sum().item()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None,
**kwargs):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask's
            corresponding object. This will be used to select the mask of the
            class which the object belongs to when the mask prediction is not
            class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
instead of softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum". Defaults to 'mean'.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
loss_name (str, optional): Name of the loss item. If you want this loss
item to be included into the backward graph, `loss_` must be the
prefix of the name. Defaults to 'loss_ce'.
        avg_non_ignore (bool): Whether the loss is only averaged over
            non-ignored targets. Default: False.
`New in version 0.23.0.`
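    Example:
        An assumed minimal use on random segmentation logits:
        >>> import torch
        >>> criterion = CrossEntropyLoss(avg_non_ignore=True)
        >>> seg_logit = torch.rand(2, 4, 8, 8)
        >>> seg_label = torch.randint(0, 4, (2, 8, 8))
        >>> loss = criterion(seg_logit, seg_label, ignore_index=255)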
"""
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
loss_weight=1.0,
loss_name='loss_ce',
avg_non_ignore=False):
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = get_class_weight(class_weight)
self.avg_non_ignore = avg_non_ignore
if not self.avg_non_ignore and self.reduction == 'mean':
warnings.warn(
'Default ``avg_non_ignore`` is False, if you would like to '
'ignore the certain label and average loss over non-ignore '
'labels, which is the same with PyTorch official '
'cross_entropy, set ``avg_non_ignore=True``.')
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
self._loss_name = loss_name
def extra_repr(self):
"""Extra repr."""
s = f'avg_non_ignore={self.avg_non_ignore}'
return s
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
ignore_index=-100,
**kwargs):
"""Forward function."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
# Note: for BCE loss, label < 0 is invalid.
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
avg_non_ignore=self.avg_non_ignore,
ignore_index=ignore_index,
**kwargs)
return loss_cls
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
included into the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
| 11,998 | 39.400673 | 132 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/losses/dice_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Modified from https://github.com/LikeLy-Journey/SegmenTron/blob/master/
segmentron/solver/loss.py (Apache-2.0 License)"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weighted_loss
@weighted_loss
def dice_loss(pred,
target,
valid_mask,
smooth=1,
exponent=2,
class_weight=None,
ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
dice_loss = binary_dice_loss(
pred[:, i],
target[..., i],
valid_mask=valid_mask,
smooth=smooth,
exponent=exponent)
if class_weight is not None:
dice_loss *= class_weight[i]
total_loss += dice_loss
return total_loss / num_classes
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwargs):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth
return 1 - num / den
@LOSSES.register_module()
class DiceLoss(nn.Module):
"""DiceLoss.
This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
smooth (float): A float number to smooth loss, and avoid NaN error.
Default: 1
        exponent (float): A float number to calculate denominator
            value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". Default: 'mean'.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Default to 1.0.
ignore_index (int | None): The label index to be ignored. Default: 255.
loss_name (str, optional): Name of the loss item. If you want this loss
item to be included into the backward graph, `loss_` must be the
prefix of the name. Defaults to 'loss_dice'.
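    Example:
        An assumed minimal use on random logits:
        >>> import torch
        >>> criterion = DiceLoss()
        >>> pred = torch.rand(2, 4, 8, 8)
        >>> target = torch.randint(0, 4, (2, 8, 8))
        >>> loss = criterion(pred, target)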
"""
def __init__(self,
smooth=1,
exponent=2,
reduction='mean',
class_weight=None,
loss_weight=1.0,
ignore_index=255,
loss_name='loss_dice',
**kwargs):
super(DiceLoss, self).__init__()
self.smooth = smooth
self.exponent = exponent
self.reduction = reduction
self.class_weight = get_class_weight(class_weight)
self.loss_weight = loss_weight
self.ignore_index = ignore_index
self._loss_name = loss_name
def forward(self,
pred,
target,
avg_factor=None,
reduction_override=None,
**kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = pred.new_tensor(self.class_weight)
else:
class_weight = None
pred = F.softmax(pred, dim=1)
num_classes = pred.shape[1]
one_hot_target = F.one_hot(
torch.clamp(target.long(), 0, num_classes - 1),
num_classes=num_classes)
valid_mask = (target != self.ignore_index).long()
loss = self.loss_weight * dice_loss(
pred,
one_hot_target,
valid_mask=valid_mask,
reduction=reduction,
avg_factor=avg_factor,
smooth=self.smooth,
exponent=self.exponent,
class_weight=class_weight,
ignore_index=self.ignore_index)
return loss
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
included into the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
| 4,928 | 34.717391 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/losses/focal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/open-mmlab/mmdetection
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from ..builder import LOSSES
from .utils import weight_reduce_loss
# This method is used when cuda is not available
def py_sigmoid_focal_loss(pred,
target,
one_hot_target=None,
weight=None,
gamma=2.0,
alpha=0.5,
class_weight=None,
valid_mask=None,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning label of the prediction with
shape (N, C)
one_hot_target (None): Placeholder. It should be None.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float | list[float], optional): A balanced form for Focal Loss.
Defaults to 0.5.
class_weight (list[float], optional): Weight of each class.
Defaults to None.
valid_mask (torch.Tensor, optional): A mask uses 1 to mark the valid
samples and uses 0 to mark the ignored samples. Default: None.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
if isinstance(alpha, list):
alpha = pred.new_tensor(alpha)
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
one_minus_pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * one_minus_pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
final_weight = torch.ones(1, pred.size(1)).type_as(loss)
if weight is not None:
if weight.shape != loss.shape and weight.size(0) == loss.size(0):
# For most cases, weight is of shape (N, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
assert weight.dim() == loss.dim()
final_weight = final_weight * weight
if class_weight is not None:
final_weight = final_weight * pred.new_tensor(class_weight)
if valid_mask is not None:
final_weight = final_weight * valid_mask
loss = weight_reduce_loss(loss, final_weight, reduction, avg_factor)
return loss
def sigmoid_focal_loss(pred,
target,
one_hot_target,
weight=None,
gamma=2.0,
alpha=0.5,
class_weight=None,
valid_mask=None,
reduction='mean',
avg_factor=None):
r"""A wrapper of cuda version `Focal Loss
<https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
        target (torch.Tensor): The learning label of the prediction. Its shape
            should be (N, ).
one_hot_target (torch.Tensor): The learning label with shape (N, C)
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float | list[float], optional): A balanced form for Focal Loss.
Defaults to 0.5.
class_weight (list[float], optional): Weight of each class.
Defaults to None.
valid_mask (torch.Tensor, optional): A mask uses 1 to mark the valid
samples and uses 0 to mark the ignored samples. Default: None.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
final_weight = torch.ones(1, pred.size(1)).type_as(pred)
if isinstance(alpha, list):
# _sigmoid_focal_loss doesn't accept alpha of list type. Therefore, if
# a list is given, we set the input alpha as 0.5. This means setting
# equal weight for foreground class and background class. By
# multiplying the loss by 2, the effect of setting alpha as 0.5 is
# undone. The alpha of type list is used to regulate the loss in the
# post-processing process.
loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(),
gamma, 0.5, None, 'none') * 2
alpha = pred.new_tensor(alpha)
final_weight = final_weight * (
alpha * one_hot_target + (1 - alpha) * (1 - one_hot_target))
else:
loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(),
gamma, alpha, None, 'none')
if weight is not None:
if weight.shape != loss.shape and weight.size(0) == loss.size(0):
# For most cases, weight is of shape (N, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
assert weight.dim() == loss.dim()
final_weight = final_weight * weight
if class_weight is not None:
final_weight = final_weight * pred.new_tensor(class_weight)
if valid_mask is not None:
final_weight = final_weight * valid_mask
loss = weight_reduce_loss(loss, final_weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.5,
reduction='mean',
class_weight=None,
loss_weight=1.0,
loss_name='loss_focal'):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_
Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                instead of softmax. Defaults to True.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float | list[float], optional): A balanced form for Focal
Loss. Defaults to 0.5. When a list is provided, the length
of the list should be equal to the number of classes.
Please be careful that this parameter is not the
class-wise weight but the weight of a binary classification
problem. This binary classification problem regards the
pixels which belong to one class as the foreground
and the other pixels as the background, each element in
the list is the weight of the corresponding foreground class.
The value of alpha or each element of alpha should be a float
in the interval [0, 1]. If you want to specify the class-wise
weight, please use `class_weight` parameter.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
class_weight (list[float], optional): Weight of each class.
Defaults to None.
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
loss_name (str, optional): Name of the loss item. If you want this
loss item to be included into the backward graph, `loss_` must
be the prefix of the name. Defaults to 'loss_focal'.
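        Example:
            An assumed minimal use on random CPU logits (when CUDA is
            unavailable the pure-PyTorch ``py_sigmoid_focal_loss`` path is
            taken):
            >>> import torch
            >>> criterion = FocalLoss()
            >>> pred = torch.rand(2, 4, 8, 8)
            >>> target = torch.randint(0, 4, (2, 8, 8))
            >>> loss = criterion(pred, target)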
"""
super(FocalLoss, self).__init__()
assert use_sigmoid is True, \
'AssertionError: Only sigmoid focal loss supported now.'
assert reduction in ('none', 'mean', 'sum'), \
"AssertionError: reduction should be 'none', 'mean' or " \
"'sum'"
assert isinstance(alpha, (float, list)), \
            'AssertionError: alpha should be of type float or list'
assert isinstance(gamma, float), \
'AssertionError: gamma should be of type float'
assert isinstance(loss_weight, float), \
'AssertionError: loss_weight should be of type float'
assert isinstance(loss_name, str), \
'AssertionError: loss_name should be of type str'
assert isinstance(class_weight, list) or class_weight is None, \
'AssertionError: class_weight must be None or of type list'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.class_weight = class_weight
self.loss_weight = loss_weight
self._loss_name = loss_name
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
ignore_index=255,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction with shape
(N, C) where C = number of classes, or
(N, C, d_1, d_2, ..., d_K) with K≥1 in the
case of K-dimensional loss.
target (torch.Tensor): The ground truth. If containing class
indices, shape (N) where each value is 0≤targets[i]≤C−1,
or (N, d_1, d_2, ..., d_K) with K≥1 in the case of
K-dimensional loss. If containing class probabilities,
same shape as the input.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to
average the loss. Defaults to None.
reduction_override (str, optional): The reduction method used
to override the original reduction method of the loss.
Options are "none", "mean" and "sum".
ignore_index (int, optional): The label index to be ignored.
Default: 255
Returns:
torch.Tensor: The calculated loss
"""
assert isinstance(ignore_index, int), \
'ignore_index must be of type int'
assert reduction_override in (None, 'none', 'mean', 'sum'), \
"AssertionError: reduction should be 'none', 'mean' or " \
"'sum'"
assert pred.shape == target.shape or \
(pred.size(0) == target.size(0) and
pred.shape[2:] == target.shape[1:]), \
"The shape of pred doesn't match the shape of target"
original_shape = pred.shape
# [B, C, d_1, d_2, ..., d_k] -> [C, B, d_1, d_2, ..., d_k]
pred = pred.transpose(0, 1)
# [C, B, d_1, d_2, ..., d_k] -> [C, N]
pred = pred.reshape(pred.size(0), -1)
# [C, N] -> [N, C]
pred = pred.transpose(0, 1).contiguous()
if original_shape == target.shape:
# target with shape [B, C, d_1, d_2, ...]
            # transform its shape into [N, C]
# [B, C, d_1, d_2, ...] -> [C, B, d_1, d_2, ..., d_k]
target = target.transpose(0, 1)
# [C, B, d_1, d_2, ..., d_k] -> [C, N]
target = target.reshape(target.size(0), -1)
# [C, N] -> [N, C]
target = target.transpose(0, 1).contiguous()
else:
# target with shape [B, d_1, d_2, ...]
            # transform its shape into [N, ]
target = target.view(-1).contiguous()
valid_mask = (target != ignore_index).view(-1, 1)
# avoid raising error when using F.one_hot()
target = torch.where(target == ignore_index, target.new_tensor(0),
target)
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
num_classes = pred.size(1)
if torch.cuda.is_available() and pred.is_cuda:
if target.dim() == 1:
one_hot_target = F.one_hot(target, num_classes=num_classes)
else:
one_hot_target = target
target = target.argmax(dim=1)
valid_mask = (target != ignore_index).view(-1, 1)
calculate_loss_func = sigmoid_focal_loss
else:
one_hot_target = None
if target.dim() == 1:
target = F.one_hot(target, num_classes=num_classes)
else:
valid_mask = (target.argmax(dim=1) != ignore_index).view(
-1, 1)
calculate_loss_func = py_sigmoid_focal_loss
loss_cls = self.loss_weight * calculate_loss_func(
pred,
target,
one_hot_target,
weight,
gamma=self.gamma,
alpha=self.alpha,
class_weight=self.class_weight,
valid_mask=valid_mask,
reduction=reduction,
avg_factor=avg_factor)
if reduction == 'none':
# [N, C] -> [C, N]
loss_cls = loss_cls.transpose(0, 1)
# [C, N] -> [C, B, d1, d2, ...]
# original_shape: [B, C, d1, d2, ...]
loss_cls = loss_cls.reshape(original_shape[1],
original_shape[0],
*original_shape[2:])
# [C, B, d1, d2, ...] -> [B, C, d1, d2, ...]
loss_cls = loss_cls.transpose(0, 1).contiguous()
else:
raise NotImplementedError
return loss_cls
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
included into the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
| 15,001 | 44.737805 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/losses/lovasz_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Modified from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytor
ch/lovasz_losses.py Lovasz-Softmax and Jaccard hinge loss in PyTorch Maxim
Berman 2018 ESAT-PSI KU Leuven (MIT License)"""
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss
def lovasz_grad(gt_sorted):
"""Computes gradient of the Lovasz extension w.r.t sorted errors.
See Alg. 1 in paper.
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def flatten_binary_logits(logits, labels, ignore_index=None):
"""Flattens predictions in the batch (binary case) Remove labels equal to
'ignore_index'."""
logits = logits.view(-1)
labels = labels.view(-1)
if ignore_index is None:
return logits, labels
valid = (labels != ignore_index)
vlogits = logits[valid]
vlabels = labels[valid]
return vlogits, vlabels
def flatten_probs(probs, labels, ignore_index=None):
"""Flattens predictions in the batch."""
if probs.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probs.size()
probs = probs.view(B, 1, H, W)
B, C, H, W = probs.size()
    # (B, C, H, W) -> (B*H*W, C), i.e. (P, C)
    probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C)
labels = labels.view(-1)
if ignore_index is None:
return probs, labels
valid = (labels != ignore_index)
vprobs = probs[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobs, vlabels
def lovasz_hinge_flat(logits, labels):
"""Binary Lovasz hinge loss.
Args:
logits (torch.Tensor): [P], logits at each prediction
(between -infty and +infty).
labels (torch.Tensor): [P], binary ground truth labels (0 or 1).
Returns:
torch.Tensor: The calculated loss.
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
signs = 2. * labels.float() - 1.
errors = (1. - logits * signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), grad)
return loss
def lovasz_hinge(logits,
labels,
classes='present',
per_image=False,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=255):
"""Binary Lovasz hinge loss.
Args:
logits (torch.Tensor): [B, H, W], logits at each pixel
(between -infty and +infty).
labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1).
        classes (str | list[int], optional): Placeholder, to be consistent
            with other loss. Default: 'present'.
per_image (bool, optional): If per_image is True, compute the loss per
image instead of per batch. Default: False.
class_weight (list[float], optional): Placeholder, to be consistent
with other loss. Default: None.
reduction (str, optional): The method used to reduce the loss. Options
are "none", "mean" and "sum". This parameter only works when
per_image is True. Default: 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. This parameter only works when per_image is True.
Default: None.
ignore_index (int | None): The label index to be ignored. Default: 255.
Returns:
torch.Tensor: The calculated loss.
"""
if per_image:
loss = [
lovasz_hinge_flat(*flatten_binary_logits(
logit.unsqueeze(0), label.unsqueeze(0), ignore_index))
for logit, label in zip(logits, labels)
]
loss = weight_reduce_loss(
torch.stack(loss), None, reduction, avg_factor)
else:
loss = lovasz_hinge_flat(
*flatten_binary_logits(logits, labels, ignore_index))
return loss
def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None):
"""Multi-class Lovasz-Softmax loss.
Args:
probs (torch.Tensor): [P, C], class probabilities at each prediction
(between 0 and 1).
labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1).
classes (str | list[int], optional): Classes chosen to calculate loss.
'all' for all classes, 'present' for classes present in labels, or
a list of classes to average. Default: 'present'.
class_weight (list[float], optional): The weight for each class.
Default: None.
Returns:
torch.Tensor: The calculated loss.
"""
if probs.numel() == 0:
# only void pixels, the gradients should be 0
return probs * 0.
C = probs.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if (classes == 'present' and fg.sum() == 0):
continue
if C == 1:
if len(classes) > 1:
raise ValueError('Sigmoid output possible only with 1 class')
class_pred = probs[:, 0]
else:
class_pred = probs[:, c]
errors = (fg - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted))
if class_weight is not None:
loss *= class_weight[c]
losses.append(loss)
return torch.stack(losses).mean()
def lovasz_softmax(probs,
labels,
classes='present',
per_image=False,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=255):
"""Multi-class Lovasz-Softmax loss.
Args:
probs (torch.Tensor): [B, C, H, W], class probabilities at each
prediction (between 0 and 1).
labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and
C - 1).
classes (str | list[int], optional): Classes chosen to calculate loss.
'all' for all classes, 'present' for classes present in labels, or
a list of classes to average. Default: 'present'.
per_image (bool, optional): If per_image is True, compute the loss per
image instead of per batch. Default: False.
class_weight (list[float], optional): The weight for each class.
Default: None.
reduction (str, optional): The method used to reduce the loss. Options
are "none", "mean" and "sum". This parameter only works when
per_image is True. Default: 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. This parameter only works when per_image is True.
Default: None.
ignore_index (int | None): The label index to be ignored. Default: 255.
Returns:
torch.Tensor: The calculated loss.
"""
if per_image:
loss = [
lovasz_softmax_flat(
*flatten_probs(
prob.unsqueeze(0), label.unsqueeze(0), ignore_index),
classes=classes,
class_weight=class_weight)
for prob, label in zip(probs, labels)
]
loss = weight_reduce_loss(
torch.stack(loss), None, reduction, avg_factor)
else:
loss = lovasz_softmax_flat(
*flatten_probs(probs, labels, ignore_index),
classes=classes,
class_weight=class_weight)
return loss
@LOSSES.register_module()
class LovaszLoss(nn.Module):
"""LovaszLoss.
This loss is proposed in `The Lovasz-Softmax loss: A tractable surrogate
for the optimization of the intersection-over-union measure in neural
networks <https://arxiv.org/abs/1705.08790>`_.
Args:
loss_type (str, optional): Binary or multi-class loss.
Default: 'multi_class'. Options are "binary" and "multi_class".
classes (str | list[int], optional): Classes chosen to calculate loss.
'all' for all classes, 'present' for classes present in labels, or
a list of classes to average. Default: 'present'.
per_image (bool, optional): If per_image is True, compute the loss per
image instead of per batch. Default: False.
reduction (str, optional): The method used to reduce the loss. Options
are "none", "mean" and "sum". This parameter only works when
per_image is True. Default: 'mean'.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
loss_name (str, optional): Name of the loss item. If you want this loss
item to be included into the backward graph, `loss_` must be the
prefix of the name. Defaults to 'loss_lovasz'.
"""
def __init__(self,
loss_type='multi_class',
classes='present',
per_image=False,
reduction='mean',
class_weight=None,
loss_weight=1.0,
loss_name='loss_lovasz'):
super(LovaszLoss, self).__init__()
assert loss_type in ('binary', 'multi_class'), "loss_type should be \
'binary' or 'multi_class'."
if loss_type == 'binary':
self.cls_criterion = lovasz_hinge
else:
self.cls_criterion = lovasz_softmax
assert classes in ('all', 'present') or mmcv.is_list_of(classes, int)
if not per_image:
assert reduction == 'none', "reduction should be 'none' when \
per_image is False."
self.classes = classes
self.per_image = per_image
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = get_class_weight(class_weight)
self._loss_name = loss_name
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
# if multi-class loss, transform logits to probs
if self.cls_criterion == lovasz_softmax:
cls_score = F.softmax(cls_score, dim=1)
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
self.classes,
self.per_image,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
included into the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
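# A minimal usage sketch (illustrative, not part of the original file). Note
# that multi-class LovaszLoss with per_image=False requires reduction='none':
#   >>> criterion = LovaszLoss(loss_type='multi_class', reduction='none')
#   >>> logits = torch.randn(2, 4, 8, 8)           # (B, C, H, W)
#   >>> labels = torch.randint(0, 4, (2, 8, 8))    # (B, H, W)
#   >>> loss = criterion(logits, labels)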
| 12,223 | 36.728395 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/losses/tversky_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Modified from
https://github.com/JunMa11/SegLoss/blob/master/losses_pytorch/dice_loss.py#L333
(Apache-2.0 License)"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weighted_loss
@weighted_loss
def tversky_loss(pred,
target,
valid_mask,
alpha=0.3,
beta=0.7,
gamma=1.0,
smooth=1,
class_weight=None,
ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
tversky_loss = binary_tversky_loss(
pred[:, i],
target[..., i],
valid_mask=valid_mask,
alpha=alpha,
beta=beta,
smooth=smooth)
if gamma > 1.0:
tversky_loss **= (1 / gamma)
if class_weight is not None:
tversky_loss *= class_weight[i]
total_loss += tversky_loss
return total_loss / num_classes
@weighted_loss
def binary_tversky_loss(pred,
target,
valid_mask,
alpha=0.3,
beta=0.7,
smooth=1):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
TP = torch.sum(torch.mul(pred, target) * valid_mask, dim=1)
FP = torch.sum(torch.mul(pred, 1 - target) * valid_mask, dim=1)
FN = torch.sum(torch.mul(1 - pred, target) * valid_mask, dim=1)
tversky = (TP + smooth) / (TP + alpha * FP + beta * FN + smooth)
return 1 - tversky
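# Illustrative note (not part of the original file): the Tversky index above
# reduces to the Dice coefficient when alpha = beta = 0.5, and weighting
# beta > alpha (the default here) penalizes false negatives more heavily.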
@LOSSES.register_module()
class TverskyLoss(nn.Module):
"""TverskyLoss. This loss is proposed in `Tversky loss function for image
segmentation using 3D fully convolutional deep networks.
    <https://arxiv.org/abs/1706.05721>`_
and `A novel focal Tversky loss function with improved attention U-Net for
lesion segmentation.
<https://arxiv.org/abs/1810.07842>`_.
Args:
smooth (float): A float number to smooth loss, and avoid NaN error.
Default: 1.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Default to 1.0.
ignore_index (int | None): The label index to be ignored. Default: 255.
        alpha (float, in [0, 1]):
The coefficient of false positives. Default: 0.3.
beta (float, in [0, 1]):
The coefficient of false negatives. Default: 0.7.
Note: alpha + beta = 1.
gamma (float, in [1, inf]): The focal term. When `gamma` > 1,
the loss focuses more on less accurate predictions that
have been misclassified. Default: 1.0.
loss_name (str, optional): Name of the loss item. If you want this loss
item to be included into the backward graph, `loss_` must be the
prefix of the name. Defaults to 'loss_tversky'.
"""
def __init__(self,
smooth=1,
class_weight=None,
loss_weight=1.0,
ignore_index=255,
alpha=0.3,
beta=0.7,
gamma=1.0,
loss_name='loss_tversky'):
super(TverskyLoss, self).__init__()
self.smooth = smooth
self.class_weight = get_class_weight(class_weight)
self.loss_weight = loss_weight
self.ignore_index = ignore_index
        assert (alpha + beta == 1.0), 'Sum of alpha and beta must be 1.0!'
assert gamma >= 1.0, 'gamma should be at least 1.0!'
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self._loss_name = loss_name
def forward(self, pred, target, **kwargs):
if self.class_weight is not None:
class_weight = pred.new_tensor(self.class_weight)
else:
class_weight = None
pred = F.softmax(pred, dim=1)
num_classes = pred.shape[1]
one_hot_target = F.one_hot(
torch.clamp(target.long(), 0, num_classes - 1),
num_classes=num_classes)
valid_mask = (target != self.ignore_index).long()
loss = self.loss_weight * tversky_loss(
pred,
one_hot_target,
valid_mask=valid_mask,
alpha=self.alpha,
beta=self.beta,
gamma=self.gamma,
smooth=self.smooth,
class_weight=class_weight,
ignore_index=self.ignore_index)
return loss
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
included into the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
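# A minimal usage sketch (illustrative, not part of the original file):
#   >>> criterion = TverskyLoss(alpha=0.3, beta=0.7, gamma=1.0)
#   >>> pred = torch.randn(2, 4, 8, 8)             # raw logits
#   >>> target = torch.randint(0, 4, (2, 8, 8))    # class indices
#   >>> loss = criterion(pred, target)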
| 5,419 | 34.657895 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/losses/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
# take it as a file path
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
else:
# pkl, json or yaml
class_weight = mmcv.load(class_weight)
return class_weight
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
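# A minimal sketch of the avg_factor behaviour (illustrative, not part of the
# original file): with reduction='mean' and an explicit avg_factor, the loss
# is summed and divided by avg_factor instead of the number of elements:
#   >>> loss = torch.tensor([1., 2., 3.])
#   >>> weight_reduce_loss(loss, reduction='mean', avg_factor=2)
#   tensor(3.0000)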
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
| 3,945 | 30.070866 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/necks/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .featurepyramid import Feature2Pyramid
from .fpn import FPN
from .ic_neck import ICNeck
from .jpu import JPU
from .mla_neck import MLANeck
from .multilevel_neck import MultiLevelNeck
__all__ = [
'FPN', 'MultiLevelNeck', 'MLANeck', 'ICNeck', 'JPU', 'Feature2Pyramid'
]
| 326 | 26.25 | 74 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/necks/featurepyramid.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import build_norm_layer
from ..builder import NECKS
@NECKS.register_module()
class Feature2Pyramid(nn.Module):
"""Feature2Pyramid.
    A neck structure connecting the ViT backbone and decoder heads.
    Args:
        embed_dim (int): Embedding dimension.
        rescales (list[float]): Different sampling multiples used to
            obtain pyramid features. Default: [4, 2, 1, 0.5].
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='SyncBN', requires_grad=True).
"""
def __init__(self,
embed_dim,
rescales=[4, 2, 1, 0.5],
norm_cfg=dict(type='SyncBN', requires_grad=True)):
super(Feature2Pyramid, self).__init__()
self.rescales = rescales
self.upsample_4x = None
for k in self.rescales:
if k == 4:
self.upsample_4x = nn.Sequential(
nn.ConvTranspose2d(
embed_dim, embed_dim, kernel_size=2, stride=2),
build_norm_layer(norm_cfg, embed_dim)[1],
nn.GELU(),
nn.ConvTranspose2d(
embed_dim, embed_dim, kernel_size=2, stride=2),
)
elif k == 2:
self.upsample_2x = nn.Sequential(
nn.ConvTranspose2d(
embed_dim, embed_dim, kernel_size=2, stride=2))
elif k == 1:
self.identity = nn.Identity()
elif k == 0.5:
self.downsample_2x = nn.MaxPool2d(kernel_size=2, stride=2)
elif k == 0.25:
self.downsample_4x = nn.MaxPool2d(kernel_size=4, stride=4)
else:
raise KeyError(f'invalid {k} for feature2pyramid')
def forward(self, inputs):
assert len(inputs) == len(self.rescales)
outputs = []
if self.upsample_4x is not None:
ops = [
self.upsample_4x, self.upsample_2x, self.identity,
self.downsample_2x
]
else:
ops = [
self.upsample_2x, self.identity, self.downsample_2x,
self.downsample_4x
]
for i in range(len(inputs)):
outputs.append(ops[i](inputs[i]))
return tuple(outputs)
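# A minimal usage sketch (illustrative, not part of the original file;
# norm_cfg is switched to plain BN so it runs without distributed training):
#   >>> neck = Feature2Pyramid(embed_dim=768, rescales=[4, 2, 1, 0.5],
#   ...                        norm_cfg=dict(type='BN'))
#   >>> feats = [torch.rand(1, 768, 32, 32) for _ in range(4)]
#   >>> outs = neck(feats)  # spatial sizes: 128, 64, 32 and 16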
| 2,417 | 34.558824 | 74 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/necks/fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16
from mmseg.ops import resize
from ..builder import NECKS
@NECKS.register_module()
class FPN(BaseModule):
"""Feature Pyramid Network.
This neck is the implementation of `Feature Pyramid Networks for Object
Detection <https://arxiv.org/abs/1612.03144>`_.
Args:
in_channels (list[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Default to False.
If True, its actual mode is specified by `extra_convs_on_inputs`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
on the original feature from the backbone. If True,
            it is equivalent to `add_extra_convs='on_input'`. If False, it is
            equivalent to setting `add_extra_convs='on_output'`. Default: False.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Default: False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (dict): Config dict for activation layer in ConvModule.
Default: None.
upsample_cfg (dict): Config dict for interpolate layer.
Default: dict(mode='nearest').
init_cfg (dict or list[dict], optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = FPN(in_channels, 11, len(in_channels)).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=False,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
upsample_cfg=dict(mode='nearest'),
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(FPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.fp16_enabled = False
self.upsample_cfg = upsample_cfg.copy()
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
assert isinstance(add_extra_convs, (str, bool))
if isinstance(add_extra_convs, str):
# Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
elif add_extra_convs: # True
if extra_convs_on_inputs:
# For compatibility with previous release
# TODO: deprecate `extra_convs_on_inputs`
self.add_extra_convs = 'on_input'
else:
self.add_extra_convs = 'on_output'
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
act_cfg=act_cfg,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if self.add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.add_extra_convs == 'on_input':
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
@auto_fp16()
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
            # In some cases, fixing `scale_factor` (e.g. 2) is preferred, but
# it cannot co-exist with `size` in `F.interpolate`.
if 'scale_factor' in self.upsample_cfg:
laterals[i - 1] = laterals[i - 1] + resize(
laterals[i], **self.upsample_cfg)
else:
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + resize(
laterals[i], size=prev_shape, **self.upsample_cfg)
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
extra_source = inputs[self.backbone_end_level - 1]
elif self.add_extra_convs == 'on_lateral':
extra_source = laterals[-1]
elif self.add_extra_convs == 'on_output':
extra_source = outs[-1]
else:
raise NotImplementedError
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
| 9,238 | 42.172897 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/necks/ic_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from mmseg.ops import resize
from ..builder import NECKS
class CascadeFeatureFusion(BaseModule):
"""Cascade Feature Fusion Unit in ICNet.
Args:
low_channels (int): The number of input channels for
low resolution feature map.
high_channels (int): The number of input channels for
high resolution feature map.
out_channels (int): The number of output channels.
conv_cfg (dict): Dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN').
act_cfg (dict): Dictionary to construct and config act layer.
Default: dict(type='ReLU').
align_corners (bool): align_corners argument of F.interpolate.
Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Returns:
x (Tensor): The output tensor of shape (N, out_channels, H, W).
x_low (Tensor): The output tensor of shape (N, out_channels, H, W)
for Cascade Label Guidance in auxiliary heads.
"""
def __init__(self,
low_channels,
high_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
align_corners=False,
init_cfg=None):
super(CascadeFeatureFusion, self).__init__(init_cfg=init_cfg)
self.align_corners = align_corners
self.conv_low = ConvModule(
low_channels,
out_channels,
3,
padding=2,
dilation=2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv_high = ConvModule(
high_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, x_low, x_high):
x_low = resize(
x_low,
size=x_high.size()[2:],
mode='bilinear',
align_corners=self.align_corners)
        # Note: Different from the original paper, `x_low` goes through
        # `self.conv_low` rather than another 1x1 conv classifier
        # before being used for the auxiliary head.
x_low = self.conv_low(x_low)
x_high = self.conv_high(x_high)
x = x_low + x_high
x = F.relu(x, inplace=True)
return x, x_low
@NECKS.register_module()
class ICNeck(BaseModule):
"""ICNet for Real-Time Semantic Segmentation on High-Resolution Images.
    This neck is the implementation of `ICHead
<https://arxiv.org/abs/1704.08545>`_.
Args:
        in_channels (tuple[int]): The numbers of input feature channels for
            the three branches. Default: (64, 256, 256).
        out_channels (int): The number of output feature channels.
            Default: 128.
conv_cfg (dict): Dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN').
act_cfg (dict): Dictionary to construct and config act layer.
Default: dict(type='ReLU').
align_corners (bool): align_corners argument of F.interpolate.
Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels=(64, 256, 256),
out_channels=128,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
align_corners=False,
init_cfg=None):
super(ICNeck, self).__init__(init_cfg=init_cfg)
assert len(in_channels) == 3, 'Length of input channels \
must be 3!'
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.align_corners = align_corners
self.cff_24 = CascadeFeatureFusion(
self.in_channels[2],
self.in_channels[1],
self.out_channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
align_corners=self.align_corners)
self.cff_12 = CascadeFeatureFusion(
self.out_channels,
self.in_channels[0],
self.out_channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
align_corners=self.align_corners)
def forward(self, inputs):
assert len(inputs) == 3, 'Length of input feature \
maps must be 3!'
x_sub1, x_sub2, x_sub4 = inputs
x_cff_24, x_24 = self.cff_24(x_sub4, x_sub2)
x_cff_12, x_12 = self.cff_12(x_cff_24, x_sub1)
# Note: `x_cff_12` is used for decode_head,
# `x_24` and `x_12` are used for auxiliary head.
return x_24, x_12, x_cff_12
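# A minimal usage sketch (illustrative, not part of the original file):
#   >>> neck = ICNeck(in_channels=(64, 256, 256), out_channels=128)
#   >>> x_sub1 = torch.rand(1, 64, 24, 24)    # highest resolution branch
#   >>> x_sub2 = torch.rand(1, 256, 12, 12)
#   >>> x_sub4 = torch.rand(1, 256, 6, 6)     # lowest resolution branch
#   >>> x_24, x_12, x_cff_12 = neck((x_sub1, x_sub2, x_sub4))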
| 5,360 | 34.979866 | 76 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/necks/jpu.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from mmseg.ops import resize
from ..builder import NECKS
@NECKS.register_module()
class JPU(BaseModule):
"""FastFCN: Rethinking Dilated Convolution in the Backbone
for Semantic Segmentation.
This Joint Pyramid Upsampling (JPU) neck is the implementation of
`FastFCN <https://arxiv.org/abs/1903.11816>`_.
Args:
in_channels (Tuple[int], optional): The number of input channels
for each convolution operations before upsampling.
Default: (512, 1024, 2048).
mid_channels (int): The number of output channels of JPU.
Default: 512.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
dilations (tuple[int]): Dilation rate of each Depthwise
Separable ConvModule. Default: (1, 2, 4, 8).
align_corners (bool, optional): The align_corners argument of
resize operation. Default: False.
conv_cfg (dict | None): Config of conv layers.
Default: None.
norm_cfg (dict | None): Config of norm layers.
Default: dict(type='BN').
act_cfg (dict): Config of activation layers.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels=(512, 1024, 2048),
mid_channels=512,
start_level=0,
end_level=-1,
dilations=(1, 2, 4, 8),
align_corners=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
init_cfg=None):
super(JPU, self).__init__(init_cfg=init_cfg)
assert isinstance(in_channels, tuple)
assert isinstance(dilations, tuple)
self.in_channels = in_channels
self.mid_channels = mid_channels
self.start_level = start_level
self.num_ins = len(in_channels)
if end_level == -1:
self.backbone_end_level = self.num_ins
else:
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
self.dilations = dilations
self.align_corners = align_corners
self.conv_layers = nn.ModuleList()
self.dilation_layers = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
conv_layer = nn.Sequential(
ConvModule(
self.in_channels[i],
self.mid_channels,
kernel_size=3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.conv_layers.append(conv_layer)
for i in range(len(dilations)):
dilation_layer = nn.Sequential(
DepthwiseSeparableConvModule(
in_channels=(self.backbone_end_level - self.start_level) *
self.mid_channels,
out_channels=self.mid_channels,
kernel_size=3,
stride=1,
padding=dilations[i],
dilation=dilations[i],
dw_norm_cfg=norm_cfg,
dw_act_cfg=None,
pw_norm_cfg=norm_cfg,
pw_act_cfg=act_cfg))
self.dilation_layers.append(dilation_layer)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels), 'Length of inputs must \
be the same with self.in_channels!'
feats = [
self.conv_layers[i - self.start_level](inputs[i])
for i in range(self.start_level, self.backbone_end_level)
]
h, w = feats[0].shape[2:]
for i in range(1, len(feats)):
feats[i] = resize(
feats[i],
size=(h, w),
mode='bilinear',
align_corners=self.align_corners)
feat = torch.cat(feats, dim=1)
concat_feat = torch.cat([
self.dilation_layers[i](feat) for i in range(len(self.dilations))
],
dim=1)
outs = []
# Default: outs[2] is the output of JPU for decoder head, outs[1] is
# the feature map from backbone for auxiliary head. Additionally,
# outs[0] can also be used for auxiliary head.
for i in range(self.start_level, self.backbone_end_level - 1):
outs.append(inputs[i])
outs.append(concat_feat)
return tuple(outs)
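# A minimal usage sketch (illustrative, not part of the original file):
#   >>> neck = JPU(in_channels=(512, 1024, 2048), mid_channels=512)
#   >>> feats = (torch.rand(1, 512, 64, 64),
#   ...          torch.rand(1, 1024, 32, 32),
#   ...          torch.rand(1, 2048, 16, 16))
#   >>> outs = neck(feats)
#   >>> outs[-1].shape  # 4 dilation branches * mid_channels, at feats[0] size
#   torch.Size([1, 2048, 64, 64])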
| 5,079 | 37.484848 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/necks/mla_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, build_norm_layer
from ..builder import NECKS
class MLAModule(nn.Module):
def __init__(self,
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
norm_cfg=None,
act_cfg=None):
super(MLAModule, self).__init__()
self.channel_proj = nn.ModuleList()
for i in range(len(in_channels)):
self.channel_proj.append(
ConvModule(
in_channels=in_channels[i],
out_channels=out_channels,
kernel_size=1,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.feat_extract = nn.ModuleList()
for i in range(len(in_channels)):
self.feat_extract.append(
ConvModule(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=3,
padding=1,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs):
# feat_list -> [p2, p3, p4, p5]
feat_list = []
for x, conv in zip(inputs, self.channel_proj):
feat_list.append(conv(x))
# feat_list -> [p5, p4, p3, p2]
# mid_list -> [m5, m4, m3, m2]
feat_list = feat_list[::-1]
mid_list = []
for feat in feat_list:
if len(mid_list) == 0:
mid_list.append(feat)
else:
mid_list.append(mid_list[-1] + feat)
# mid_list -> [m5, m4, m3, m2]
# out_list -> [o2, o3, o4, o5]
out_list = []
for mid, conv in zip(mid_list, self.feat_extract):
out_list.append(conv(mid))
return tuple(out_list)
@NECKS.register_module()
class MLANeck(nn.Module):
"""Multi-level Feature Aggregation.
    This neck is the Multi-level Feature Aggregation (MLA) construction of
    `SETR <https://arxiv.org/abs/2012.15840>`_.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
norm_layer (dict): Config dict for input normalization.
Default: norm_layer=dict(type='LN', eps=1e-6, requires_grad=True).
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (dict): Config dict for activation layer in ConvModule.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
norm_layer=dict(type='LN', eps=1e-6, requires_grad=True),
norm_cfg=None,
act_cfg=None):
super(MLANeck, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
# In order to build general vision transformer backbone, we have to
# move MLA to neck.
self.norm = nn.ModuleList([
build_norm_layer(norm_layer, in_channels[i])[1]
for i in range(len(in_channels))
])
self.mla = MLAModule(
in_channels=in_channels,
out_channels=out_channels,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# Convert from nchw to nlc
outs = []
for i in range(len(inputs)):
x = inputs[i]
n, c, h, w = x.shape
x = x.reshape(n, c, h * w).transpose(2, 1).contiguous()
x = self.norm[i](x)
x = x.transpose(1, 2).reshape(n, c, h, w).contiguous()
outs.append(x)
outs = self.mla(outs)
return tuple(outs)
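# A minimal usage sketch (illustrative, not part of the original file):
#   >>> neck = MLANeck(in_channels=[1024, 1024, 1024, 1024], out_channels=256)
#   >>> feats = [torch.rand(1, 1024, 24, 24) for _ in range(4)]
#   >>> outs = neck(feats)  # four maps, each of shape (1, 256, 24, 24)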
| 3,873 | 31.554622 | 78 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/necks/multilevel_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, xavier_init
from mmseg.ops import resize
from ..builder import NECKS
@NECKS.register_module()
class MultiLevelNeck(nn.Module):
"""MultiLevelNeck.
    A neck structure connecting the ViT backbone and decoder heads.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
scales (List[float]): Scale factors for each input feature map.
Default: [0.5, 1, 2, 4]
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (dict): Config dict for activation layer in ConvModule.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
scales=[0.5, 1, 2, 4],
norm_cfg=None,
act_cfg=None):
super(MultiLevelNeck, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.scales = scales
self.num_outs = len(scales)
self.lateral_convs = nn.ModuleList()
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.lateral_convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size=1,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
for _ in range(self.num_outs):
self.convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=1,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
inputs = [
lateral_conv(inputs[i])
for i, lateral_conv in enumerate(self.lateral_convs)
]
        # handle the case when len(inputs) != self.num_outs
if len(inputs) == 1:
inputs = [inputs[0] for _ in range(self.num_outs)]
outs = []
for i in range(self.num_outs):
x_resize = resize(
inputs[i], scale_factor=self.scales[i], mode='bilinear')
outs.append(self.convs[i](x_resize))
return tuple(outs)
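# A minimal usage sketch (illustrative, not part of the original file):
#   >>> neck = MultiLevelNeck([256, 256, 256, 256], 256, scales=[0.5, 1, 2, 4])
#   >>> feats = [torch.rand(1, 256, 32, 32) for _ in range(4)]
#   >>> outs = neck(feats)  # spatial sizes: 16, 32, 64 and 128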
| 2,716 | 33.392405 | 76 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/segmentors/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseSegmentor
from .cascade_encoder_decoder import CascadeEncoderDecoder
from .encoder_decoder import EncoderDecoder
__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder']
| 255 | 35.571429 | 70 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/segmentors/base.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16
class BaseSegmentor(BaseModule, metaclass=ABCMeta):
"""Base class for segmentors."""
def __init__(self, init_cfg=None):
super(BaseSegmentor, self).__init__(init_cfg)
self.fp16_enabled = False
@property
def with_neck(self):
"""bool: whether the segmentor has neck"""
return hasattr(self, 'neck') and self.neck is not None
@property
def with_auxiliary_head(self):
"""bool: whether the segmentor has auxiliary head"""
return hasattr(self,
'auxiliary_head') and self.auxiliary_head is not None
@property
def with_decode_head(self):
"""bool: whether the segmentor has decode head"""
return hasattr(self, 'decode_head') and self.decode_head is not None
@abstractmethod
def extract_feat(self, imgs):
"""Placeholder for extract features from images."""
pass
@abstractmethod
def encode_decode(self, img, img_metas):
"""Placeholder for encode images with backbone and decode into a
semantic segmentation map of the same size as input."""
pass
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
"""Placeholder for Forward function for training."""
pass
@abstractmethod
def simple_test(self, img, img_meta, **kwargs):
"""Placeholder for single image test."""
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
"""Placeholder for augmentation test."""
pass
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got '
f'{type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) != '
f'num of image meta ({len(img_metas)})')
        # all images in the same aug batch must have the same ori_shape and
        # pad shape
for img_meta in img_metas:
ori_shapes = [_['ori_shape'] for _ in img_meta]
assert all(shape == ori_shapes[0] for shape in ori_shapes)
img_shapes = [_['img_shape'] for _ in img_meta]
assert all(shape == img_shapes[0] for shape in img_shapes)
pad_shapes = [_['pad_shape'] for _ in img_meta]
assert all(shape == pad_shapes[0] for shape in pad_shapes)
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
return self.aug_test(imgs, img_metas, **kwargs)
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
            data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self(**data_batch)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(data_batch['img_metas']))
return outputs
def val_step(self, data_batch, optimizer=None, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
losses = self(**data_batch)
loss, log_vars = self._parse_losses(losses)
log_vars_ = dict()
for loss_name, loss_value in log_vars.items():
k = loss_name + '_val'
log_vars_[k] = loss_value
outputs = dict(
loss=loss,
log_vars=log_vars_,
num_samples=len(data_batch['img_metas']))
return outputs
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
# If the loss_vars has different length, raise assertion error
# to prevent GPUs from infinite waiting.
if dist.is_available() and dist.is_initialized():
log_var_length = torch.tensor(len(log_vars), device=loss.device)
dist.all_reduce(log_var_length)
message = (f'rank {dist.get_rank()}' +
f' len(log_vars): {len(log_vars)}' + ' keys: ' +
','.join(log_vars.keys()) + '\n')
assert log_var_length == len(log_vars) * dist.get_world_size(), \
'loss log variables are different across GPUs!\n' + message
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
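    # Illustrative note (not part of the original file): given, e.g.,
    # losses = dict(loss_ce=tensor(0.5), acc_seg=tensor(0.9)), only the keys
    # containing 'loss' are summed into the returned total, so log_vars
    # becomes {'loss_ce': 0.5, 'acc_seg': 0.9, 'loss': 0.5}.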
def show_result(self,
img,
result,
palette=None,
win_name='',
show=False,
wait_time=0,
out_file=None,
opacity=0.5):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor): The semantic segmentation results to draw over
`img`.
            palette (list[list[int]] | np.ndarray | None): The palette of
segmentation map. If None is given, random palette will be
generated. Default: None
            win_name (str): The window name.
            show (bool): Whether to show the image.
                Default: False.
            wait_time (int): Value of waitKey param.
                Default: 0.
            out_file (str or None): The filename to write the image.
                Default: None.
            opacity (float): Opacity of painted segmentation map.
                Default: 0.5. Must be in (0, 1] range.
Returns:
            img (Tensor): Only returned when neither `show` nor `out_file`
                is specified.
"""
img = mmcv.imread(img)
img = img.copy()
seg = result[0]
if palette is None:
if self.PALETTE is None:
# Get random state before set seed,
# and restore random state later.
# It will prevent loss of randomness, as the palette
# may be different in each iteration if not specified.
# See: https://github.com/open-mmlab/mmdetection/issues/5844
state = np.random.get_state()
np.random.seed(42)
# random palette
palette = np.random.randint(
0, 255, size=(len(self.CLASSES), 3))
np.random.set_state(state)
else:
palette = self.PALETTE
palette = np.array(palette)
assert palette.shape[0] == len(self.CLASSES)
assert palette.shape[1] == 3
assert len(palette.shape) == 2
assert 0 < opacity <= 1.0
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(palette):
color_seg[seg == label, :] = color
# convert to BGR
color_seg = color_seg[..., ::-1]
img = img * (1 - opacity) + color_seg * opacity
img = img.astype(np.uint8)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
if show:
mmcv.imshow(img, win_name, wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
if not (show or out_file):
warnings.warn('show==False and out_file is not specified, only '
'result image will be returned')
return img
| 11,489 | 38.349315 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/segmentors/cascade_encoder_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
from torch import nn
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .encoder_decoder import EncoderDecoder
@SEGMENTORS.register_module()
class CascadeEncoderDecoder(EncoderDecoder):
"""Cascade Encoder Decoder segmentors.
    CascadeEncoderDecoder is almost the same as EncoderDecoder, except that
    the decoders of CascadeEncoderDecoder are cascaded. The output of the
    previous decode_head is the input of the next decode_head.
"""
def __init__(self,
num_stages,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
self.num_stages = num_stages
super(CascadeEncoderDecoder, self).__init__(
backbone=backbone,
decode_head=decode_head,
neck=neck,
auxiliary_head=auxiliary_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
assert isinstance(decode_head, list)
assert len(decode_head) == self.num_stages
self.decode_head = nn.ModuleList()
for i in range(self.num_stages):
self.decode_head.append(builder.build_head(decode_head[i]))
self.align_corners = self.decode_head[-1].align_corners
self.num_classes = self.decode_head[-1].num_classes
self.out_channels = self.decode_head[-1].out_channels
def encode_decode(self, img, img_metas):
"""Encode images with backbone and decode into a semantic segmentation
map of the same size as input."""
x = self.extract_feat(img)
out = self.decode_head[0].forward_test(x, img_metas, self.test_cfg)
for i in range(1, self.num_stages):
out = self.decode_head[i].forward_test(x, out, img_metas,
self.test_cfg)
out = resize(
input=out,
size=img.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
return out
def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head[0].forward_train(
x, img_metas, gt_semantic_seg, self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode_0'))
for i in range(1, self.num_stages):
# forward test again, maybe unnecessary for most methods.
if i == 1:
prev_outputs = self.decode_head[0].forward_test(
x, img_metas, self.test_cfg)
else:
prev_outputs = self.decode_head[i - 1].forward_test(
x, prev_outputs, img_metas, self.test_cfg)
loss_decode = self.decode_head[i].forward_train(
x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg)
losses.update(add_prefix(loss_decode, f'decode_{i}'))
return losses
| 3,373 | 36.488889 | 78 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/segmentors/encoder_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .base import BaseSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder(BaseSegmentor):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
    Note that auxiliary_head is only used for deep supervision during
    training, and can be discarded during inference.
"""
def __init__(self,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(EncoderDecoder, self).__init__(init_cfg)
if pretrained is not None:
assert backbone.get('pretrained') is None, \
'both backbone and segmentor set pretrained weight'
backbone.pretrained = pretrained
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self._init_decode_head(decode_head)
self._init_auxiliary_head(auxiliary_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
assert self.with_decode_head
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = builder.build_head(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes
self.out_channels = self.decode_head.out_channels
def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(builder.build_head(head_cfg))
else:
self.auxiliary_head = builder.build_head(auxiliary_head)
def extract_feat(self, img):
"""Extract features from images."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def encode_decode(self, img, img_metas):
"""Encode images with backbone and decode into a semantic segmentation
map of the same size as input."""
x = self.extract_feat(img)
out = self._decode_head_forward_test(x, img_metas)
out = resize(
input=out,
size=img.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
return out
def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in
inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, gt_semantic_seg, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
def forward_dummy(self, img):
"""Dummy forward function."""
seg_logit = self.encode_decode(img, None)
return seg_logit
def forward_train(self, img, img_metas, gt_semantic_seg):
"""Forward function for training.
Args:
img (Tensor): Input images.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
gt_semantic_seg)
losses.update(loss_decode)
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, gt_semantic_seg)
losses.update(loss_aux)
return losses
# TODO refactor
def slide_inference(self, img, img_meta, rescale):
"""Inference by sliding-window with overlap.
If h_crop > h_img or w_crop > w_img, the small patch will be used to
decode without padding.
"""
h_stride, w_stride = self.test_cfg.stride
h_crop, w_crop = self.test_cfg.crop_size
batch_size, _, h_img, w_img = img.size()
out_channels = self.out_channels
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, out_channels, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
crop_seg_logit = self.encode_decode(crop_img, img_meta)
preds += F.pad(crop_seg_logit,
(int(x1), int(preds.shape[3] - x2), int(y1),
int(preds.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
if torch.onnx.is_in_onnx_export():
# cast count_mat to constant while exporting to ONNX
count_mat = torch.from_numpy(
count_mat.cpu().detach().numpy()).to(device=img.device)
preds = preds / count_mat
if rescale:
# remove padding area
resize_shape = img_meta[0]['img_shape'][:2]
preds = preds[:, :, :resize_shape[0], :resize_shape[1]]
preds = resize(
preds,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return preds
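    # Illustrative note (not part of the original file): for h_img=512,
    # h_crop=256 and h_stride=171, the grid formula above gives h_grids=3,
    # with windows starting at rows 0, 171 and 256 (the last start is
    # clamped so the window ends at the image border), so every pixel is
    # predicted at least once and `count_mat` contains no zeros.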
def whole_inference(self, img, img_meta, rescale):
"""Inference with full image."""
seg_logit = self.encode_decode(img, img_meta)
if rescale:
# support dynamic shape for onnx
if torch.onnx.is_in_onnx_export():
size = img.shape[2:]
else:
# remove padding area
resize_shape = img_meta[0]['img_shape'][:2]
seg_logit = seg_logit[:, :, :resize_shape[0], :resize_shape[1]]
size = img_meta[0]['ori_shape'][:2]
seg_logit = resize(
seg_logit,
size=size,
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return seg_logit
def inference(self, img, img_meta, rescale):
"""Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
            img_meta (list[dict]): List of image info dicts where each dict
                has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.
"""
assert self.test_cfg.mode in ['slide', 'whole']
ori_shape = img_meta[0]['ori_shape']
assert all(_['ori_shape'] == ori_shape for _ in img_meta)
if self.test_cfg.mode == 'slide':
seg_logit = self.slide_inference(img, img_meta, rescale)
else:
seg_logit = self.whole_inference(img, img_meta, rescale)
if self.out_channels == 1:
            output = torch.sigmoid(seg_logit)
else:
output = F.softmax(seg_logit, dim=1)
flip = img_meta[0]['flip']
if flip:
flip_direction = img_meta[0]['flip_direction']
assert flip_direction in ['horizontal', 'vertical']
if flip_direction == 'horizontal':
output = output.flip(dims=(3, ))
elif flip_direction == 'vertical':
output = output.flip(dims=(2, ))
return output
def simple_test(self, img, img_meta, rescale=True):
"""Simple test with single image."""
seg_logit = self.inference(img, img_meta, rescale)
if self.out_channels == 1:
seg_pred = (seg_logit >
self.decode_head.threshold).to(seg_logit).squeeze(1)
else:
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
# our inference backend only support 4D output
seg_pred = seg_pred.unsqueeze(0)
return seg_pred
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
def simple_test_logits(self, img, img_metas, rescale=True):
"""Test without augmentations.
Return numpy seg_map logits.
"""
seg_logit = self.inference(img[0], img_metas[0], rescale)
seg_logit = seg_logit.cpu().numpy()
return seg_logit
def aug_test(self, imgs, img_metas, rescale=True):
"""Test with augmentations.
Only rescale=True is supported.
"""
# aug_test rescale all imgs back to ori_shape for now
assert rescale
# to save memory, we get augmented seg logit inplace
seg_logit = self.inference(imgs[0], img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
if self.out_channels == 1:
seg_pred = (seg_logit >
self.decode_head.threshold).to(seg_logit).squeeze(1)
else:
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
def aug_test_logits(self, img, img_metas, rescale=True):
"""Test with augmentations.
Return seg_map logits. Only rescale=True is supported.
"""
# aug_test rescale all imgs back to ori_shape for now
assert rescale
imgs = img
seg_logit = self.inference(imgs[0], img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
seg_logit = seg_logit.cpu().numpy()
return seg_logit
| 12,600 | 37.184848 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .embed import PatchEmbed
from .inverted_residual import InvertedResidual, InvertedResidualV3
from .make_divisible import make_divisible
from .res_layer import ResLayer
from .se_layer import SELayer
from .self_attention_block import SelfAttentionBlock
from .shape_convert import (nchw2nlc2nchw, nchw_to_nlc, nlc2nchw2nlc,
nlc_to_nchw)
from .up_conv_block import UpConvBlock
__all__ = [
'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual',
'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'PatchEmbed',
'nchw_to_nlc', 'nlc_to_nchw', 'nchw2nlc2nchw', 'nlc2nchw2nlc'
]
| 677 | 38.882353 | 75 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/embed.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner.base_module import BaseModule
from mmcv.utils import to_2tuple
class AdaptivePadding(nn.Module):
"""Applies padding to input (if needed) so that input can get fully covered
by filter you specified. It support two modes "same" and "corner". The
"same" mode is same with "SAME" padding mode in TensorFlow, pad zero around
input. The "corner" mode would pad zero to bottom right.
Args:
kernel_size (int | tuple): Size of the kernel:
stride (int | tuple): Stride of the filter. Default: 1:
dilation (int | tuple): Spacing between kernel elements.
Default: 1.
padding (str): Support "same" and "corner", "corner" mode
would pad zero to bottom right, and "same" mode would
pad zero around input. Default: "corner".
Example:
>>> kernel_size = 16
>>> stride = 16
>>> dilation = 1
>>> input = torch.rand(1, 1, 15, 17)
>>> adap_pad = AdaptivePadding(
>>> kernel_size=kernel_size,
>>> stride=stride,
>>> dilation=dilation,
>>> padding="corner")
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
>>> input = torch.rand(1, 1, 16, 17)
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
"""
def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
super(AdaptivePadding, self).__init__()
assert padding in ('same', 'corner')
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
self.padding = padding
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
def get_pad_shape(self, input_shape):
input_h, input_w = input_shape
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.stride
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
pad_h = max((output_h - 1) * stride_h +
(kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
pad_w = max((output_w - 1) * stride_w +
(kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
return pad_h, pad_w
def forward(self, x):
pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
if pad_h > 0 or pad_w > 0:
if self.padding == 'corner':
x = F.pad(x, [0, pad_w, 0, pad_h])
elif self.padding == 'same':
x = F.pad(x, [
pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2
])
return x
class PatchEmbed(BaseModule):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_type (str): The config dict for embedding
conv layer type selection. Default: "Conv2d".
kernel_size (int): The kernel_size of embedding conv. Default: 16.
stride (int, optional): The slide stride of embedding conv.
Default: None (Would be set as `kernel_size`).
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int): The dilation rate of embedding conv. Default: 1.
bias (bool): Bias of embed conv. Default: True.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
input_size (int | tuple | None): The size of input, which will be
            used to calculate the out size. Only works when `dynamic_size`
is False. Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
in_channels=3,
embed_dims=768,
conv_type='Conv2d',
kernel_size=16,
stride=None,
padding='corner',
dilation=1,
bias=True,
norm_cfg=None,
input_size=None,
init_cfg=None):
super(PatchEmbed, self).__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of conv
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.projection = build_conv_layer(
dict(type=conv_type),
in_channels=in_channels,
out_channels=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
if input_size:
input_size = to_2tuple(input_size)
# `init_out_size` would be used outside to
# calculate the num_patches
# when `use_abs_pos_embed` outside
self.init_input_size = input_size
if self.adap_padding:
pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)
input_h, input_w = input_size
input_h = input_h + pad_h
input_w = input_w + pad_w
input_size = (input_h, input_w)
# https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
h_out = (input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
w_out = (input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.init_out_size = (h_out, w_out)
else:
self.init_input_size = None
self.init_out_size = None
def forward(self, x):
"""
Args:
x (Tensor): Has shape (B, C, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adap_padding:
x = self.adap_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3])
x = x.flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size
class PatchMerging(BaseModule):
"""Merge patch feature map.
    This layer groups the feature map by kernel_size and applies norm and
    linear layers to the grouped feature map. Our implementation uses
    `nn.Unfold` to merge patches, which is about 25% faster than the original
    implementation. However, pretrained models need to be modified for
    compatibility.
Args:
in_channels (int): The num of input channels.
out_channels (int): The num of output channels.
kernel_size (int | tuple, optional): the kernel size in the unfold
layer. Defaults to 2.
stride (int | tuple, optional): the stride of the sliding blocks in the
unfold layer. Default: None. (Would be set as `kernel_size`)
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int | tuple, optional): dilation parameter in the unfold
layer. Default: 1.
bias (bool, optional): Whether to add bias in linear layer or not.
Defaults: False.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: dict(type='LN').
init_cfg (dict, optional): The extra config for initialization.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=2,
stride=None,
padding='corner',
dilation=1,
bias=False,
norm_cfg=dict(type='LN'),
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
if stride:
stride = stride
else:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of unfold
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.sampler = nn.Unfold(
kernel_size=kernel_size,
dilation=dilation,
padding=padding,
stride=stride)
sample_dim = kernel_size[0] * kernel_size[1] * in_channels
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, sample_dim)[1]
else:
self.norm = None
self.reduction = nn.Linear(sample_dim, out_channels, bias=bias)
def forward(self, x, input_size):
"""
Args:
x (Tensor): Has shape (B, H*W, C_in).
input_size (tuple[int]): The spatial shape of x, arrange as (H, W).
Default: None.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)
- out_size (tuple[int]): Spatial shape of x, arrange as
(Merged_H, Merged_W).
"""
B, L, C = x.shape
assert isinstance(input_size, Sequence), f'Expect ' \
f'input_size is ' \
f'`Sequence` ' \
f'but get {input_size}'
H, W = input_size
assert L == H * W, 'input feature has wrong size'
x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W
        # Use nn.Unfold to merge patches. About 25% faster than the original
        # method, but pretrained models need to be modified for compatibility.
if self.adap_padding:
x = self.adap_padding(x)
H, W = x.shape[-2:]
x = self.sampler(x)
        # if kernel_size=2 and stride=2, x should have shape (B, 4*C, H/2*W/2)
out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] *
(self.sampler.kernel_size[0] - 1) -
1) // self.sampler.stride[0] + 1
out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] *
(self.sampler.kernel_size[1] - 1) -
1) // self.sampler.stride[1] + 1
output_size = (out_h, out_w)
x = x.transpose(1, 2) # B, H/2*W/2, 4*C
x = self.norm(x) if self.norm else x
x = self.reduction(x)
return x, output_size
| 12,216 | 35.909366 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/inverted_residual.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from torch import nn
from torch.utils import checkpoint as cp
from .se_layer import SELayer
class InvertedResidual(nn.Module):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): Adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
dilation (int): Dilation rate of depthwise conv. Default: 1
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
dilation=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False,
**kwargs):
super(InvertedResidual, self).__init__()
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
            f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
**kwargs))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
**kwargs),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None,
**kwargs)
])
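        # The assembled layers follow the MobileNetV2 pattern: optional 1x1
        # expansion conv -> 3x3 depthwise conv -> 1x1 linear projection
        # (no activation on the last conv).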
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
class InvertedResidualV3(nn.Module):
"""Inverted Residual Block for MobileNetV3.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
with_expand_conv (bool): Use expand conv or not. If set False,
mid_channels must be the same with in_channels. Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super(InvertedResidualV3, self).__init__()
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2]
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=dict(
type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
| 7,162 | 32.471963 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/make_divisible.py | # Copyright (c) OpenMMLab. All rights reserved.
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
"""Make divisible function.
    This function rounds the channel number to the nearest value that is
    divisible by the divisor. It is taken from the original tf repo. It ensures
that all layers have a channel number that is divisible by divisor. It can
be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa
Args:
value (int): The original channel number.
divisor (int): The divisor to fully divide the channel number.
min_value (int): The minimum value of the output channel.
Default: None, means that the minimum value equal to the divisor.
min_ratio (float): The minimum ratio of the rounded channel number to
the original channel number. Default: 0.9.
Returns:
int: The modified output channel number.
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than (1-min_ratio).
if new_value < min_ratio * value:
new_value += divisor
return new_value
| 1,279 | 43.137931 | 116 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/res_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import Sequential
from torch import nn as nn
class ResLayer(Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        dilation (int): dilation rate of the blocks. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
multi_grid (int | None): Multi grid dilation rates of last
stage. Default: None
        contract_dilation (bool): Whether to contract the first dilation of
            each layer. Default: False
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
dilation=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
multi_grid=None,
contract_dilation=False,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
if multi_grid is None:
if dilation > 1 and contract_dilation:
first_dilation = dilation // 2
else:
first_dilation = dilation
else:
first_dilation = multi_grid[0]
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
dilation=first_dilation,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
dilation=dilation if multi_grid is None else multi_grid[i],
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
| 3,395 | 34.010309 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/se_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from .make_divisible import make_divisible
class SELayer(nn.Module):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
``int(channels/ratio)``. Default: 16.
conv_cfg (None or dict): Config dict for convolution layer.
Default: None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
If act_cfg is a dict, two activation layers will be configured
by this dict. If act_cfg is a sequence of dicts, the first
activation layer will be configured by the first dict and the
second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
divisor=6.0)).
"""
def __init__(self,
channels,
ratio=16,
conv_cfg=None,
act_cfg=(dict(type='ReLU'),
dict(type='HSigmoid', bias=3.0, divisor=6.0))):
super(SELayer, self).__init__()
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=make_divisible(channels // ratio, 8),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=make_divisible(channels // ratio, 8),
out_channels=channels,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
| 2,151 | 35.474576 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/self_attention_block.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule, constant_init
from torch import nn as nn
from torch.nn import functional as F
class SelfAttentionBlock(nn.Module):
"""General self-attention block/non-local block.
Please refer to https://arxiv.org/abs/1706.03762 for details about key,
query and value.
Args:
key_in_channels (int): Input channels of key feature.
query_in_channels (int): Input channels of query feature.
channels (int): Output channels of key/query transform.
out_channels (int): Output channels.
share_key_query (bool): Whether share projection weight between key
and query projection.
query_downsample (nn.Module): Query downsample module.
key_downsample (nn.Module): Key downsample module.
        key_query_num_convs (int): Number of convs for key/query projection.
        value_out_num_convs (int): Number of convs for value/out projection.
        key_query_norm (bool): Whether to use norm in the key/query
            projection.
        value_out_norm (bool): Whether to use norm in the value/out
            projection.
        matmul_norm (bool): Whether to normalize the attention map with the
            square root of channels.
        with_out (bool): Whether to use the out projection.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict|None): Config of activation layers.
"""
def __init__(self, key_in_channels, query_in_channels, channels,
out_channels, share_key_query, query_downsample,
key_downsample, key_query_num_convs, value_out_num_convs,
key_query_norm, value_out_norm, matmul_norm, with_out,
conv_cfg, norm_cfg, act_cfg):
super(SelfAttentionBlock, self).__init__()
if share_key_query:
assert key_in_channels == query_in_channels
self.key_in_channels = key_in_channels
self.query_in_channels = query_in_channels
self.out_channels = out_channels
self.channels = channels
self.share_key_query = share_key_query
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.key_project = self.build_project(
key_in_channels,
channels,
num_convs=key_query_num_convs,
use_conv_module=key_query_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if share_key_query:
self.query_project = self.key_project
else:
self.query_project = self.build_project(
query_in_channels,
channels,
num_convs=key_query_num_convs,
use_conv_module=key_query_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.value_project = self.build_project(
key_in_channels,
channels if with_out else out_channels,
num_convs=value_out_num_convs,
use_conv_module=value_out_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if with_out:
self.out_project = self.build_project(
channels,
out_channels,
num_convs=value_out_num_convs,
use_conv_module=value_out_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
else:
self.out_project = None
self.query_downsample = query_downsample
self.key_downsample = key_downsample
self.matmul_norm = matmul_norm
self.init_weights()
def init_weights(self):
"""Initialize weight of later layer."""
if self.out_project is not None:
if not isinstance(self.out_project, ConvModule):
constant_init(self.out_project, 0)
def build_project(self, in_channels, channels, num_convs, use_conv_module,
conv_cfg, norm_cfg, act_cfg):
"""Build projection layer for key/query/value/out."""
if use_conv_module:
convs = [
ConvModule(
in_channels,
channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
]
for _ in range(num_convs - 1):
convs.append(
ConvModule(
channels,
channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
else:
convs = [nn.Conv2d(in_channels, channels, 1)]
for _ in range(num_convs - 1):
convs.append(nn.Conv2d(channels, channels, 1))
if len(convs) > 1:
convs = nn.Sequential(*convs)
else:
convs = convs[0]
return convs
def forward(self, query_feats, key_feats):
"""Forward function."""
batch_size = query_feats.size(0)
query = self.query_project(query_feats)
if self.query_downsample is not None:
query = self.query_downsample(query)
query = query.reshape(*query.shape[:2], -1)
query = query.permute(0, 2, 1).contiguous()
key = self.key_project(key_feats)
value = self.value_project(key_feats)
if self.key_downsample is not None:
key = self.key_downsample(key)
value = self.key_downsample(value)
key = key.reshape(*key.shape[:2], -1)
value = value.reshape(*value.shape[:2], -1)
value = value.permute(0, 2, 1).contiguous()
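        # Shape note (added for clarity): query is [B, Nq, C] and key is
        # [B, C, Nk], so the matmul below yields sim_map of shape [B, Nq, Nk].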
sim_map = torch.matmul(query, key)
if self.matmul_norm:
sim_map = (self.channels**-.5) * sim_map
sim_map = F.softmax(sim_map, dim=-1)
context = torch.matmul(sim_map, value)
context = context.permute(0, 2, 1).contiguous()
context = context.reshape(batch_size, -1, *query_feats.shape[2:])
if self.out_project is not None:
context = self.out_project(context)
return context
| 6,173 | 37.347826 | 78 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/shape_convert.py | # Copyright (c) OpenMMLab. All rights reserved.
def nlc_to_nchw(x, hw_shape):
"""Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor.
Args:
x (Tensor): The input tensor of shape [N, L, C] before conversion.
hw_shape (Sequence[int]): The height and width of output feature map.
Returns:
Tensor: The output tensor of shape [N, C, H, W] after conversion.
"""
H, W = hw_shape
assert len(x.shape) == 3
B, L, C = x.shape
assert L == H * W, 'The seq_len doesn\'t match H, W'
return x.transpose(1, 2).reshape(B, C, H, W)
def nchw_to_nlc(x):
"""Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor.
Args:
x (Tensor): The input tensor of shape [N, C, H, W] before conversion.
Returns:
Tensor: The output tensor of shape [N, L, C] after conversion.
"""
assert len(x.shape) == 4
return x.flatten(2).transpose(1, 2).contiguous()
def nchw2nlc2nchw(module, x, contiguous=False, **kwargs):
"""Flatten [N, C, H, W] shape tensor `x` to [N, L, C] shape tensor. Use the
reshaped tensor as the input of `module`, and the convert the output of
`module`, whose shape is.
[N, L, C], to [N, C, H, W].
Args:
        module (Callable): A callable object that takes a tensor
            with shape [N, L, C] as input.
        x (Tensor): The input tensor of shape [N, C, H, W].
        contiguous (Bool): Whether to make the tensor contiguous
            after each shape transform.
Returns:
Tensor: The output tensor of shape [N, C, H, W].
Example:
>>> import torch
>>> import torch.nn as nn
>>> norm = nn.LayerNorm(4)
>>> feature_map = torch.rand(4, 4, 5, 5)
>>> output = nchw2nlc2nchw(norm, feature_map)
"""
B, C, H, W = x.shape
if not contiguous:
x = x.flatten(2).transpose(1, 2)
x = module(x, **kwargs)
x = x.transpose(1, 2).reshape(B, C, H, W)
else:
x = x.flatten(2).transpose(1, 2).contiguous()
x = module(x, **kwargs)
x = x.transpose(1, 2).reshape(B, C, H, W).contiguous()
return x
def nlc2nchw2nlc(module, x, hw_shape, contiguous=False, **kwargs):
"""Convert [N, L, C] shape tensor `x` to [N, C, H, W] shape tensor. Use the
reshaped tensor as the input of `module`, and convert the output of
`module`, whose shape is.
[N, C, H, W], to [N, L, C].
Args:
        module (Callable): A callable object that takes a tensor
            with shape [N, C, H, W] as input.
x (Tensor): The input tensor of shape [N, L, C].
hw_shape: (Sequence[int]): The height and width of the
feature map with shape [N, C, H, W].
contiguous (Bool): Whether to make the tensor contiguous
after each shape transform.
Returns:
Tensor: The output tensor of shape [N, L, C].
Example:
>>> import torch
>>> import torch.nn as nn
>>> conv = nn.Conv2d(16, 16, 3, 1, 1)
>>> feature_map = torch.rand(4, 25, 16)
>>> output = nlc2nchw2nlc(conv, feature_map, (5, 5))
"""
H, W = hw_shape
assert len(x.shape) == 3
B, L, C = x.shape
assert L == H * W, 'The seq_len doesn\'t match H, W'
if not contiguous:
x = x.transpose(1, 2).reshape(B, C, H, W)
x = module(x, **kwargs)
x = x.flatten(2).transpose(1, 2)
else:
x = x.transpose(1, 2).reshape(B, C, H, W).contiguous()
x = module(x, **kwargs)
x = x.flatten(2).transpose(1, 2).contiguous()
return x
| 3,589 | 32.240741 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/models/utils/up_conv_block.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer
class UpConvBlock(nn.Module):
"""Upsample convolution block in decoder for UNet.
This upsample convolution block consists of one upsample module
followed by one convolution block. The upsample module expands the
high-level low-resolution feature map and the convolution block fuses
the upsampled high-level low-resolution feature map and the low-level
high-resolution feature map from encoder.
Args:
conv_block (nn.Sequential): Sequential of convolutional layers.
        in_channels (int): Number of input channels of the high-level
            low-resolution feature map from decoder.
        skip_channels (int): Number of input channels of the low-level
            high-resolution feature map from encoder.
out_channels (int): Number of output channels.
num_convs (int): Number of convolutional layers in the conv_block.
Default: 2.
stride (int): Stride of convolutional layer in conv_block. Default: 1.
dilation (int): Dilation rate of convolutional layer in conv_block.
Default: 1.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
conv_cfg (dict | None): Config dict for convolution layer.
Default: None.
norm_cfg (dict | None): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict | None): Config dict for activation layer in ConvModule.
Default: dict(type='ReLU').
        upsample_cfg (dict): The upsample config of the upsample module in
            decoder. Default: dict(type='InterpConv'). If the size of the
            high-level feature map is the same as that of the skip feature map
            (low-level feature map from encoder), the high-level feature map
            does not need to be upsampled and upsample_cfg should be None.
dcn (bool): Use deformable convolution in convolutional layer or not.
Default: None.
plugins (dict): plugins for convolutional layers. Default: None.
"""
def __init__(self,
conv_block,
in_channels,
skip_channels,
out_channels,
num_convs=2,
stride=1,
dilation=1,
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
upsample_cfg=dict(type='InterpConv'),
dcn=None,
plugins=None):
super(UpConvBlock, self).__init__()
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.conv_block = conv_block(
in_channels=2 * skip_channels,
out_channels=out_channels,
num_convs=num_convs,
stride=stride,
dilation=dilation,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
dcn=None,
plugins=None)
if upsample_cfg is not None:
self.upsample = build_upsample_layer(
cfg=upsample_cfg,
in_channels=in_channels,
out_channels=skip_channels,
with_cp=with_cp,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
else:
self.upsample = ConvModule(
in_channels,
skip_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, skip, x):
"""Forward function."""
x = self.upsample(x)
out = torch.cat([skip, x], dim=1)
out = self.conv_block(out)
return out
| 4,016 | 38 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/ops/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .encoding import Encoding
from .wrappers import Upsample, resize
__all__ = ['Upsample', 'resize', 'Encoding']
| 164 | 26.5 | 47 | py |
mmsegmentation | mmsegmentation-master/mmseg/ops/encoding.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import nn
from torch.nn import functional as F
class Encoding(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:
channels: dimension of the features or feature channels
num_codes: number of code words
"""
def __init__(self, channels, num_codes):
super(Encoding, self).__init__()
# init codewords and smoothing factor
self.channels, self.num_codes = channels, num_codes
std = 1. / ((num_codes * channels)**0.5)
# [num_codes, channels]
self.codewords = nn.Parameter(
torch.empty(num_codes, channels,
dtype=torch.float).uniform_(-std, std),
requires_grad=True)
# [num_codes]
self.scale = nn.Parameter(
torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0),
requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
num_codes, channels = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
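        # Shape note (added for clarity): x is [batch, H*W, channels]; the
        # broadcasted difference below is [batch, H*W, num_codes, channels]
        # and is reduced over channels to give [batch, H*W, num_codes].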
expanded_x = x.unsqueeze(2).expand(
(batch_size, x.size(1), num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = reshaped_scale * (
expanded_x - reshaped_codewords).pow(2).sum(dim=3)
return scaled_l2_norm
@staticmethod
def aggregate(assignment_weights, x, codewords):
num_codes, channels = codewords.size()
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
batch_size = x.size(0)
expanded_x = x.unsqueeze(2).expand(
(batch_size, x.size(1), num_codes, channels))
encoded_feat = (assignment_weights.unsqueeze(3) *
(expanded_x - reshaped_codewords)).sum(dim=1)
return encoded_feat
def forward(self, x):
assert x.dim() == 4 and x.size(1) == self.channels
# [batch_size, channels, height, width]
batch_size = x.size(0)
# [batch_size, height x width, channels]
x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
        # assignment_weights: [batch_size, height x width, num_codes]
assignment_weights = F.softmax(
self.scaled_l2(x, self.codewords, self.scale), dim=2)
# aggregate
encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
return encoded_feat
def __repr__(self):
repr_str = self.__class__.__name__
        repr_str += f'(Nx{self.channels}xHxW => Nx{self.num_codes}' \
            f'x{self.channels})'
return repr_str
| 2,836 | 36.328947 | 78 | py |
mmsegmentation | mmsegmentation-master/mmseg/ops/wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
if output_h > input_h or output_w > input_w:
if ((output_h > 1 and output_w > 1 and input_h > 1
and input_w > 1) and (output_h - 1) % (input_h - 1)
and (output_w - 1) % (input_w - 1)):
warnings.warn(
f'When align_corners={align_corners}, '
                        'the output would be more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
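    # Illustrative check (added for clarity, not from the original code):
    # resizing a (33, 33) input to (65, 65) with align_corners=True satisfies
    # the `nx+1` rule, since (65 - 1) % (33 - 1) == 0, so no warning fires.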
return F.interpolate(input, size, scale_factor, mode, align_corners)
class Upsample(nn.Module):
def __init__(self,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None):
super(Upsample, self).__init__()
self.size = size
if isinstance(scale_factor, tuple):
self.scale_factor = tuple(float(factor) for factor in scale_factor)
else:
self.scale_factor = float(scale_factor) if scale_factor else None
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
if not self.size:
size = [int(t * self.scale_factor) for t in x.shape[-2:]]
else:
size = self.size
return resize(x, size, None, self.mode, self.align_corners)
| 1,874 | 35.057692 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/utils/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
from .set_env import setup_multi_processes
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'setup_multi_processes', 'build_ddp', 'build_dp', 'get_device'
]
| 414 | 33.583333 | 66 | py |
mmsegmentation | mmsegmentation-master/mmseg/utils/collect_env.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmseg
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print('{}: {}'.format(name, val))
| 484 | 24.526316 | 76 | py |
mmsegmentation | mmsegmentation-master/mmseg/utils/logger.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmseg".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level)
return logger
| 947 | 31.689655 | 79 | py |
mmsegmentation | mmsegmentation-master/mmseg/utils/misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import glob
import os.path as osp
import warnings
def find_latest_checkpoint(path, suffix='pth'):
"""This function is for finding the latest checkpoint.
    It is used when automatically resuming training; modified from
https://github.com/open-mmlab/mmdetection/blob/dev-v2.20.0/mmdet/utils/misc.py
Args:
path (str): The path to find checkpoints.
suffix (str): File extension for the checkpoint. Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
"""
if not osp.exists(path):
warnings.warn("The path of the checkpoints doesn't exist.")
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
        warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = ''
for checkpoint in checkpoints:
if len(checkpoint) < len(latest_path):
continue
# `count` is iteration number, as checkpoints are saved as
# 'iter_xx.pth' or 'epoch_xx.pth' and xx is iteration number.
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
| 1,436 | 33.214286 | 82 | py |
mmsegmentation | mmsegmentation-master/mmseg/utils/set_env.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import cv2
import torch.multiprocessing as mp
from ..utils import get_root_logger
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
logger = get_root_logger()
# set multi-process start method
if platform.system() != 'Windows':
mp_start_method = cfg.get('mp_start_method', None)
current_method = mp.get_start_method(allow_none=True)
if mp_start_method in ('fork', 'spawn', 'forkserver'):
logger.info(
f'Multi-processing start method `{mp_start_method}` is '
f'different from the previous setting `{current_method}`.'
f'It will be force set to `{mp_start_method}`.')
mp.set_start_method(mp_start_method, force=True)
else:
logger.info(
f'Multi-processing start method is `{mp_start_method}`')
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = cfg.get('opencv_num_threads', None)
if isinstance(opencv_num_threads, int):
logger.info(f'OpenCV num_threads is `{opencv_num_threads}`')
cv2.setNumThreads(opencv_num_threads)
else:
        logger.info(f'OpenCV num_threads is `{cv2.getNumThreads()}`')
if cfg.data.workers_per_gpu > 1:
# setup OMP threads
# This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa
omp_num_threads = cfg.get('omp_num_threads', None)
if 'OMP_NUM_THREADS' not in os.environ:
if isinstance(omp_num_threads, int):
logger.info(f'OMP num threads is {omp_num_threads}')
os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
else:
            logger.info(f'OMP num threads is {os.environ["OMP_NUM_THREADS"]}')
# setup MKL threads
if 'MKL_NUM_THREADS' not in os.environ:
mkl_num_threads = cfg.get('mkl_num_threads', None)
if isinstance(mkl_num_threads, int):
logger.info(f'MKL num threads is {mkl_num_threads}')
os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
else:
logger.info(f'MKL num threads is {os.environ["MKL_NUM_THREADS"]}')
| 2,311 | 40.285714 | 116 | py |
mmsegmentation | mmsegmentation-master/mmseg/utils/util_distribution.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmseg import digit_version
dp_factory = {'cuda': MMDataParallel, 'cpu': MMDataParallel}
ddp_factory = {'cuda': MMDistributedDataParallel}
def build_dp(model, device='cuda', dim=0, *args, **kwargs):
"""build DataParallel module by device type.
    If device is cuda, return an MMDataParallel module; if device is mlu or
    npu, return the corresponding device-specific DataParallel module.
    Args:
        model (:class:`nn.Module`): module to be parallelized.
        device (str): device type, cuda, cpu, mlu or npu. Defaults to cuda.
dim (int): Dimension used to scatter the data. Defaults to 0.
Returns:
:class:`nn.Module`: parallelized module.
"""
if device == 'cuda':
model = model.cuda()
elif device == 'mlu':
assert digit_version(mmcv.__version__) >= digit_version('1.5.0'), \
'Please use MMCV >= 1.5.0 for MLU training!'
from mmcv.device.mlu import MLUDataParallel
dp_factory['mlu'] = MLUDataParallel
model = model.mlu()
elif device == 'npu':
assert digit_version(mmcv.__version__) >= digit_version('1.7.0'), \
'Please use MMCV >= 1.7.0 for NPU training!'
from mmcv.device.npu import NPUDataParallel
torch.npu.set_compile_mode(jit_compile=False)
dp_factory['npu'] = NPUDataParallel
model = model.npu()
return dp_factory[device](model, dim=dim, *args, **kwargs)
def build_ddp(model, device='cuda', *args, **kwargs):
"""Build DistributedDataParallel module by device type.
    If device is cuda, return an MMDistributedDataParallel module; if device
    is mlu or npu, return the corresponding device-specific
    DistributedDataParallel module.
    Args:
        model (:class:`nn.Module`): module to be parallelized.
        device (str): device type, cuda, mlu or npu.
Returns:
:class:`nn.Module`: parallelized module.
References:
.. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel.
DistributedDataParallel.html
"""
assert device in ['cuda', 'mlu', 'npu'], 'Only available for cuda, '\
'npu or mlu devices.'
if device == 'cuda':
model = model.cuda()
elif device == 'mlu':
assert digit_version(mmcv.__version__) >= digit_version('1.5.0'), \
'Please use MMCV >= 1.5.0 for MLU training!'
from mmcv.device.mlu import MLUDistributedDataParallel
ddp_factory['mlu'] = MLUDistributedDataParallel
model = model.mlu()
elif device == 'npu':
assert digit_version(mmcv.__version__) >= digit_version('1.7.0'), \
'Please use MMCV >= 1.7.0 for NPU training!'
from mmcv.device.npu import NPUDistributedDataParallel
torch.npu.set_compile_mode(jit_compile=False)
ddp_factory['npu'] = NPUDistributedDataParallel
model = model.npu()
return ddp_factory[device](model, *args, **kwargs)
def is_mlu_available():
"""Returns a bool indicating if MLU is currently available."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_npu_available():
"""Returns a bool indicating if NPU is currently available."""
return hasattr(torch, 'npu') and torch.npu.is_available()
def get_device():
"""Returns an available device, cpu, npu, cuda or mlu."""
is_device_available = {
'npu': is_npu_available(),
'cuda': torch.cuda.is_available(),
'mlu': is_mlu_available()
}
device_list = [k for k, v in is_device_available.items() if v]
return device_list[0] if len(device_list) >= 1 else 'cpu'
| 3,725 | 34.485714 | 75 | py |
mmsegmentation | mmsegmentation-master/projects/README.md | # Projects
Implementing new models and features into OpenMMLab's algorithm libraries can be troublesome due to the rigorous requirements on code quality, which may hinder the fast iteration of SOTA models and discourage community members from sharing their latest results here.
That's why we now have this `projects/` folder, where experimental features, frameworks and models can be placed: they only need to satisfy the minimum requirements on code quality and can be used as standalone libraries. Users are welcome to use them if they [use MMSegmentation from source](https://mmsegmentation.readthedocs.io/en/latest/get_started.html#best-practices).
Everyone is welcome to post their implementation of any great ideas in this folder! If you wish to start your own project, please go through the [example project](example_project/) for the best practice.
Note: The core maintainers of MMSegmentation only ensure the results are reproducible and the code quality meets its claim at the time each project was submitted, but they may not be responsible for future maintenance. The original authors take responsibility for maintaining their own projects.
| 1,169 | 116 | 381 | md |
mmsegmentation | mmsegmentation-master/projects/dest/README.md | # DEST
[DEST: Depth Estimation with Simplified Transformer](https://arxiv.org/abs/2204.13791)
## Description
Transformer and its variants have shown state-of-the-art results in many vision tasks recently, ranging from image classification to dense prediction. Despite their success, limited work has been reported on improving the model efficiency for deployment in latency-critical applications, such as autonomous driving and robotic navigation. In this paper, we aim at improving upon the existing transformers in vision, and propose a method for Dense Estimation with Simplified Transformer (DEST), which is efficient and particularly suitable for deployment on GPU-based platforms. Through strategic design choices, our model leads to significant reduction in model size, complexity, as well as inference latency, while achieving superior accuracy as compared to state-of-the-art in the task of self-supervised monocular depth estimation. We also show that our design generalizes well to other dense prediction tasks such as semantic segmentation without bells and whistles.
## Usage
### Prerequisites
- Python 3.8.12
- PyTorch 1.11
- mmcv v1.7.0
- Install [MMSegmentation](https://github.com/open-mmlab/mmsegmentation) from source
All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the mmsegmentation directory so that Python can locate the configuration files in mmsegmentation.
### Dataset preparing
Prepare the `cityscapes` dataset following this [Dataset Preparing Guide](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#prepare-datasets).
### Training commands
```shell
mim train mmsegmentation projects/dest/configs/dest_simpatt-b0_1024x1024_160k_cityscapes.py --work-dir work_dirs/dest
```
To train on multiple GPUs, e.g. 8 GPUs, run the following command:
```shell
mim train mmsegmentation projects/dest/configs/dest_simpatt-b0_1024x1024_160k_cityscapes.py --work-dir work_dirs/dest --launcher pytorch --gpus 8
```
### Testing commands
```shell
mim test mmsegmentation projects/dest/configs/dest_simpatt-b0_1024x1024_160k_cityscapes.py --work-dir work_dirs/dest --checkpoint ${CHECKPOINT_PATH} --eval mIoU
```
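For quick single-image inference, the standard MMSegmentation Python API can also be used. The snippet below is a minimal sketch, not part of the original project: it assumes the DEST modules are importable (e.g. with `PYTHONPATH` set as described above), and the checkpoint and image paths are placeholders.
```python
from mmseg.apis import inference_segmentor, init_segmentor
config_file = 'projects/dest/configs/dest_simpatt-b0_1024x1024_160k_cityscapes.py'
checkpoint_file = 'work_dirs/dest/latest.pth'  # placeholder checkpoint path
# Build the segmentor from the config and checkpoint, then run inference.
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
result = inference_segmentor(model, 'demo/demo.png')  # placeholder image path
```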
## Results and models
### Cityscapes
| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| DEST | SMIT-B0 | 1024x1024 | 160000 | - | - | 64.34 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b0_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b0_1024x1024_160k_cityscapes_20230105_232025-11f73f34.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b0_1024x1024_160k_cityscapes_20230105_232025.log) |
| DEST   | SMIT-B1  | 1024x1024 | 160000  | -        | -              | 68.21 | -             | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b1_1024x1024_160k_cityscapes.py)  | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b1_1024x1024_160k_cityscapes_20230105_232358-0dd4e86e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b1_1024x1024_160k_cityscapes_20230105_232358.log)                                                         |
| DEST | SMIT-B2 | 1024x1024 | 160000 | - | - | 71.89 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b2_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b2_1024x1024_160k_cityscapes_20230105_231943-b06319ae.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b2_1024x1024_160k_cityscapes_20230105_231943.log) |
| DEST | SMIT-B3 | 1024x1024 | 160000 | - | - | 73.51 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b3_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b3_1024x1024_160k_cityscapes_20230105_231800-ee4cec5c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b3_1024x1024_160k_cityscapes_20230105_231800.log) |
| DEST | SMIT-B4 | 1024x1024 | 160000 | - | - | 73.99 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b4_1024x1024_160k_cityscapes_20230105_232155-3ca9f4fc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b4_1024x1024_160k_cityscapes_20230105_232155.log) |
| DEST | SMIT-B5 | 1024x1024 | 160000 | - | - | 75.28 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b5_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b5_1024x1024_160k_cityscapes_20230105_231411-e83819b5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b5_1024x1024_160k_cityscapes_20230105_231411.log) |
Note:
- The above models are all trained from scratch without pretrained backbones. Accuracy can be further enhanced by appropriate pretraining.
- Training of DEST is not very stable and is sensitive to random seeds.
## Citation
```bibtex
@article{YangDEST,
title={Depth Estimation with Simplified Transformer},
author={Yang, John and An, Le and Dixit, Anurag and Koo, Jinkyu and Park, Su Inn},
journal={arXiv preprint arXiv:2204.13791},
year={2022}
}
```
## Checklist
- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.
- [x] Finish the code
- [x] Basic docstrings & proper citation
- [x] Test-time correctness
- [x] A full README
- [x] Milestone 2: Indicates a successful model implementation.
- [x] Training-time correctness
- [ ] Milestone 3: Good to be a part of our core package!
- [ ] Type hints and docstrings
- [ ] Unit tests
- [ ] Code polishing
- [ ] Metafile.yml
- [ ] Move your modules into the core package following the codebase's file hierarchy structure.
| 8,001 | 79.02 | 971 | md |
mmsegmentation | mmsegmentation-master/projects/dest/configs/README.md | # DEST
[DEST: Depth Estimation with Simplified Transformer](https://arxiv.org/abs/2204.13791)
## Introduction
<!-- [ALGORITHM] -->
<a href="https://github.com/NVIDIA/DL4AGX/tree/master/DEST">Official Repo</a>
## Abstract
<!-- [ABSTRACT] -->
Transformer and its variants have shown state-of-the-art results in many vision tasks recently, ranging from image classification to dense prediction. Despite their success, limited work has been reported on improving the model efficiency for deployment in latency-critical applications, such as autonomous driving and robotic navigation. In this paper, we aim at improving upon the existing transformers in vision, and propose a method for Dense Estimation with Simplified Transformer (DEST), which is efficient and particularly suitable for deployment on GPU-based platforms. Through strategic design choices, our model leads to significant reduction in model size, complexity, as well as inference latency, while achieving superior accuracy as compared to state-of-the-art in the task of self-supervised monocular depth estimation. We also show that our design generalizes well to other dense prediction tasks such as semantic segmentation without bells and whistles.
<!-- [IMAGE] -->
<div align=center>
<img src="https://user-images.githubusercontent.com/76149310/219313665-49fa89ed-4973-4496-bb33-3256f107e82d.png" width="70%"/>
</div>
## Citation
```bibtex
@article{YangDEST,
title={Depth Estimation with Simplified Transformer},
author={Yang, John and An, Le and Dixit, Anurag and Koo, Jinkyu and Park, Su Inn},
journal={arXiv preprint arXiv:2204.13791},
year={2022}
}
```
## Results and models
### Cityscapes
| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
| DEST | SMIT-B0 | 1024x1024 | 160000 | - | - | 64.34 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b0_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b0_1024x1024_160k_cityscapes_20230105_232025-11f73f34.pth) |
| DEST | SMIT-B1 | 1024x1024 | 160000 | - | - | 68.21 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b1_1024x1024_160k_cityscapes_20230105_232358-0dd4e86e.pth) |
| DEST | SMIT-B2 | 1024x1024 | 160000 | - | - | 71.89 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b2_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b2_1024x1024_160k_cityscapes_20230105_231943-b06319ae.pth) |
| DEST | SMIT-B3 | 1024x1024 | 160000 | - | - | 73.51 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b3_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b3_1024x1024_160k_cityscapes_20230105_231800-ee4cec5c.pth) |
| DEST | SMIT-B4 | 1024x1024 | 160000 | - | - | 73.99 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b4_1024x1024_160k_cityscapes_20230105_232155-3ca9f4fc.pth) |
| DEST | SMIT-B5 | 1024x1024 | 160000 | - | - | 75.28 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dest/dest_simpatt-b5_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dest/dest_simpatt-b5_1024x1024_160k_cityscapes_20230105_231411-e83819b5.pth) |
Note:
- The above models are all trained from scratch without pretrained backbones. Accuracy can be further improved with appropriate pretraining.
- Training of DEST is not very stable and is sensitive to random seeds.
| 4,792 | 92.980392 | 971 | md |
mmsegmentation | mmsegmentation-master/projects/dest/configs/dest_simpatt-b0.py | # model settings
embed_dims = [32, 64, 160, 256]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SimplifiedMixTransformer',
in_channels=3,
embed_dims=embed_dims,
num_stages=4,
num_layers=[2, 2, 2, 2],
num_heads=[1, 2, 5, 8],
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
sr_ratios=[8, 4, 2, 1],
out_indices=(0, 1, 2, 3),
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_cfg=norm_cfg),
decode_head=dict(
type='DESTHead',
in_channels=[32, 64, 160, 256],
in_index=[0, 1, 2, 3],
channels=32,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
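    # Sliding-window inference: 1024x1024 windows with a stride of 768,
    # i.e. 256 pixels of overlap between neighboring windows.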
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
| 1,143 | 29.105263 | 75 | py |
mmsegmentation | mmsegmentation-master/projects/dest/configs/dest_simpatt-b0_1024x1024_160k_cityscapes.py | _base_ = [
'./dest_simpatt-b0.py',
'../../../configs/_base_/datasets/cityscapes_1024x1024.py',
'../../../configs/_base_/default_runtime.py',
'../../../configs/_base_/schedules/schedule_160k.py'
]
custom_imports = dict(imports=['projects.dest.models'])
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=10.)
}))
lr_config = dict(
_delete_=True,
policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0,
min_lr=0.0,
by_epoch=False)
data = dict(samples_per_gpu=1, workers_per_gpu=1)
| 797 | 22.470588 | 63 | py |
mmsegmentation | mmsegmentation-master/projects/dest/configs/dest_simpatt-b1_1024x1024_160k_cityscapes.py | _base_ = ['./dest_simpatt-b0_1024x1024_160k_cityscapes.py']
embed_dims = [64, 128, 250, 320]
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(embed_dims=embed_dims),
decode_head=dict(in_channels=embed_dims, channels=64))
| 258 | 24.9 | 59 | py |
mmsegmentation | mmsegmentation-master/projects/dest/configs/dest_simpatt-b2_1024x1024_160k_cityscapes.py | _base_ = ['./dest_simpatt-b0_1024x1024_160k_cityscapes.py']
embed_dims = [64, 128, 250, 320]
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(embed_dims=embed_dims, num_layers=[3, 3, 6, 3]),
decode_head=dict(in_channels=embed_dims, channels=64))
| 283 | 27.4 | 66 | py |
mmsegmentation | mmsegmentation-master/projects/dest/configs/dest_simpatt-b3_1024x1024_160k_cityscapes.py | _base_ = ['./dest_simpatt-b0_1024x1024_160k_cityscapes.py']
embed_dims = [64, 128, 250, 320]
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(embed_dims=embed_dims, num_layers=[3, 6, 8, 3]),
decode_head=dict(in_channels=embed_dims, channels=64))
| 584 | 24.434783 | 66 | py |
mmsegmentation | mmsegmentation-master/projects/dest/configs/dest_simpatt-b4_1024x1024_160k_cityscapes.py | _base_ = ['./dest_simpatt-b0_1024x1024_160k_cityscapes.py']
embed_dims = [64, 128, 250, 320]
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(embed_dims=embed_dims, num_layers=[3, 8, 12, 5]),
decode_head=dict(in_channels=embed_dims, channels=64))
| 585 | 24.478261 | 67 | py |
mmsegmentation | mmsegmentation-master/projects/dest/configs/dest_simpatt-b5_1024x1024_160k_cityscapes.py | _base_ = ['./dest_simpatt-b0_1024x1024_160k_cityscapes.py']
embed_dims = [64, 128, 250, 320]
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(embed_dims=embed_dims, num_layers=[3, 10, 16, 5]),
decode_head=dict(in_channels=embed_dims, channels=64))
| 586 | 24.521739 | 68 | py |
mmsegmentation | mmsegmentation-master/projects/dest/models/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .dest_head import DESTHead
from .smit import SimplifiedMixTransformer
__all__ = ['SimplifiedMixTransformer', 'DESTHead']
| 175 | 28.333333 | 50 | py |
mmsegmentation | mmsegmentation-master/projects/dest/models/dest_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.models import HEADS
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
@HEADS.register_module()
class DESTHead(BaseDecodeHead):
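    """Decode head of DEST.
    A lightweight top-down fusion head: every backbone feature map is first
    projected by a 1x1 conv, then progressively upsampled and merged with the
    next finer level through 3x3 fuse convs before the classification layer.
    Args:
        interpolate_mode (str): Interpolation mode of the upsampling layers.
            Default: 'bilinear'.
    """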
def __init__(self, interpolate_mode='bilinear', **kwargs):
super().__init__(input_transform='multiple_select', **kwargs)
self.interpolate_mode = interpolate_mode
num_inputs = len(self.in_channels)
assert num_inputs == len(self.in_index)
self.fuse_in_channels = self.in_channels.copy()
for i in range(num_inputs - 1):
self.fuse_in_channels[i] += self.fuse_in_channels[i + 1]
self.convs = nn.ModuleList()
for i in range(num_inputs):
self.convs.append(
ConvModule(
in_channels=self.in_channels[i],
out_channels=self.in_channels[i],
kernel_size=1,
stride=1,
act_cfg=self.act_cfg))
self.fuse_convs = nn.ModuleList()
for i in range(num_inputs):
self.fuse_convs.append(
ConvModule(
in_channels=self.fuse_in_channels[i],
out_channels=self.in_channels[i],
kernel_size=3,
stride=1,
padding=1,
act_cfg=self.act_cfg))
self.upsample = nn.ModuleList([
nn.Sequential(nn.Upsample(scale_factor=2, mode=interpolate_mode))
] * len(self.in_channels))
def forward(self, inputs):
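        # Iterate from the coarsest level to the finest: project each level
        # with a 1x1 conv, concatenate it with the fused feature from the
        # previous (coarser) level, upsample by 2x and merge with a 3x3 conv.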
feat = None
for idx in reversed(range(len(inputs))):
x = self.convs[idx](inputs[idx])
if idx != len(inputs) - 1:
                x = torch.cat([feat, x], dim=1)
x = self.upsample[idx](x)
feat = self.fuse_convs[idx](x)
return self.cls_seg(feat)
| 1,956 | 34.581818 | 77 | py |
mmsegmentation | mmsegmentation-master/projects/dest/models/smit.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
import torch
import torch.nn as nn
from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer
from mmcv.cnn.bricks.drop import build_dropout
from mmcv.cnn.utils.weight_init import trunc_normal_init
from mmcv.runner import BaseModule, ModuleList, Sequential
from mmcv.utils import to_2tuple
from mmseg.models import BACKBONES
from mmseg.models.utils.embed import AdaptivePadding
class SimplifiedPatchEmbed(BaseModule):
"""Image to Patch Embedding.
We use a conv layer to implement SimplifiedPatchEmbed.
Args:
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_type (str): The config dict for embedding
conv layer type selection. Default: "Conv2d".
kernel_size (int): The kernel_size of embedding conv. Default: 16.
stride (int, optional): The slide stride of embedding conv.
Default: None (Would be set as `kernel_size`).
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int): The dilation rate of embedding conv. Default: 1.
bias (bool): Bias of embed conv. Default: True.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
input_size (int | tuple | None): The size of input, which will be
used to calculate the out size. Only work when `dynamic_size`
is False. Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
in_channels=3,
embed_dims=768,
conv_type='Conv2d',
kernel_size=16,
stride=None,
padding='corner',
dilation=1,
bias=True,
norm_cfg=None,
input_size=None,
init_cfg=None):
super(SimplifiedPatchEmbed, self).__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of conv
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.projection = build_conv_layer(
dict(type=conv_type),
in_channels=in_channels,
out_channels=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
if input_size:
input_size = to_2tuple(input_size)
# `init_out_size` would be used outside to
# calculate the num_patches
# when `use_abs_pos_embed` outside
self.init_input_size = input_size
if self.adap_padding:
pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)
input_h, input_w = input_size
input_h = input_h + pad_h
input_w = input_w + pad_w
input_size = (input_h, input_w)
# https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
h_out = (input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
w_out = (input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.init_out_size = (h_out, w_out)
else:
self.init_input_size = None
self.init_out_size = None
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_init(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
"""
Args:
            x (Tensor): Has shape (B, C, H, W). In most cases, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, embed_dims, out_h * out_w)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adap_padding:
x = self.adap_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3])
if self.norm is not None:
x = self.norm(x)
x = x.flatten(2)
return x, out_size
class DWConv(nn.Module):
def __init__(self, dims):
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dims, dims, 3, 1, 1, bias=True, groups=dims)
def forward(self, x, H, W):
B, C, N = x.shape
x = x.reshape(B, C, H, W)
x = self.dwconv(x)
x = x.flatten(2)
return x
class MixFFN(nn.Module):
"""An implementation of MixFFN of DEST.
The differences between MixFFN & FFN:
1. Use 1X1 Conv to replace Linear layer.
2. Introduce 3X3 Conv to encode positional information.
Args:
embed_dims (int): The feature dimension. Same as
`MultiheadAttention`. Defaults: 256.
feedforward_channels (int): The hidden dimension of FFNs.
Defaults: 1024.
act_cfg (dict, optional): The activation config for FFNs.
Default: dict(type='ReLU')
        ffn_drop (float, optional): Probability of an element to be
            zeroed in FFN. Default 0.0.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='SyncBN', requires_grad=True).
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
feedforward_channels,
act_cfg=dict(type='ReLU'),
ffn_drop=0.,
norm_cfg=dict(type='SyncBN', requires_grad=True),
dropout_layer=None,
init_cfg=None):
super(MixFFN, self).__init__()
self.embed_dims = embed_dims
self.feedforward_channels = feedforward_channels
self.act_cfg = act_cfg
activate = build_activation_layer(act_cfg)
in_channels = embed_dims
fc1 = nn.Conv1d(
in_channels=in_channels,
out_channels=feedforward_channels,
kernel_size=1,
stride=1)
norm1 = build_norm_layer(norm_cfg, feedforward_channels)[1]
self.dwconv = DWConv(feedforward_channels)
norm2 = build_norm_layer(norm_cfg, feedforward_channels)[1]
fc2 = nn.Conv1d(
in_channels=feedforward_channels,
out_channels=in_channels,
kernel_size=1,
stride=1)
drop = nn.Dropout(ffn_drop)
pre_layers = [fc1, norm1]
post_layers = [norm2, activate, drop, fc2, drop]
self.pre_layers = Sequential(*pre_layers)
self.post_layers = Sequential(*post_layers)
self.dropout_layer = build_dropout(
dropout_layer) if dropout_layer else torch.nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv1d):
trunc_normal_init(m, std=.02, bias=0.)
def forward(self, x, hw_shape, identity):
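        # 1x1 conv expansion -> depth-wise 3x3 conv (which injects positional
        # information) -> 1x1 conv projection, with a residual connection
        # back to `identity`.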
out = self.pre_layers(x)
out = self.dwconv(out, hw_shape[0], hw_shape[1])
out = self.post_layers(out)
return identity + self.dropout_layer(out)
class SimplifiedAttention(nn.Module):
"""An implementation of Simplified Multi-head Attention of DEST.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads.
attn_drop (float): A Dropout layer on attn_output_weights.
Default: 0.0.
proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
Default: 0.0.
sr_ratio (int): The ratio of spatial reduction of Efficient Multi-head
Attention of Segformer. Default: 1.
        qkv_bias (bool): enable bias for qkv if True. Default: False.
        qk_scale (float, optional): scales for query and key. Default: None.
        dropout_layer (dict, optional): Config of the dropout layer applied
            to the output before the residual connection. Default: None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='SyncBN', requires_grad=True).
"""
def __init__(self,
embed_dims,
num_heads,
attn_drop=0.,
proj_drop=0.,
sr_ratio=1,
qkv_bias=False,
qk_scale=None,
dropout_layer=None,
norm_cfg=dict(type='SyncBN', requires_grad=True)):
super().__init__()
self.embed_dims = embed_dims
self.num_heads = num_heads
head_dim = embed_dims // num_heads
self.scale = qk_scale or head_dim**-0.5
self.q = nn.Conv1d(embed_dims, embed_dims, 1, bias=qkv_bias)
self.k = nn.Conv1d(embed_dims, embed_dims, 1, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv1d(embed_dims, embed_dims, 1)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(
embed_dims, embed_dims, kernel_size=sr_ratio, stride=sr_ratio)
self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]
self.dropout_layer = build_dropout(
dropout_layer) if dropout_layer else torch.nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=.02, bias=0.)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Conv1d):
trunc_normal_init(m, std=.02, bias=0.)
def forward(self, x, hw_shape, identity):
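        # Simplified attention: queries and keys come from 1x1 convs (the key
        # sequence is optionally shortened by a spatial-reduction conv when
        # sr_ratio > 1), while the value path is collapsed to the per-channel
        # mean of the input tokens, shared across heads. The q-k score map is
        # then reduced with a max over the key dimension, replacing the usual
        # softmax.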
H, W = hw_shape
B, C, N = x.shape
q = self.q(x)
q = q.reshape(B, self.num_heads, C // self.num_heads, N)
q = q.permute(0, 1, 3, 2)
if self.sr_ratio > 1:
x_ = x.reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1)
x_ = self.norm1(x_)
k = self.k(x_).reshape(B, self.num_heads, C // self.num_heads, -1)
else:
k = self.k(x).reshape(B, self.num_heads, C // self.num_heads, -1)
v = torch.mean(x, 2, True).repeat(1, 1,
self.num_heads).transpose(-2, -1)
attn = (q @ k) * self.scale
attn, _ = torch.max(attn, -1)
out = (attn.transpose(-2, -1) @ v)
out = out.transpose(-2, -1)
out = self.proj(out)
return identity + self.dropout_layer(out)
class SimplifiedTransformerEncoderLayer(BaseModule):
"""Implements one encoder layer in DEST.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
        drop_rate (float): Probability of an element to be zeroed
            after the feed forward layer. Default 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0.
drop_path_rate (float): stochastic depth rate. Default 0.0.
qkv_bias (bool): enable bias for qkv if True.
Default: True.
        act_cfg (dict): The activation config for FFNs.
            Default: dict(type='ReLU').
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='SyncBN').
        batch_first (bool): Key, Query and Value are shape of
            (batch, n, embed_dim)
            or (n, batch, embed_dim). Default: True.
        qk_scale (float, optional): scales for query and key. Default: None.
sr_ratio (int): The ratio of spatial reduction of Efficient Multi-head
Attention of Segformer. Default: 1.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed. Default: False.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
qkv_bias=True,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='SyncBN'),
batch_first=True,
qk_scale=None,
sr_ratio=1,
with_cp=False):
        super(SimplifiedTransformerEncoderLayer, self).__init__()
# The ret[0] of build_norm_layer is norm name.
self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]
self.attn = SimplifiedAttention(
embed_dims=embed_dims,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
sr_ratio=sr_ratio,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate))
self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]
self.ffn = MixFFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=.02, bias=0.)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, hw_shape):
x = self.attn(self.norm1(x), hw_shape, identity=x)
x = self.ffn(self.norm2(x), hw_shape, identity=x)
return x
@BACKBONES.register_module()
class SimplifiedMixTransformer(BaseModule):
"""The backbone of DEST.
    This backbone is adapted from the MiT backbone of `SegFormer: Simple and
    Efficient Design for Semantic Segmentation with
    Transformers <https://arxiv.org/abs/2105.15203>`_, with the attention and
    FFN modules replaced by their simplified DEST counterparts.
Args:
in_channels (int): Number of input channels. Default: 3.
embed_dims (Sequence[int]): Embedding dimensions of each transformer
encode layer. Default: [32, 64, 160, 256].
        num_stages (int): The num of stages. Default: 4.
        num_layers (Sequence[int]): The layer number of each transformer encode
            layer. Default: [2, 2, 2, 2].
num_heads (Sequence[int]): The attention heads of each transformer
encode layer. Default: [1, 2, 4, 8].
patch_sizes (Sequence[int]): The patch_size of each overlapped patch
embedding. Default: [7, 3, 3, 3].
strides (Sequence[int]): The stride of each overlapped patch embedding.
Default: [4, 2, 2, 2].
sr_ratios (Sequence[int]): The spatial reduction rate of each
transformer encode layer. Default: [8, 4, 2, 1].
out_indices (Sequence[int] | int): Output from which stages.
Default: (0, 1, 2, 3).
mlp_ratios (Sequence[int]): ratios of mlp hidden dim to embedding dim.
Default: [8, 8, 4, 4].
qkv_bias (bool): Enable bias for qkv if True. Default: True.
drop_rate (float): Probability of an element to be zeroed.
Default 0.0
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0
drop_path_rate (float): stochastic depth rate. Default 0.0
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='SyncBN', requires_grad=True).
        act_cfg (dict): The activation config for FFNs.
            Default: dict(type='ReLU').
pretrained (str, optional): model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed. Default: False.
"""
def __init__(self,
in_channels=3,
embed_dims=[32, 64, 160, 256],
num_stages=4,
num_layers=[2, 2, 2, 2],
num_heads=[1, 2, 4, 8],
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
sr_ratios=[8, 4, 2, 1],
out_indices=(0, 1, 2, 3),
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='SyncBN', requires_grad=True),
pretrained=None,
init_cfg=None,
with_cp=False):
super(SimplifiedMixTransformer, self).__init__(init_cfg=init_cfg)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is not None:
raise TypeError('pretrained must be a str or None')
self.embed_dims = embed_dims
self.num_stages = num_stages
self.num_layers = num_layers
self.num_heads = num_heads
self.patch_sizes = patch_sizes
self.strides = strides
self.sr_ratios = sr_ratios
self.with_cp = with_cp
assert num_stages == len(num_layers) == len(num_heads) \
== len(patch_sizes) == len(strides) == len(sr_ratios)
self.out_indices = out_indices
assert max(out_indices) < self.num_stages
# transformer encoder
dpr = [
x.item()
for x in torch.linspace(0, drop_path_rate, sum(num_layers))
        ]  # stochastic depth decay rule
cur = 0
self.layers = ModuleList()
for i, num_layer in enumerate(num_layers):
patch_embed = SimplifiedPatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims[i],
kernel_size=patch_sizes[i],
stride=strides[i],
padding=patch_sizes[i] // 2,
norm_cfg=norm_cfg)
layer = ModuleList([
                SimplifiedTransformerEncoderLayer(
embed_dims=embed_dims[i],
num_heads=num_heads[i],
feedforward_channels=mlp_ratios[i] * embed_dims[i],
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=dpr[cur + idx],
qkv_bias=qkv_bias,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
with_cp=with_cp,
sr_ratio=sr_ratios[i]) for idx in range(num_layer)
])
in_channels = embed_dims[i]
# The ret[0] of build_norm_layer is norm name.
norm = build_norm_layer(norm_cfg, embed_dims[i])[1]
self.layers.append(ModuleList([patch_embed, layer, norm]))
cur += num_layer
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=.02, bias=0.)
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
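        # Each stage: overlapped patch embedding (returns flattened tokens of
        # shape (B, C, N)), a stack of simplified transformer blocks, a norm
        # layer, and finally a reshape back to a (B, C, H, W) feature map.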
outs = []
for i, layer in enumerate(self.layers):
x, (H, W) = layer[0](x)
for block in layer[1]:
x = block(x, (H, W))
x = layer[2](x)
N, C, L = x.shape
x = x.reshape(N, C, H, W)
outs.append(x)
return outs
| 21,741 | 37.964158 | 79 | py |
mmsegmentation | mmsegmentation-master/projects/example_project/README.md | # Dummy ResNet Wrapper
This is an example README for community `projects/`. We have provided detailed explanations for each field in the form of html comments, which are visible when you read the source of this README file. If you wish to submit your project to our main repository, then all the fields in this README are mandatory for others to understand what you have achieved in this implementation. For more details, read our [contribution guide](https://github.com/open-mmlab/mmsegmentation/blob/master/.github/CONTRIBUTING.md) or approach us in [Discussions](https://github.com/open-mmlab/mmsegmentation/discussions).
## Description
<!-- Share any information you would like others to know. For example:
Author: @xxx.
This is an implementation of \[XXX\]. -->
This project implements a dummy ResNet wrapper, which literally does nothing new but prints "hello world" during initialization.
## Usage
<!-- For a typical model, this section should contain the commands for training and testing. You are also suggested to dump your environment specification to env.yml by `conda env export > env.yml`. -->
### Prerequisites
- Python 3.7
- PyTorch 1.6 or higher
- [MIM](https://github.com/open-mmlab/mim) v0.33 or higher
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation) v0.29.1 or higher
All the commands below rely on the correct configuration of `PYTHONPATH`, which should point to the project's directory so that Python can locate the module files. In `example_project/` root directory, run the following line to add the current directory to `PYTHONPATH`:
```shell
export PYTHONPATH=`pwd`:$PYTHONPATH
```
### Training commands
```shell
mim train mmsegmentation configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py --work-dir work_dirs/dummy_resnet
```
To train on multiple GPUs, e.g. 8 GPUs, run the following command:
```shell
mim train mmsegmentation configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py --work-dir work_dirs/dummy_resnet --launcher pytorch --gpus 8
```
### Testing commands
```shell
mim test mmsegmentation configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py --work-dir work_dirs/dummy_resnet --checkpoint ${CHECKPOINT_PATH} --eval mIoU
```
<!-- List the results as usually done in other model's README. [Example](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fcn#results-and-models)
You should claim whether this is based on the pre-trained weights, which are converted from the official release; or it's a reproduced result obtained from retraining the model in this project. -->
| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| FCN | R-50-D8 | 512x1024 | 40000 | 5.7 | 4.17 | 72.25 | 73.36 | [config](configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608.log.json) |
## Citation
<!-- You may remove this section if not applicable. -->
```bibtex
@misc{mmseg2020,
title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark},
author={MMSegmentation Contributors},
howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}},
year={2020}
}
```
## Checklist
Here is a checklist illustrating a usual development workflow of a successful project, and also serves as an overview of this project's progress.
<!-- The PIC (person in charge) or contributors of this project should check all the items that they believe have been finished, which will further be verified by codebase maintainers via a PR.
OpenMMLab's maintainer will review the code to ensure the project's quality. Reaching the first milestone means that this project meets the minimum requirements for being merged into 'projects/'. But this project is only eligible to become a part of the core package upon attaining the last milestone.
Note that keeping this section up-to-date is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed.
A project does not necessarily have to be finished in a single PR, but it's essential for the project to at least reach the first milestone in its very first PR. -->
- [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.
- [ ] Finish the code
<!-- The code's design shall follow existing interfaces and convention. For example, each model component should be registered into `mmseg.registry.MODELS` and configurable via a config file. -->
- [ ] Basic docstrings & proper citation
<!-- Each major object should contain a docstring, describing its functionality and arguments. If you have adapted the code from other open-source projects, don't forget to cite the source project in docstring and make sure your behavior is not against its license. Typically, we do not accept any code snippet under GPL license. [A Short Guide to Open Source Licenses](https://medium.com/nationwide-technology/a-short-guide-to-open-source-licenses-cf5b1c329edd) -->
- [ ] Test-time correctness
<!-- If you are reproducing the result from a paper, make sure your model's inference-time performance matches that in the original paper. The weights usually could be obtained by simply renaming the keys in the official pre-trained weights. This test could be skipped though, if you are able to prove the training-time correctness and check the second milestone. -->
- [ ] A full README
<!-- As this template does. -->
- [ ] Milestone 2: Indicates a successful model implementation.
- [ ] Training-time correctness
<!-- If you are reproducing the result from a paper, checking this item means that you should have trained your model from scratch based on the original paper's specification and verified that the final result matches the report within a minor error range. -->
- [ ] Milestone 3: Good to be a part of our core package!
- [ ] Type hints and docstrings
<!-- Ideally *all* the methods should have [type hints](https://www.pythontutorial.net/python-basics/python-type-hints/) and [docstrings](https://google.github.io/styleguide/pyguide.html#381-docstrings). [Example](https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/utils/misc.py#L7) -->
- [ ] Unit tests
<!-- Unit tests for each module are required. [Example](https://github.com/open-mmlab/mmsegmentation/blob/master/tests/test_utils/test_misc.py) -->
- [ ] Code polishing
<!-- Refactor your code according to reviewer's comment. -->
- [ ] Metafile.yml
<!-- It will be parsed by MIM and Inferencer. [Example](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn.yml) -->
- [ ] Move your modules into the core package following the codebase's file hierarchy structure.
<!-- In particular, you may have to refactor this README into a standard one. [Example](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/README.md) -->
| 8,356 | 63.284615 | 601 | md |
mmsegmentation | mmsegmentation-master/projects/example_project/configs/fcn_dummy-r50-d8_4xb2-40k_cityscapes-512x1024.py | # Copyright (c) OpenMMLab. All rights reserved.
_base_ = ['../../../configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py']
custom_imports = dict(imports=['projects.example_project.dummy'])
model = dict(backbone=dict(type='DummyResNet'))
| 236 | 32.857143 | 71 | py |
mmsegmentation | mmsegmentation-master/projects/example_project/dummy/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .dummy_resnet import DummyResNet
__all__ = ['DummyResNet']
| 113 | 21.8 | 47 | py |
mmsegmentation | mmsegmentation-master/projects/example_project/dummy/dummy_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmseg.models import BACKBONES
from mmseg.models.backbones import ResNetV1c
@BACKBONES.register_module()
class DummyResNet(ResNetV1c):
"""Implements a dummy ResNet wrapper for demonstration purpose.
Args:
**kwargs: All the arguments are passed to the parent class.
"""
def __init__(self, **kwargs) -> None:
print('Hello world!')
super().__init__(**kwargs)
| 451 | 27.25 | 67 | py |
mmsegmentation | mmsegmentation-master/projects/kitti_step_dataset/README.md | # KITTI STEP Dataset
Support **`KITTI STEP Dataset`**
## Description
Author: TimoK93
This project adds support for the **`KITTI STEP Dataset`**.
### Dataset preparation
After registration, the data can be downloaded from [KITTI-STEP](http://www.cvlibs.net/datasets/kitti/eval_step.php).
After downloading the KITTI-STEP dataset, organize it with the following structure.
```
mmsegmentation
├── mmseg
├── tools
├── configs
├── data
│ ├── kitti_step
│ │ ├── testing
│ │ ├── training
│ │ ├── panoptic_maps
```
Generate the label files and KITTI subsets by running the preparation script:
```shell
python tools/convert_datasets/kitti_step.py /path/to/kitti_step
```
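Under the hood, the script simply extracts the semantic channel from every panoptic map and writes it out as a `*_labelTrainIds.png` file (a simplified sketch of the logic in `tools/convert_datasets/kitti_step.py`; the file names below are only illustrative):
```python
import cv2
# KITTI-STEP panoptic maps store the semantic train id in the R channel,
# which is index 2 when OpenCV reads the image as BGR.
img = cv2.imread('panoptic_maps/train/0000/000000.png')
sem_seg = img[:, :, 2]
cv2.imwrite('000000_labelTrainIds.png', sem_seg)
```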
After executing the script, your directory should look like
```
mmsegmentation
├── mmseg
├── tools
├── configs
├── data
│ ├── kitti_step
│ │ ├── testing
│ │ ├── training
│ │ ├── panoptic_maps
│ │ ├── training_openmmlab
│ │ ├── panoptic_maps_openmmlab
```
### Training commands
```bash
# Dataset train commands
# at `mmsegmentation` folder
bash tools/dist_train.sh projects/kitti_step_dataset/configs/segformer/segformer_mit-b5_368x368_160k_kittistep.py 8
```
### Testing commands
```bash
mim test mmsegmentation projects/kitti_step_dataset/configs/segformer/segformer_mit-b5_368x368_160k_kittistep.py --work-dir work_dirs/segformer_mit-b5_368x368_160k_kittistep --checkpoint ${CHECKPOINT_PATH} --eval mIoU
```
| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | model | log |
| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Segformer | MIT-B5 | 368x368 | 160000 | - | - | 65.05 | - | [config](configs/segformer/segformer_mit-b5_368x368_160k_kittistep.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_368x368_160k_kittistep/segformer_mit-b5_368x368_160k_kittistep_20230506_103002-20797496.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_368x368_160k_kittistep/segformer_mit-b5_368x368_160k_kittistep_20230506_103002.log.json) |
## Checklist
- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.
- [x] Finish the code
- [x] Basic docstrings & proper citation
- [ ] Test-time correctness
- [x] A full README
- [ ] Milestone 2: Indicates a successful model implementation.
- [ ] Training-time correctness
- [ ] Milestone 3: Good to be a part of our core package!
- [ ] Type hints and docstrings
- [ ] Unit tests
- [ ] Code polishing
- [ ] Metafile.yml
- [ ] Move your modules into the core package following the codebase's file hierarchy structure.
| 3,703 | 36.795918 | 527 | md |
mmsegmentation | mmsegmentation-master/projects/kitti_step_dataset/configs/_base_/datasets/kittistep.py | # dataset settings
dataset_type = 'KITTISTEPDataset'
data_root = 'data/kitti_step/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (368, 368)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(1242, 375), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1242, 375),
img_ratios=[1.0],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='training_openmmlab/image_02/train',
ann_dir='panoptic_maps_openmmlab/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='training_openmmlab/image_02/val',
ann_dir='panoptic_maps_openmmlab/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='training_openmmlab/image_02/val',
ann_dir='panoptic_maps_openmmlab/val',
pipeline=test_pipeline))
| 1,845 | 32.563636 | 77 | py |
mmsegmentation | mmsegmentation-master/projects/kitti_step_dataset/configs/deeplabv3plus/deeplabv3plus_r50-d8_368x368_80k_kittistep.py | _base_ = [
'../../../../configs/_base_/models/deeplabv3plus_r50-d8.py',
'../_base_/datasets/kittistep.py',
'../../../../configs/_base_/default_runtime.py',
'../../../../configs/_base_/schedules/schedule_80k.py'
]
model = dict(
decode_head=dict(align_corners=True),
auxiliary_head=dict(align_corners=True),
test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
| 404 | 35.818182 | 73 | py |
mmsegmentation | mmsegmentation-master/projects/kitti_step_dataset/configs/segformer/segformer_mit-b0_368x368_160k_kittistep.py | _base_ = [
'../../../../configs/_base_/models/segformer_mit-b0.py',
'../_base_/datasets/kittistep.py',
'../../../../configs/_base_/default_runtime.py',
'../../../../configs/_base_/schedules/schedule_160k.py'
]
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b0_20220624-7e0fe6dd.pth' # noqa
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)),
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
# optimizer
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=10.)
}))
lr_config = dict(
_delete_=True,
policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0,
min_lr=0.0,
by_epoch=False)
data = dict(samples_per_gpu=2, workers_per_gpu=2)
| 1,056 | 26.102564 | 121 | py |
mmsegmentation | mmsegmentation-master/projects/kitti_step_dataset/configs/segformer/segformer_mit-b5_368x368_160k_kittistep.py | _base_ = ['./segformer_mit-b0_368x368_160k_kittistep.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b5_20220624-658746d9.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
embed_dims=64,
num_layers=[3, 6, 40, 3]),
decode_head=dict(in_channels=[64, 128, 320, 512]))
| 392 | 38.3 | 121 | py |
mmsegmentation | mmsegmentation-master/projects/kitti_step_dataset/mmseg/datasets/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .kitti_step import KITTISTEPDataset
__all__ = [
'KITTISTEPDataset',
]
| 128 | 17.428571 | 47 | py |
mmsegmentation | mmsegmentation-master/projects/kitti_step_dataset/mmseg/datasets/kitti_step.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.cityscapes import CityscapesDataset
@DATASETS.register_module()
class KITTISTEPDataset(CityscapesDataset):
"""KITTI-STEP dataset."""
def __init__(self,
img_suffix='.png',
seg_map_suffix='_labelTrainIds.png',
**kwargs):
super(KITTISTEPDataset, self).__init__(
img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
| 517 | 31.375 | 75 | py |
mmsegmentation | mmsegmentation-master/projects/kitti_step_dataset/tools/convert_datasets/kitti_step.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import shutil
import cv2
import mmcv
def kitti_to_train_ids(input):
src, gt_dir, new_gt_dir = input
label_file = src.replace('.png',
'_labelTrainIds.png').replace(gt_dir, new_gt_dir)
img = cv2.imread(src)
dirname = os.path.dirname(label_file)
os.makedirs(dirname, exist_ok=True)
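    # The semantic train id is stored in the R channel of the panoptic map,
    # i.e. index 2 when the image is read as BGR by OpenCV.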
sem_seg = img[:, :, 2]
cv2.imwrite(label_file, sem_seg)
def copy_file(input):
src, dst = input
if not osp.exists(dst):
os.makedirs(osp.dirname(dst), exist_ok=True)
shutil.copyfile(src, dst)
def parse_args():
parser = argparse.ArgumentParser(
description='Convert KITTI-STEP annotations to TrainIds')
parser.add_argument('kitti_path', help='kitti step data path')
parser.add_argument('--gt-dir', default='panoptic_maps', type=str)
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
'--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
kitti_path = args.kitti_path
out_dir = args.out_dir if args.out_dir else kitti_path
mmcv.mkdir_or_exist(out_dir)
gt_dir = osp.join(kitti_path, args.gt_dir)
ann_files = []
for poly in mmcv.scandir(gt_dir, '.png', recursive=True):
poly_file = osp.join(gt_dir, poly)
ann_files.append([poly_file, args.gt_dir, args.gt_dir + '_openmmlab'])
if args.nproc > 1:
mmcv.track_parallel_progress(kitti_to_train_ids, ann_files, args.nproc)
else:
mmcv.track_progress(kitti_to_train_ids, ann_files)
copy_files = []
for f in mmcv.scandir(gt_dir, '.png', recursive=True):
original_f = osp.join(gt_dir, f).replace(args.gt_dir + '/train',
'training/image_02')
new_f = osp.join(gt_dir, f).replace(args.gt_dir,
'training_openmmlab/image_02')
original_f = original_f.replace(args.gt_dir + '/val',
'training/image_02')
new_f = new_f.replace(args.gt_dir, 'training_openmmlab/image_02')
copy_files.append([original_f, new_f])
if args.nproc > 1:
mmcv.track_parallel_progress(copy_file, copy_files, args.nproc)
else:
mmcv.track_progress(copy_file, copy_files)
if __name__ == '__main__':
main()
| 2,498 | 31.038462 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
| 48 | 23.5 | 47 | py |
mmsegmentation | mmsegmentation-master/tests/test_config.py | # Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
from os.path import dirname, exists, isdir, join, relpath
from mmcv import Config
from torch import nn
from mmseg.models import build_segmentor
def _get_config_directory():
"""Find the predefined segmentor config directory."""
try:
# Assume we are running in the source mmsegmentation repo
repo_dpath = dirname(dirname(__file__))
except NameError:
# For IPython development when this __file__ is not defined
import mmseg
repo_dpath = dirname(dirname(mmseg.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def test_config_build_segmentor():
"""Test that all segmentation models defined in the configs can be
initialized."""
config_dpath = _get_config_directory()
print('Found config_dpath = {!r}'.format(config_dpath))
config_fpaths = []
# one config each sub folder
for sub_folder in os.listdir(config_dpath):
        if isdir(join(config_dpath, sub_folder)):
config_fpaths.append(
list(glob.glob(join(config_dpath, sub_folder, '*.py')))[0])
config_fpaths = [p for p in config_fpaths if p.find('_base_') == -1]
config_names = [relpath(p, config_dpath) for p in config_fpaths]
print('Using {} config files'.format(len(config_names)))
for config_fname in config_names:
config_fpath = join(config_dpath, config_fname)
config_mod = Config.fromfile(config_fpath)
config_mod.model
print('Building segmentor, config_fpath = {!r}'.format(config_fpath))
# Remove pretrained keys to allow for testing in an offline environment
if 'pretrained' in config_mod.model:
config_mod.model['pretrained'] = None
print('building {}'.format(config_fname))
segmentor = build_segmentor(config_mod.model)
assert segmentor is not None
head_config = config_mod.model['decode_head']
_check_decode_head(head_config, segmentor.decode_head)
def test_config_data_pipeline():
"""Test whether the data pipeline is valid and can process corner cases.
CommandLine:
        xdoctest -m tests/test_config.py test_config_data_pipeline
"""
import numpy as np
from mmcv import Config
from mmseg.datasets.pipelines import Compose
config_dpath = _get_config_directory()
print('Found config_dpath = {!r}'.format(config_dpath))
config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
config_fpaths = [p for p in config_fpaths if p.find('_base_') == -1]
config_names = [relpath(p, config_dpath) for p in config_fpaths]
print('Using {} config files'.format(len(config_names)))
for config_fname in config_names:
config_fpath = join(config_dpath, config_fname)
print(
'Building data pipeline, config_fpath = {!r}'.format(config_fpath))
config_mod = Config.fromfile(config_fpath)
# remove loading pipeline
load_img_pipeline = config_mod.train_pipeline.pop(0)
to_float32 = load_img_pipeline.get('to_float32', False)
config_mod.train_pipeline.pop(0)
config_mod.test_pipeline.pop(0)
train_pipeline = Compose(config_mod.train_pipeline)
test_pipeline = Compose(config_mod.test_pipeline)
img = np.random.randint(0, 255, size=(1024, 2048, 3), dtype=np.uint8)
if to_float32:
img = img.astype(np.float32)
seg = np.random.randint(0, 255, size=(1024, 2048, 1), dtype=np.uint8)
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_semantic_seg=seg)
results['seg_fields'] = ['gt_semantic_seg']
print('Test training data pipeline: \n{!r}'.format(train_pipeline))
output_results = train_pipeline(results)
assert output_results is not None
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
)
print('Test testing data pipeline: \n{!r}'.format(test_pipeline))
output_results = test_pipeline(results)
assert output_results is not None
def _check_decode_head(decode_head_cfg, decode_head):
if isinstance(decode_head_cfg, list):
assert isinstance(decode_head, nn.ModuleList)
assert len(decode_head_cfg) == len(decode_head)
num_heads = len(decode_head)
for i in range(num_heads):
_check_decode_head(decode_head_cfg[i], decode_head[i])
return
    # check consistency between head_config and decode_head
    assert decode_head_cfg['type'] == decode_head.__class__.__name__
in_channels = decode_head_cfg.in_channels
input_transform = decode_head.input_transform
assert input_transform in ['resize_concat', 'multiple_select', None]
    if input_transform is not None:
        assert isinstance(in_channels, (list, tuple))
        assert isinstance(decode_head.in_index, (list, tuple))
        assert len(in_channels) == len(decode_head.in_index)
        if input_transform == 'resize_concat':
            assert sum(in_channels) == decode_head.in_channels
    else:
        assert isinstance(in_channels, int)
        assert in_channels == decode_head.in_channels
        assert isinstance(decode_head.in_index, int)
if decode_head_cfg['type'] == 'PointHead':
assert decode_head_cfg.channels+decode_head_cfg.num_classes == \
decode_head.fc_seg.in_channels
assert decode_head.fc_seg.out_channels == decode_head_cfg.num_classes
else:
assert decode_head_cfg.channels == decode_head.conv_seg.in_channels
assert decode_head.conv_seg.out_channels == decode_head_cfg.num_classes
| 6,068 | 36.233129 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_digit_version.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmseg import digit_version
def test_digit_version():
assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0)
assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0)
assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0)
assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1)
assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0)
assert digit_version('1.0') == digit_version('1.0.0')
assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5')
assert digit_version('1.0.0dev') < digit_version('1.0.0a')
assert digit_version('1.0.0a') < digit_version('1.0.0a1')
assert digit_version('1.0.0a') < digit_version('1.0.0b')
assert digit_version('1.0.0b') < digit_version('1.0.0rc')
assert digit_version('1.0.0rc1') < digit_version('1.0.0')
assert digit_version('1.0.0') < digit_version('1.0.0post')
assert digit_version('1.0.0post') < digit_version('1.0.0post1')
assert digit_version('v1') == (1, 0, 0, 0, 0, 0)
assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0)
| 1,089 | 48.545455 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_eval_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import tempfile
from unittest.mock import MagicMock, patch
import mmcv.runner
import pytest
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from torch.utils.data import DataLoader, Dataset
from mmseg.apis import single_gpu_test
from mmseg.core import DistEvalHook, EvalHook
class ExampleDataset(Dataset):
def __getitem__(self, idx):
results = dict(img=torch.tensor([1]), img_metas=dict())
return results
def __len__(self):
return 1
class ExampleModel(nn.Module):
def __init__(self):
super(ExampleModel, self).__init__()
self.test_cfg = None
self.conv = nn.Conv2d(3, 3, 3)
def forward(self, img, img_metas, test_mode=False, **kwargs):
return img
def train_step(self, data_batch, optimizer):
loss = self.forward(**data_batch)
return dict(loss=loss)
def test_iter_eval_hook():
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [
DataLoader(
test_dataset,
batch_size=1,
sampler=None,
                num_workers=0,
shuffle=False)
]
EvalHook(data_loader)
test_dataset = ExampleDataset()
test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
data_loader = DataLoader(
test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=model.parameters()))
# test EvalHook
with tempfile.TemporaryDirectory() as tmpdir:
eval_hook = EvalHook(data_loader, by_epoch=False, efficient_test=True)
runner = mmcv.runner.IterBasedRunner(
model=model,
optimizer=optimizer,
work_dir=tmpdir,
logger=logging.getLogger())
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
test_dataset.evaluate.assert_called_with([torch.tensor([1])],
logger=runner.logger)
def test_epoch_eval_hook():
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [
DataLoader(
test_dataset,
batch_size=1,
sampler=None,
                num_workers=0,
shuffle=False)
]
EvalHook(data_loader, by_epoch=True)
test_dataset = ExampleDataset()
test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
data_loader = DataLoader(
test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=model.parameters()))
# test EvalHook with interval
with tempfile.TemporaryDirectory() as tmpdir:
eval_hook = EvalHook(data_loader, by_epoch=True, interval=2)
runner = mmcv.runner.EpochBasedRunner(
model=model,
optimizer=optimizer,
work_dir=tmpdir,
logger=logging.getLogger())
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 2)
test_dataset.evaluate.assert_called_once_with([torch.tensor([1])],
logger=runner.logger)
def multi_gpu_test(model,
data_loader,
tmpdir=None,
gpu_collect=False,
pre_eval=False):
# Pre eval is set by default when training.
results = single_gpu_test(model, data_loader, pre_eval=True)
return results
@patch('mmseg.apis.multi_gpu_test', multi_gpu_test)
def test_dist_eval_hook():
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [
DataLoader(
test_dataset,
batch_size=1,
sampler=None,
                num_workers=0,
shuffle=False)
]
DistEvalHook(data_loader)
test_dataset = ExampleDataset()
test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
data_loader = DataLoader(
test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=model.parameters()))
# test DistEvalHook
with tempfile.TemporaryDirectory() as tmpdir:
eval_hook = DistEvalHook(
data_loader, by_epoch=False, efficient_test=True)
runner = mmcv.runner.IterBasedRunner(
model=model,
optimizer=optimizer,
work_dir=tmpdir,
logger=logging.getLogger())
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
test_dataset.evaluate.assert_called_with([torch.tensor([1])],
logger=runner.logger)
@patch('mmseg.apis.multi_gpu_test', multi_gpu_test)
def test_dist_eval_hook_epoch():
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [
DataLoader(
test_dataset,
batch_size=1,
sampler=None,
                num_workers=0,
shuffle=False)
]
DistEvalHook(data_loader)
test_dataset = ExampleDataset()
test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
data_loader = DataLoader(
test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=model.parameters()))
# test DistEvalHook
with tempfile.TemporaryDirectory() as tmpdir:
eval_hook = DistEvalHook(data_loader, by_epoch=True, interval=2)
runner = mmcv.runner.EpochBasedRunner(
model=model,
optimizer=optimizer,
work_dir=tmpdir,
logger=logging.getLogger())
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 2)
test_dataset.evaluate.assert_called_with([torch.tensor([1])],
logger=runner.logger)
| 7,237 | 34.307317 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
from mmseg.apis import inference_segmentor, init_segmentor
def test_test_time_augmentation_on_cpu():
config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
config = mmcv.Config.fromfile(config_file)
    # Remove the pretrained model download for testing
config.model.pretrained = None
    # Replace SyncBN with BN to run inference on CPU
norm_cfg = dict(type='BN', requires_grad=True)
config.model.backbone.norm_cfg = norm_cfg
config.model.decode_head.norm_cfg = norm_cfg
config.model.auxiliary_head.norm_cfg = norm_cfg
# Enable test time augmentation
config.data.test.pipeline[1].flip = True
checkpoint_file = None
model = init_segmentor(config, checkpoint_file, device='cpu')
img = mmcv.imread(
osp.join(osp.dirname(__file__), 'data/color.jpg'), 'color')
result = inference_segmentor(model, img)
assert result[0].shape == (288, 512)
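# Added illustration (not part of the original test): a minimal sketch of
# what enabling `flip` in MultiScaleFlipAug amounts to, assuming a plain
# callable `model` that maps (N, C, H, W) images to (N, K, H, W) logits.
# mmseg does this internally with img_metas bookkeeping; this shows only
# the core flip-and-average idea.
def _flip_tta_sketch(model, img):
    import torch
    logits = model(img)
    flipped_logits = model(torch.flip(img, dims=[3]))
    # flip the second prediction back before averaging
    logits = (logits + torch.flip(flipped_logits, dims=[3])) / 2
    return logits.argmax(dim=1)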
| 996 | 31.16129 | 75 | py |
mmsegmentation | mmsegmentation-master/tests/test_metrics.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmseg.core.evaluation import (eval_metrics, mean_dice, mean_fscore,
mean_iou)
from mmseg.core.evaluation.metrics import f_score
def get_confusion_matrix(pred_label, label, num_classes, ignore_index):
"""Intersection over Union
Args:
pred_label (np.ndarray): 2D predict map
label (np.ndarray): label 2D label map
num_classes (int): number of categories
ignore_index (int): index ignore in evaluation
"""
mask = (label != ignore_index)
pred_label = pred_label[mask]
label = label[mask]
n = num_classes
inds = n * label + pred_label
mat = np.bincount(inds, minlength=n**2).reshape(n, n)
return mat
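# Added worked example (illustrative, not used by the tests): encoding each
# (gt, pred) pair as `n * label + pred_label` lets a single np.bincount call
# fill the whole n x n confusion matrix, with rows = ground truth and
# columns = prediction.
def _demo_confusion_matrix():
    label = np.array([0, 0, 1, 2])
    pred_label = np.array([0, 1, 1, 2])
    n = 3
    mat = np.bincount(n * label + pred_label, minlength=n**2).reshape(n, n)
    # mat == [[1, 1, 0],   gt 0: one correct, one predicted as class 1
    #         [0, 1, 0],   gt 1: one correct
    #         [0, 0, 1]]   gt 2: one correct
    return mat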
# This func is deprecated since it's not memory efficient
def legacy_mean_iou(results, gt_seg_maps, num_classes, ignore_index):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
total_mat += mat
all_acc = np.diag(total_mat).sum() / total_mat.sum()
acc = np.diag(total_mat) / total_mat.sum(axis=1)
iou = np.diag(total_mat) / (
total_mat.sum(axis=1) + total_mat.sum(axis=0) - np.diag(total_mat))
return all_acc, acc, iou
# This func is deprecated since it's not memory efficient
def legacy_mean_dice(results, gt_seg_maps, num_classes, ignore_index):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
total_mat += mat
all_acc = np.diag(total_mat).sum() / total_mat.sum()
acc = np.diag(total_mat) / total_mat.sum(axis=1)
dice = 2 * np.diag(total_mat) / (
total_mat.sum(axis=1) + total_mat.sum(axis=0))
return all_acc, acc, dice
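# Added note (not in the original tests): per-class Dice and IoU come from
# the same confusion-matrix marginals and satisfy Dice = 2 * IoU / (1 + IoU).
# A minimal numeric check of that identity:
def _demo_dice_iou_relation():
    tp, fp, fn = 6.0, 2.0, 2.0
    iou = tp / (tp + fp + fn)  # 0.6
    dice = 2 * tp / (2 * tp + fp + fn)  # 0.75
    assert abs(dice - 2 * iou / (1 + iou)) < 1e-12
    return iou, dice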
# This func is deprecated since it's not memory efficient
def legacy_mean_fscore(results,
gt_seg_maps,
num_classes,
ignore_index,
beta=1):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
total_mat += mat
all_acc = np.diag(total_mat).sum() / total_mat.sum()
recall = np.diag(total_mat) / total_mat.sum(axis=1)
precision = np.diag(total_mat) / total_mat.sum(axis=0)
fv = np.vectorize(f_score)
fscore = fv(precision, recall, beta=beta)
return all_acc, recall, precision, fscore
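# Added reference sketch: the F-beta score that `f_score` implements is
# F_beta = (1 + beta^2) * P * R / (beta^2 * P + R); with beta=1 it reduces
# to the harmonic mean of precision and recall. Local helper for clarity:
def _demo_f_beta(precision, recall, beta=1):
    return (1 + beta**2) * precision * recall / (beta**2 * precision + recall)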
def test_metrics():
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
label = np.random.randint(0, num_classes, size=pred_size)
# Test the availability of arg: ignore_index.
label[:, 2, 5:10] = ignore_index
# Test the correctness of the implementation of mIoU calculation.
ret_metrics = eval_metrics(
results, label, num_classes, ignore_index, metrics='mIoU')
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes,
ignore_index)
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
# Test the correctness of the implementation of mDice calculation.
ret_metrics = eval_metrics(
results, label, num_classes, ignore_index, metrics='mDice')
all_acc, acc, dice = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'Dice']
all_acc_l, acc_l, dice_l = legacy_mean_dice(results, label, num_classes,
ignore_index)
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(dice, dice_l)
    # Test the correctness of the implementation of mFscore calculation.
ret_metrics = eval_metrics(
results, label, num_classes, ignore_index, metrics='mFscore')
all_acc, recall, precision, fscore = ret_metrics['aAcc'], ret_metrics[
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index)
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
# Test the correctness of the implementation of joint calculation.
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index,
metrics=['mIoU', 'mDice', 'mFscore'])
all_acc, acc, iou, dice, precision, recall, fscore = ret_metrics[
'aAcc'], ret_metrics['Acc'], ret_metrics['IoU'], ret_metrics[
'Dice'], ret_metrics['Precision'], ret_metrics[
'Recall'], ret_metrics['Fscore']
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
assert np.allclose(dice, dice_l)
assert np.allclose(precision, precision_l)
assert np.allclose(recall, recall_l)
assert np.allclose(fscore, fscore_l)
# Test the correctness of calculation when arg: num_classes is larger
# than the maximum value of input maps.
results = np.random.randint(0, 5, size=pred_size)
label = np.random.randint(0, 4, size=pred_size)
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index=255,
metrics='mIoU',
nan_to_num=-1)
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
assert acc[-1] == -1
assert iou[-1] == -1
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index=255,
metrics='mDice',
nan_to_num=-1)
all_acc, acc, dice = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'Dice']
assert acc[-1] == -1
assert dice[-1] == -1
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index=255,
metrics='mFscore',
nan_to_num=-1)
all_acc, precision, recall, fscore = ret_metrics['aAcc'], ret_metrics[
'Precision'], ret_metrics['Recall'], ret_metrics['Fscore']
assert precision[-1] == -1
assert recall[-1] == -1
assert fscore[-1] == -1
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index=255,
metrics=['mDice', 'mIoU', 'mFscore'],
nan_to_num=-1)
all_acc, acc, iou, dice, precision, recall, fscore = ret_metrics[
'aAcc'], ret_metrics['Acc'], ret_metrics['IoU'], ret_metrics[
'Dice'], ret_metrics['Precision'], ret_metrics[
'Recall'], ret_metrics['Fscore']
assert acc[-1] == -1
assert dice[-1] == -1
assert iou[-1] == -1
assert precision[-1] == -1
assert recall[-1] == -1
assert fscore[-1] == -1
# Test the bug which is caused by torch.histc.
# torch.histc: https://pytorch.org/docs/stable/generated/torch.histc.html
# When the arg:bins is set to be same as arg:max,
# some channels of mIoU may be nan.
results = np.array([np.repeat(31, 59)])
label = np.array([np.arange(59)])
num_classes = 59
ret_metrics = eval_metrics(
results, label, num_classes, ignore_index=255, metrics='mIoU')
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
assert not np.any(np.isnan(iou))
def test_mean_iou():
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
label = np.random.randint(0, num_classes, size=pred_size)
label[:, 2, 5:10] = ignore_index
ret_metrics = mean_iou(results, label, num_classes, ignore_index)
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes,
ignore_index)
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
results = np.random.randint(0, 5, size=pred_size)
label = np.random.randint(0, 4, size=pred_size)
ret_metrics = mean_iou(
results, label, num_classes, ignore_index=255, nan_to_num=-1)
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
    assert acc[-1] == -1
    assert iou[-1] == -1
def test_mean_dice():
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
label = np.random.randint(0, num_classes, size=pred_size)
label[:, 2, 5:10] = ignore_index
ret_metrics = mean_dice(results, label, num_classes, ignore_index)
    all_acc, acc, dice = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
        'Dice']
all_acc_l, acc_l, dice_l = legacy_mean_dice(results, label, num_classes,
ignore_index)
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
    assert np.allclose(dice, dice_l)
results = np.random.randint(0, 5, size=pred_size)
label = np.random.randint(0, 4, size=pred_size)
ret_metrics = mean_dice(
results, label, num_classes, ignore_index=255, nan_to_num=-1)
all_acc, acc, dice = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'Dice']
assert acc[-1] == -1
assert dice[-1] == -1
def test_mean_fscore():
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
label = np.random.randint(0, num_classes, size=pred_size)
label[:, 2, 5:10] = ignore_index
ret_metrics = mean_fscore(results, label, num_classes, ignore_index)
all_acc, recall, precision, fscore = ret_metrics['aAcc'], ret_metrics[
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index)
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
ret_metrics = mean_fscore(
results, label, num_classes, ignore_index, beta=2)
all_acc, recall, precision, fscore = ret_metrics['aAcc'], ret_metrics[
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index, beta=2)
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
results = np.random.randint(0, 5, size=pred_size)
label = np.random.randint(0, 4, size=pred_size)
ret_metrics = mean_fscore(
results, label, num_classes, ignore_index=255, nan_to_num=-1)
all_acc, recall, precision, fscore = ret_metrics['aAcc'], ret_metrics[
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
assert recall[-1] == -1
assert precision[-1] == -1
assert fscore[-1] == -1
def test_filename_inputs():
import tempfile
import cv2
    def save_arr(input_arrays: list, title: str, is_image: bool,
                 save_dir: str):
filenames = []
SUFFIX = '.png' if is_image else '.npy'
for idx, arr in enumerate(input_arrays):
            filename = '{}/{}-{}{}'.format(save_dir, title, idx, SUFFIX)
if is_image:
cv2.imwrite(filename, arr)
else:
np.save(filename, arr)
filenames.append(filename)
return filenames
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
labels = np.random.randint(0, num_classes, size=pred_size)
labels[:, 2, 5:10] = ignore_index
with tempfile.TemporaryDirectory() as temp_dir:
result_files = save_arr(results, 'pred', False, temp_dir)
label_files = save_arr(labels, 'label', True, temp_dir)
ret_metrics = eval_metrics(
result_files,
label_files,
num_classes,
ignore_index,
metrics='mIoU')
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics[
'Acc'], ret_metrics['IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, labels, num_classes,
ignore_index)
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
| 13,345 | 36.914773 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.core import OHEMPixelSampler
from mmseg.models.decode_heads import FCNHead
def _context_for_ohem():
return FCNHead(in_channels=32, channels=16, num_classes=19)
def _context_for_ohem_multiple_loss():
return FCNHead(
in_channels=32,
channels=16,
num_classes=19,
loss_decode=[
dict(type='CrossEntropyLoss', loss_name='loss_1'),
dict(type='CrossEntropyLoss', loss_name='loss_2')
])
def test_ohem_sampler():
with pytest.raises(AssertionError):
# seg_logit and seg_label must be of the same size
sampler = OHEMPixelSampler(context=_context_for_ohem())
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 89, 89))
sampler.sample(seg_logit, seg_label)
# test with thresh
sampler = OHEMPixelSampler(
context=_context_for_ohem(), thresh=0.7, min_kept=200)
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 45, 45))
seg_weight = sampler.sample(seg_logit, seg_label)
assert seg_weight.shape[0] == seg_logit.shape[0]
assert seg_weight.shape[1:] == seg_logit.shape[2:]
assert seg_weight.sum() > 200
# test w.o thresh
sampler = OHEMPixelSampler(context=_context_for_ohem(), min_kept=200)
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 45, 45))
seg_weight = sampler.sample(seg_logit, seg_label)
assert seg_weight.shape[0] == seg_logit.shape[0]
assert seg_weight.shape[1:] == seg_logit.shape[2:]
assert seg_weight.sum() == 200
# test multiple losses case
with pytest.raises(AssertionError):
# seg_logit and seg_label must be of the same size
sampler = OHEMPixelSampler(context=_context_for_ohem_multiple_loss())
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 89, 89))
sampler.sample(seg_logit, seg_label)
# test with thresh in multiple losses case
sampler = OHEMPixelSampler(
context=_context_for_ohem_multiple_loss(), thresh=0.7, min_kept=200)
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 45, 45))
seg_weight = sampler.sample(seg_logit, seg_label)
assert seg_weight.shape[0] == seg_logit.shape[0]
assert seg_weight.shape[1:] == seg_logit.shape[2:]
assert seg_weight.sum() > 200
# test w.o thresh in multiple losses case
sampler = OHEMPixelSampler(
context=_context_for_ohem_multiple_loss(), min_kept=200)
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 45, 45))
seg_weight = sampler.sample(seg_logit, seg_label)
assert seg_weight.shape[0] == seg_logit.shape[0]
assert seg_weight.shape[1:] == seg_logit.shape[2:]
assert seg_weight.sum() == 200
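# Added sketch of the OHEM idea these assertions exercise (illustrative
# only; the actual selection logic lives in mmseg.core.OHEMPixelSampler):
# rank pixels by per-pixel loss and keep at least `min_kept` of the hardest
# ones as a 0/1 weight map with the logits' spatial shape.
def _demo_ohem_weight(pixel_loss, min_kept=200):
    # pixel_loss: (N, H, W) tensor of per-pixel losses
    flat = pixel_loss.flatten()
    k = min(min_kept, flat.numel())
    threshold = flat.topk(k).values.min()
    return (pixel_loss >= threshold).float()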
| 2,957 | 36.443038 | 77 | py |
mmsegmentation | mmsegmentation-master/tests/test_apis/test_single_gpu.py | # Copyright (c) OpenMMLab. All rights reserved.
import shutil
from unittest.mock import MagicMock
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from mmseg.apis import single_gpu_test
class ExampleDataset(Dataset):
def __getitem__(self, idx):
results = dict(img=torch.tensor([1]), img_metas=dict())
return results
def __len__(self):
return 1
class ExampleModel(nn.Module):
def __init__(self):
super(ExampleModel, self).__init__()
self.test_cfg = None
self.conv = nn.Conv2d(3, 3, 3)
def forward(self, img, img_metas, return_loss=False, **kwargs):
return img
def test_single_gpu():
test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False,
)
model = ExampleModel()
# Test efficient test compatibility (will be deprecated)
results = single_gpu_test(model, data_loader, efficient_test=True)
assert len(results) == 1
pred = np.load(results[0])
assert isinstance(pred, np.ndarray)
assert pred.shape == (1, )
assert pred[0] == 1
shutil.rmtree('.efficient_test')
# Test pre_eval
test_dataset.pre_eval = MagicMock(return_value=['success'])
results = single_gpu_test(model, data_loader, pre_eval=True)
assert results == ['success']
# Test format_only
test_dataset.format_results = MagicMock(return_value=['success'])
results = single_gpu_test(model, data_loader, format_only=True)
assert results == ['success']
# efficient_test, pre_eval and format_only are mutually exclusive
with pytest.raises(AssertionError):
single_gpu_test(
model,
            data_loader,
efficient_test=True,
format_only=True,
pre_eval=True)
| 1,932 | 25.121622 | 70 | py |
mmsegmentation | mmsegmentation-master/tests/test_core/test_layer_decay_optimizer_constructor.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.core.optimizers.layer_decay_optimizer_constructor import (
LayerDecayOptimizerConstructor, LearningRateDecayOptimizerConstructor)
base_lr = 1
decay_rate = 2
base_wd = 0.05
weight_decay = 0.05
expected_stage_wise_lr_wd_convnext = [{
'weight_decay': 0.0,
'lr_scale': 128
}, {
'weight_decay': 0.0,
'lr_scale': 1
}, {
'weight_decay': 0.05,
'lr_scale': 64
}, {
'weight_decay': 0.0,
'lr_scale': 64
}, {
'weight_decay': 0.05,
'lr_scale': 32
}, {
'weight_decay': 0.0,
'lr_scale': 32
}, {
'weight_decay': 0.05,
'lr_scale': 16
}, {
'weight_decay': 0.0,
'lr_scale': 16
}, {
'weight_decay': 0.05,
'lr_scale': 8
}, {
'weight_decay': 0.0,
'lr_scale': 8
}, {
'weight_decay': 0.05,
'lr_scale': 128
}, {
'weight_decay': 0.05,
'lr_scale': 1
}]
expected_layer_wise_lr_wd_convnext = [{
'weight_decay': 0.0,
'lr_scale': 128
}, {
'weight_decay': 0.0,
'lr_scale': 1
}, {
'weight_decay': 0.05,
'lr_scale': 64
}, {
'weight_decay': 0.0,
'lr_scale': 64
}, {
'weight_decay': 0.05,
'lr_scale': 32
}, {
'weight_decay': 0.0,
'lr_scale': 32
}, {
'weight_decay': 0.05,
'lr_scale': 16
}, {
'weight_decay': 0.0,
'lr_scale': 16
}, {
'weight_decay': 0.05,
'lr_scale': 2
}, {
'weight_decay': 0.0,
'lr_scale': 2
}, {
'weight_decay': 0.05,
'lr_scale': 128
}, {
'weight_decay': 0.05,
'lr_scale': 1
}]
expected_layer_wise_wd_lr_beit = [{
'weight_decay': 0.0,
'lr_scale': 16
}, {
'weight_decay': 0.05,
'lr_scale': 8
}, {
'weight_decay': 0.0,
'lr_scale': 8
}, {
'weight_decay': 0.05,
'lr_scale': 4
}, {
'weight_decay': 0.0,
'lr_scale': 4
}, {
'weight_decay': 0.05,
'lr_scale': 2
}, {
'weight_decay': 0.0,
'lr_scale': 2
}, {
'weight_decay': 0.05,
'lr_scale': 1
}, {
'weight_decay': 0.0,
'lr_scale': 1
}]
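# Added sketch showing where the expected `lr_scale` values above come from
# (local helper, not the mmseg API): with layer-wise decay, parameters in
# layer i are scaled by decay_rate ** (num_layers + 1 - i), counting the
# patch embedding / tokens as layer 0 and the head as layer num_layers + 1.
# For the BEiT case (num_layers=3, decay_rate=2) this yields 16, 8, 4, 2, 1.
def _demo_layer_decay_scales(num_layers=3, rate=2):
    return [rate**(num_layers + 1 - i) for i in range(num_layers + 2)]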
class ToyConvNeXt(nn.Module):
def __init__(self):
super().__init__()
self.stages = nn.ModuleList()
for i in range(4):
stage = nn.Sequential(ConvModule(3, 4, kernel_size=1, bias=True))
self.stages.append(stage)
self.norm0 = nn.BatchNorm2d(2)
        # add some variables to meet the unit test coverage rate
self.cls_token = nn.Parameter(torch.ones(1))
self.mask_token = nn.Parameter(torch.ones(1))
self.pos_embed = nn.Parameter(torch.ones(1))
self.stem_norm = nn.Parameter(torch.ones(1))
self.downsample_norm0 = nn.BatchNorm2d(2)
self.downsample_norm1 = nn.BatchNorm2d(2)
self.downsample_norm2 = nn.BatchNorm2d(2)
self.lin = nn.Parameter(torch.ones(1))
self.lin.requires_grad = False
self.downsample_layers = nn.ModuleList()
for _ in range(4):
stage = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=True))
self.downsample_layers.append(stage)
class ToyBEiT(nn.Module):
def __init__(self):
super().__init__()
        # add some variables to meet the unit test coverage rate
self.cls_token = nn.Parameter(torch.ones(1))
self.patch_embed = nn.Parameter(torch.ones(1))
self.layers = nn.ModuleList()
for _ in range(3):
layer = nn.Conv2d(3, 3, 1)
self.layers.append(layer)
class ToyMAE(nn.Module):
def __init__(self):
super().__init__()
        # add some variables to meet the unit test coverage rate
self.cls_token = nn.Parameter(torch.ones(1))
self.patch_embed = nn.Parameter(torch.ones(1))
self.layers = nn.ModuleList()
for _ in range(3):
layer = nn.Conv2d(3, 3, 1)
self.layers.append(layer)
class ToySegmentor(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
self.decode_head = nn.Conv2d(2, 2, kernel_size=1, groups=2)
class PseudoDataParallel(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
class ToyViT(nn.Module):
def __init__(self):
super().__init__()
def check_optimizer_lr_wd(optimizer, gt_lr_wd):
assert isinstance(optimizer, torch.optim.AdamW)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['weight_decay'] == base_wd
param_groups = optimizer.param_groups
assert len(param_groups) == len(gt_lr_wd)
for i, param_dict in enumerate(param_groups):
assert param_dict['weight_decay'] == gt_lr_wd[i]['weight_decay']
assert param_dict['lr_scale'] == gt_lr_wd[i]['lr_scale']
        # also equal to 'lr' because base_lr == 1 in these tests
        assert param_dict['lr_scale'] == param_dict['lr']
def test_learning_rate_decay_optimizer_constructor():
# Test lr wd for ConvNeXT
backbone = ToyConvNeXt()
model = PseudoDataParallel(ToySegmentor(backbone))
optimizer_cfg = dict(
type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05)
# stagewise decay
stagewise_paramwise_cfg = dict(
decay_rate=decay_rate, decay_type='stage_wise', num_layers=6)
optim_constructor = LearningRateDecayOptimizerConstructor(
optimizer_cfg, stagewise_paramwise_cfg)
optimizer = optim_constructor(model)
check_optimizer_lr_wd(optimizer, expected_stage_wise_lr_wd_convnext)
# layerwise decay
layerwise_paramwise_cfg = dict(
decay_rate=decay_rate, decay_type='layer_wise', num_layers=6)
optim_constructor = LearningRateDecayOptimizerConstructor(
optimizer_cfg, layerwise_paramwise_cfg)
optimizer = optim_constructor(model)
check_optimizer_lr_wd(optimizer, expected_layer_wise_lr_wd_convnext)
# Test lr wd for BEiT
backbone = ToyBEiT()
model = PseudoDataParallel(ToySegmentor(backbone))
layerwise_paramwise_cfg = dict(
decay_rate=decay_rate, decay_type='layer_wise', num_layers=3)
optim_constructor = LearningRateDecayOptimizerConstructor(
optimizer_cfg, layerwise_paramwise_cfg)
optimizer = optim_constructor(model)
check_optimizer_lr_wd(optimizer, expected_layer_wise_wd_lr_beit)
    # Test that layer/stage-wise lr decay is not implemented for ViT
backbone = ToyViT()
model = PseudoDataParallel(ToySegmentor(backbone))
with pytest.raises(NotImplementedError):
optim_constructor = LearningRateDecayOptimizerConstructor(
optimizer_cfg, layerwise_paramwise_cfg)
optimizer = optim_constructor(model)
with pytest.raises(NotImplementedError):
optim_constructor = LearningRateDecayOptimizerConstructor(
optimizer_cfg, stagewise_paramwise_cfg)
optimizer = optim_constructor(model)
# Test lr wd for MAE
backbone = ToyMAE()
model = PseudoDataParallel(ToySegmentor(backbone))
layerwise_paramwise_cfg = dict(
decay_rate=decay_rate, decay_type='layer_wise', num_layers=3)
optim_constructor = LearningRateDecayOptimizerConstructor(
optimizer_cfg, layerwise_paramwise_cfg)
optimizer = optim_constructor(model)
check_optimizer_lr_wd(optimizer, expected_layer_wise_wd_lr_beit)
def test_beit_layer_decay_optimizer_constructor():
    # paramwise_cfg with a ToyBEiT backbone
backbone = ToyBEiT()
model = PseudoDataParallel(ToySegmentor(backbone))
optimizer_cfg = dict(
type='AdamW', lr=1, betas=(0.9, 0.999), weight_decay=0.05)
paramwise_cfg = dict(layer_decay_rate=2, num_layers=3)
optim_constructor = LayerDecayOptimizerConstructor(optimizer_cfg,
paramwise_cfg)
optimizer = optim_constructor(model)
check_optimizer_lr_wd(optimizer, expected_layer_wise_wd_lr_beit)
| 7,796 | 27.25 | 77 | py |
mmsegmentation | mmsegmentation-master/tests/test_core/test_optimizer.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn as nn
from mmcv.runner import DefaultOptimizerConstructor
from mmseg.core.builder import (OPTIMIZER_BUILDERS, build_optimizer,
build_optimizer_constructor)
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.param1 = nn.Parameter(torch.ones(1))
self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
self.bn = nn.BatchNorm2d(2)
def forward(self, x):
return x
base_lr = 0.01
base_wd = 0.0001
momentum = 0.9
def test_build_optimizer_constructor():
optimizer_cfg = dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
optim_constructor_cfg = dict(
type='DefaultOptimizerConstructor', optimizer_cfg=optimizer_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
# Test whether optimizer constructor can be built from parent.
assert type(optim_constructor) is DefaultOptimizerConstructor
@OPTIMIZER_BUILDERS.register_module()
class MyOptimizerConstructor(DefaultOptimizerConstructor):
pass
optim_constructor_cfg = dict(
type='MyOptimizerConstructor', optimizer_cfg=optimizer_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
# Test optimizer constructor can be built from child registry.
assert type(optim_constructor) is MyOptimizerConstructor
# Test unregistered constructor cannot be built
with pytest.raises(KeyError):
build_optimizer_constructor(dict(type='A'))
def test_build_optimizer():
model = ExampleModel()
optimizer_cfg = dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
optimizer = build_optimizer(model, optimizer_cfg)
# test whether optimizer is successfully built from parent.
assert isinstance(optimizer, torch.optim.SGD)
| 2,004 | 32.416667 | 74 | py |
mmsegmentation | mmsegmentation-master/tests/test_data/test_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import shutil
import tempfile
from typing import Generator
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
import torch
from PIL import Image
from mmseg.core.evaluation import get_classes, get_palette
from mmseg.datasets import (DATASETS, ADE20KDataset, CityscapesDataset,
COCOStuffDataset, ConcatDataset, CustomDataset,
ISPRSDataset, LoveDADataset, MultiImageMixDataset,
PascalVOCDataset, PotsdamDataset, RepeatDataset,
build_dataset, iSAIDDataset)
def test_classes():
assert list(CityscapesDataset.CLASSES) == get_classes('cityscapes')
assert list(PascalVOCDataset.CLASSES) == get_classes('voc') == get_classes(
'pascal_voc')
assert list(
ADE20KDataset.CLASSES) == get_classes('ade') == get_classes('ade20k')
assert list(COCOStuffDataset.CLASSES) == get_classes('cocostuff')
assert list(LoveDADataset.CLASSES) == get_classes('loveda')
assert list(PotsdamDataset.CLASSES) == get_classes('potsdam')
assert list(ISPRSDataset.CLASSES) == get_classes('vaihingen')
assert list(iSAIDDataset.CLASSES) == get_classes('isaid')
with pytest.raises(ValueError):
get_classes('unsupported')
def test_classes_file_path():
tmp_file = tempfile.NamedTemporaryFile()
classes_path = f'{tmp_file.name}.txt'
train_pipeline = [dict(type='LoadImageFromFile')]
kwargs = dict(pipeline=train_pipeline, img_dir='./', classes=classes_path)
# classes.txt with full categories
categories = get_classes('cityscapes')
with open(classes_path, 'w') as f:
f.write('\n'.join(categories))
assert list(CityscapesDataset(**kwargs).CLASSES) == categories
# classes.txt with sub categories
categories = ['road', 'sidewalk', 'building']
with open(classes_path, 'w') as f:
f.write('\n'.join(categories))
assert list(CityscapesDataset(**kwargs).CLASSES) == categories
# classes.txt with unknown categories
categories = ['road', 'sidewalk', 'unknown']
with open(classes_path, 'w') as f:
f.write('\n'.join(categories))
with pytest.raises(ValueError):
CityscapesDataset(**kwargs)
tmp_file.close()
os.remove(classes_path)
assert not osp.exists(classes_path)
def test_palette():
assert CityscapesDataset.PALETTE == get_palette('cityscapes')
assert PascalVOCDataset.PALETTE == get_palette('voc') == get_palette(
'pascal_voc')
assert ADE20KDataset.PALETTE == get_palette('ade') == get_palette('ade20k')
assert LoveDADataset.PALETTE == get_palette('loveda')
assert PotsdamDataset.PALETTE == get_palette('potsdam')
assert COCOStuffDataset.PALETTE == get_palette('cocostuff')
assert iSAIDDataset.PALETTE == get_palette('isaid')
with pytest.raises(ValueError):
get_palette('unsupported')
@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__',
MagicMock(side_effect=lambda idx: idx))
def test_dataset_wrapper():
dataset_a = CustomDataset(img_dir=MagicMock(), pipeline=[])
len_a = 10
dataset_a.img_infos = MagicMock()
dataset_a.img_infos.__len__.return_value = len_a
dataset_b = CustomDataset(img_dir=MagicMock(), pipeline=[])
len_b = 20
dataset_b.img_infos = MagicMock()
dataset_b.img_infos.__len__.return_value = len_b
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert concat_dataset[5] == 5
assert concat_dataset[25] == 15
assert len(concat_dataset) == len(dataset_a) + len(dataset_b)
repeat_dataset = RepeatDataset(dataset_a, 10)
assert repeat_dataset[5] == 5
assert repeat_dataset[15] == 5
assert repeat_dataset[27] == 7
assert len(repeat_dataset) == 10 * len(dataset_a)
img_scale = (60, 60)
pipeline = [
dict(type='RandomMosaic', prob=1, img_scale=img_scale),
dict(type='RandomFlip', prob=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=False),
]
CustomDataset.load_annotations = MagicMock()
results = []
for _ in range(2):
height = np.random.randint(10, 30)
        width = np.random.randint(10, 30)
        img = np.ones((height, width, 3))
        gt_semantic_seg = np.random.randint(5, size=(height, width))
results.append(dict(gt_semantic_seg=gt_semantic_seg, img=img))
classes = ['0', '1', '2', '3', '4']
palette = [(0, 0, 0), (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)]
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: results[idx])
dataset_a = CustomDataset(
img_dir=MagicMock(),
pipeline=[],
test_mode=True,
classes=classes,
palette=palette)
len_a = 2
dataset_a.img_infos = MagicMock()
dataset_a.img_infos.__len__.return_value = len_a
multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline)
assert len(multi_image_mix_dataset) == len(dataset_a)
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
# test skip_type_keys
multi_image_mix_dataset = MultiImageMixDataset(
dataset_a, pipeline, skip_type_keys=('RandomFlip'))
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (img_scale[0], img_scale[1], 3)
skip_type_keys = ('RandomFlip', 'Resize')
multi_image_mix_dataset.update_skip_type_keys(skip_type_keys)
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape[:2] != img_scale
# test pipeline
with pytest.raises(TypeError):
pipeline = [['Resize']]
multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline)
def test_custom_dataset():
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(128, 256), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(128, 256),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# with img_dir and ann_dir
train_dataset = CustomDataset(
train_pipeline,
data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
img_dir='imgs/',
ann_dir='gts/',
img_suffix='img.jpg',
seg_map_suffix='gt.png')
assert len(train_dataset) == 5
# with img_dir, ann_dir, split
train_dataset = CustomDataset(
train_pipeline,
data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
img_dir='imgs/',
ann_dir='gts/',
img_suffix='img.jpg',
seg_map_suffix='gt.png',
split='splits/train.txt')
assert len(train_dataset) == 4
# no data_root
train_dataset = CustomDataset(
train_pipeline,
img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
ann_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/gts'),
img_suffix='img.jpg',
seg_map_suffix='gt.png')
assert len(train_dataset) == 5
# with data_root but img_dir/ann_dir are abs path
train_dataset = CustomDataset(
train_pipeline,
data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
img_dir=osp.abspath(
osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs')),
ann_dir=osp.abspath(
osp.join(osp.dirname(__file__), '../data/pseudo_dataset/gts')),
img_suffix='img.jpg',
seg_map_suffix='gt.png')
assert len(train_dataset) == 5
# test_mode=True
test_dataset = CustomDataset(
test_pipeline,
img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
img_suffix='img.jpg',
test_mode=True,
classes=('pseudo_class', ))
assert len(test_dataset) == 5
# training data get
train_data = train_dataset[0]
assert isinstance(train_data, dict)
# test data get
test_data = test_dataset[0]
assert isinstance(test_data, dict)
# get gt seg map
gt_seg_maps = train_dataset.get_gt_seg_maps(efficient_test=True)
assert isinstance(gt_seg_maps, Generator)
gt_seg_maps = list(gt_seg_maps)
assert len(gt_seg_maps) == 5
# format_results not implemented
with pytest.raises(NotImplementedError):
test_dataset.format_results([], '')
pseudo_results = []
for gt_seg_map in gt_seg_maps:
h, w = gt_seg_map.shape
pseudo_results.append(np.random.randint(low=0, high=7, size=(h, w)))
    # test legacy evaluation without CLASSES
with pytest.raises(TypeError):
eval_results = train_dataset.evaluate(pseudo_results, metric=['mIoU'])
with pytest.raises(TypeError):
eval_results = train_dataset.evaluate(pseudo_results, metric='mDice')
with pytest.raises(TypeError):
eval_results = train_dataset.evaluate(
pseudo_results, metric=['mDice', 'mIoU'])
    # test legacy evaluation with CLASSES
train_dataset.CLASSES = tuple(['a'] * 7)
eval_results = train_dataset.evaluate(pseudo_results, metric='mIoU')
assert isinstance(eval_results, dict)
assert 'mIoU' in eval_results
assert 'mAcc' in eval_results
assert 'aAcc' in eval_results
eval_results = train_dataset.evaluate(pseudo_results, metric='mDice')
assert isinstance(eval_results, dict)
assert 'mDice' in eval_results
assert 'mAcc' in eval_results
assert 'aAcc' in eval_results
eval_results = train_dataset.evaluate(pseudo_results, metric='mFscore')
assert isinstance(eval_results, dict)
assert 'mRecall' in eval_results
assert 'mPrecision' in eval_results
assert 'mFscore' in eval_results
assert 'aAcc' in eval_results
eval_results = train_dataset.evaluate(
pseudo_results, metric=['mIoU', 'mDice', 'mFscore'])
assert isinstance(eval_results, dict)
assert 'mIoU' in eval_results
assert 'mDice' in eval_results
assert 'mAcc' in eval_results
assert 'aAcc' in eval_results
assert 'mFscore' in eval_results
assert 'mPrecision' in eval_results
assert 'mRecall' in eval_results
assert not np.isnan(eval_results['mIoU'])
assert not np.isnan(eval_results['mDice'])
assert not np.isnan(eval_results['mAcc'])
assert not np.isnan(eval_results['aAcc'])
assert not np.isnan(eval_results['mFscore'])
assert not np.isnan(eval_results['mPrecision'])
assert not np.isnan(eval_results['mRecall'])
    # test evaluation with pre-eval; dataset.CLASSES is necessary here
train_dataset.CLASSES = tuple(['a'] * 7)
pseudo_results = []
for idx in range(len(train_dataset)):
h, w = gt_seg_maps[idx].shape
pseudo_result = np.random.randint(low=0, high=7, size=(h, w))
pseudo_results.extend(train_dataset.pre_eval(pseudo_result, idx))
eval_results = train_dataset.evaluate(pseudo_results, metric=['mIoU'])
assert isinstance(eval_results, dict)
assert 'mIoU' in eval_results
assert 'mAcc' in eval_results
assert 'aAcc' in eval_results
eval_results = train_dataset.evaluate(pseudo_results, metric='mDice')
assert isinstance(eval_results, dict)
assert 'mDice' in eval_results
assert 'mAcc' in eval_results
assert 'aAcc' in eval_results
eval_results = train_dataset.evaluate(pseudo_results, metric='mFscore')
assert isinstance(eval_results, dict)
assert 'mRecall' in eval_results
assert 'mPrecision' in eval_results
assert 'mFscore' in eval_results
assert 'aAcc' in eval_results
eval_results = train_dataset.evaluate(
pseudo_results, metric=['mIoU', 'mDice', 'mFscore'])
assert isinstance(eval_results, dict)
assert 'mIoU' in eval_results
assert 'mDice' in eval_results
assert 'mAcc' in eval_results
assert 'aAcc' in eval_results
assert 'mFscore' in eval_results
assert 'mPrecision' in eval_results
assert 'mRecall' in eval_results
assert not np.isnan(eval_results['mIoU'])
assert not np.isnan(eval_results['mDice'])
assert not np.isnan(eval_results['mAcc'])
assert not np.isnan(eval_results['aAcc'])
assert not np.isnan(eval_results['mFscore'])
assert not np.isnan(eval_results['mPrecision'])
assert not np.isnan(eval_results['mRecall'])
def test_custom_dataset_pre_eval():
"""Test pre-eval function of custom dataset with reduce zero label and
removed classes.
    The GT segmentation contains 4 classes: "A", "B", "C", "D", as well as
a zero label. Therefore, the labels go from 0 to 4.
Then, we will remove class "C" while instantiating the dataset. Therefore,
pre-eval must reduce the zero label and also apply label_map in the correct
order.
"""
# create a dummy dataset on disk
img = np.random.rand(10, 10)
ann = np.zeros_like(img)
ann[2:4, 2:4] = 1
ann[2:4, 6:8] = 2
ann[6:8, 2:4] = 3
ann[6:8, 6:8] = 4
tmp_dir = tempfile.TemporaryDirectory()
img_path = osp.join(tmp_dir.name, 'img', '00000.jpg')
ann_path = osp.join(tmp_dir.name, 'ann', '00000.png')
import mmcv
mmcv.imwrite(img, img_path)
mmcv.imwrite(ann, ann_path)
class FourClassDatasetWithZeroLabel(CustomDataset):
CLASSES = ['A', 'B', 'C', 'D'] # 4 classes
PALETTE = [(0, 0, 0)] * 4 # dummy palette
# with img_dir, ann_dir, split
dataset = FourClassDatasetWithZeroLabel(
[],
classes=['A', 'B', 'D'], # original classes with class "C" removed
reduce_zero_label=True, # reduce zero label set to True
data_root=osp.join(osp.dirname(__file__), tmp_dir.name),
img_dir='img/',
ann_dir='ann/',
img_suffix='.jpg',
seg_map_suffix='.png')
assert len(dataset) == 1
# there are three classes ("A", "B", "D") that the network predicts
perfect_pred = np.zeros([10, 10], dtype=np.int64)
perfect_pred[2:4, 2:4] = 0 # 'A': 1 reduced to 0 that maps to 0
perfect_pred[2:4, 6:8] = 1 # 'B': 2 reduced to 1 that maps to 1
perfect_pred[6:8, 2:4] = 0 # 'C': 3 reduced to 2 that maps to -1, ignored
perfect_pred[6:8, 6:8] = 2 # 'D': 4 reduced to 3 that maps to 2
results = dataset.pre_eval([perfect_pred], [0])
from mmseg.core.evaluation.metrics import pre_eval_to_metrics
eval_results = pre_eval_to_metrics(results, ['mIoU', 'mDice', 'mFscore'])
# the results should be perfect
for metric in 'IoU', 'aAcc', 'Acc', 'Dice', 'Fscore', 'Precision', \
'Recall':
assert (eval_results[metric] == 1.0).all()
tmp_dir.cleanup()
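# Added sketch of the two label mappings described in the docstring above
# (illustrative only; the real logic is inside CustomDataset.pre_eval):
# reduce_zero_label shifts all labels down by one and sends the old zero
# label to the ignore index, then label_map renames surviving classes and
# marks removed ones with -1.
def _demo_reduce_and_map(gt):
    reduced = gt.astype(np.int64) - 1
    reduced[gt == 0] = 255  # old zero label -> ignore index
    label_map = {0: 0, 1: 1, 2: -1, 3: 2}  # class 'C' (reduced id 2) removed
    out = reduced.copy()
    for old_id, new_id in label_map.items():
        out[reduced == old_id] = new_id
    return out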
@pytest.mark.parametrize('separate_eval', [True, False])
def test_eval_concat_custom_dataset(separate_eval):
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(128, 256),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data_root = osp.join(osp.dirname(__file__), '../data/pseudo_dataset')
img_dir = 'imgs/'
ann_dir = 'gts/'
cfg1 = dict(
type='CustomDataset',
pipeline=test_pipeline,
data_root=data_root,
img_dir=img_dir,
ann_dir=ann_dir,
img_suffix='img.jpg',
seg_map_suffix='gt.png',
classes=tuple(['a'] * 7))
dataset1 = build_dataset(cfg1)
assert len(dataset1) == 5
# get gt seg map
gt_seg_maps = dataset1.get_gt_seg_maps(efficient_test=True)
assert isinstance(gt_seg_maps, Generator)
gt_seg_maps = list(gt_seg_maps)
assert len(gt_seg_maps) == 5
    # test legacy evaluation
pseudo_results = []
for gt_seg_map in gt_seg_maps:
h, w = gt_seg_map.shape
pseudo_results.append(np.random.randint(low=0, high=7, size=(h, w)))
eval_results1 = dataset1.evaluate(
pseudo_results, metric=['mIoU', 'mDice', 'mFscore'])
    # We use the same dir twice for simplicity
# with ann_dir
cfg2 = dict(
type='CustomDataset',
pipeline=test_pipeline,
data_root=data_root,
img_dir=[img_dir, img_dir],
ann_dir=[ann_dir, ann_dir],
img_suffix='img.jpg',
seg_map_suffix='gt.png',
classes=tuple(['a'] * 7),
separate_eval=separate_eval)
dataset2 = build_dataset(cfg2)
assert isinstance(dataset2, ConcatDataset)
assert len(dataset2) == 10
eval_results2 = dataset2.evaluate(
pseudo_results * 2, metric=['mIoU', 'mDice', 'mFscore'])
if separate_eval:
assert eval_results1['mIoU'] == eval_results2[
'0_mIoU'] == eval_results2['1_mIoU']
assert eval_results1['mDice'] == eval_results2[
'0_mDice'] == eval_results2['1_mDice']
assert eval_results1['mAcc'] == eval_results2[
'0_mAcc'] == eval_results2['1_mAcc']
assert eval_results1['aAcc'] == eval_results2[
'0_aAcc'] == eval_results2['1_aAcc']
assert eval_results1['mFscore'] == eval_results2[
'0_mFscore'] == eval_results2['1_mFscore']
assert eval_results1['mPrecision'] == eval_results2[
'0_mPrecision'] == eval_results2['1_mPrecision']
assert eval_results1['mRecall'] == eval_results2[
'0_mRecall'] == eval_results2['1_mRecall']
else:
assert eval_results1['mIoU'] == eval_results2['mIoU']
assert eval_results1['mDice'] == eval_results2['mDice']
assert eval_results1['mAcc'] == eval_results2['mAcc']
assert eval_results1['aAcc'] == eval_results2['aAcc']
assert eval_results1['mFscore'] == eval_results2['mFscore']
assert eval_results1['mPrecision'] == eval_results2['mPrecision']
assert eval_results1['mRecall'] == eval_results2['mRecall']
    # test getting dataset_idx and sample_idx from ConcatDataset
dataset_idx, sample_idx = dataset2.get_dataset_idx_and_sample_idx(3)
assert dataset_idx == 0
assert sample_idx == 3
dataset_idx, sample_idx = dataset2.get_dataset_idx_and_sample_idx(7)
assert dataset_idx == 1
assert sample_idx == 2
dataset_idx, sample_idx = dataset2.get_dataset_idx_and_sample_idx(-7)
assert dataset_idx == 0
assert sample_idx == 3
    # test a negative index whose magnitude exceeds the dataset length
with pytest.raises(ValueError):
dataset_idx, sample_idx = dataset2.get_dataset_idx_and_sample_idx(-11)
    # test negative index value
    index = -6
    dataset_idx1, sample_idx1 = dataset2.get_dataset_idx_and_sample_idx(index)
    dataset_idx2, sample_idx2 = dataset2.get_dataset_idx_and_sample_idx(
        len(dataset2) + index)
assert dataset_idx1 == dataset_idx2
assert sample_idx1 == sample_idx2
    # test evaluation with pre-eval; dataset.CLASSES is necessary here
pseudo_results = []
eval_results1 = []
for idx in range(len(dataset1)):
h, w = gt_seg_maps[idx].shape
pseudo_result = np.random.randint(low=0, high=7, size=(h, w))
pseudo_results.append(pseudo_result)
eval_results1.extend(dataset1.pre_eval(pseudo_result, idx))
assert len(eval_results1) == len(dataset1)
assert isinstance(eval_results1[0], tuple)
assert len(eval_results1[0]) == 4
assert isinstance(eval_results1[0][0], torch.Tensor)
eval_results1 = dataset1.evaluate(
eval_results1, metric=['mIoU', 'mDice', 'mFscore'])
pseudo_results = pseudo_results * 2
eval_results2 = []
for idx in range(len(dataset2)):
eval_results2.extend(dataset2.pre_eval(pseudo_results[idx], idx))
assert len(eval_results2) == len(dataset2)
assert isinstance(eval_results2[0], tuple)
assert len(eval_results2[0]) == 4
assert isinstance(eval_results2[0][0], torch.Tensor)
eval_results2 = dataset2.evaluate(
eval_results2, metric=['mIoU', 'mDice', 'mFscore'])
if separate_eval:
assert eval_results1['mIoU'] == eval_results2[
'0_mIoU'] == eval_results2['1_mIoU']
assert eval_results1['mDice'] == eval_results2[
'0_mDice'] == eval_results2['1_mDice']
assert eval_results1['mAcc'] == eval_results2[
'0_mAcc'] == eval_results2['1_mAcc']
assert eval_results1['aAcc'] == eval_results2[
'0_aAcc'] == eval_results2['1_aAcc']
assert eval_results1['mFscore'] == eval_results2[
'0_mFscore'] == eval_results2['1_mFscore']
assert eval_results1['mPrecision'] == eval_results2[
'0_mPrecision'] == eval_results2['1_mPrecision']
assert eval_results1['mRecall'] == eval_results2[
'0_mRecall'] == eval_results2['1_mRecall']
else:
assert eval_results1['mIoU'] == eval_results2['mIoU']
assert eval_results1['mDice'] == eval_results2['mDice']
assert eval_results1['mAcc'] == eval_results2['mAcc']
assert eval_results1['aAcc'] == eval_results2['aAcc']
assert eval_results1['mFscore'] == eval_results2['mFscore']
assert eval_results1['mPrecision'] == eval_results2['mPrecision']
assert eval_results1['mRecall'] == eval_results2['mRecall']
# test batch_indices for pre eval
eval_results2 = dataset2.pre_eval(pseudo_results,
list(range(len(pseudo_results))))
assert len(eval_results2) == len(dataset2)
assert isinstance(eval_results2[0], tuple)
assert len(eval_results2[0]) == 4
assert isinstance(eval_results2[0][0], torch.Tensor)
eval_results2 = dataset2.evaluate(
eval_results2, metric=['mIoU', 'mDice', 'mFscore'])
if separate_eval:
assert eval_results1['mIoU'] == eval_results2[
'0_mIoU'] == eval_results2['1_mIoU']
assert eval_results1['mDice'] == eval_results2[
'0_mDice'] == eval_results2['1_mDice']
assert eval_results1['mAcc'] == eval_results2[
'0_mAcc'] == eval_results2['1_mAcc']
assert eval_results1['aAcc'] == eval_results2[
'0_aAcc'] == eval_results2['1_aAcc']
assert eval_results1['mFscore'] == eval_results2[
'0_mFscore'] == eval_results2['1_mFscore']
assert eval_results1['mPrecision'] == eval_results2[
'0_mPrecision'] == eval_results2['1_mPrecision']
assert eval_results1['mRecall'] == eval_results2[
'0_mRecall'] == eval_results2['1_mRecall']
else:
assert eval_results1['mIoU'] == eval_results2['mIoU']
assert eval_results1['mDice'] == eval_results2['mDice']
assert eval_results1['mAcc'] == eval_results2['mAcc']
assert eval_results1['aAcc'] == eval_results2['aAcc']
assert eval_results1['mFscore'] == eval_results2['mFscore']
assert eval_results1['mPrecision'] == eval_results2['mPrecision']
assert eval_results1['mRecall'] == eval_results2['mRecall']
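# Added sketch of the flat-index bookkeeping tested above (local helper with
# assumed cumulative sizes; ConcatDataset keeps the same information in its
# `cumulative_sizes` attribute): wrap negative indices by the total length,
# then bisect on the cumulative sizes to find the owning sub-dataset and the
# in-dataset sample index.
def _demo_flat_index(cumulative_sizes, idx):
    import bisect
    total = cumulative_sizes[-1]
    if idx < 0:
        if -idx > total:
            raise ValueError('absolute value of index should not exceed '
                             'dataset length')
        idx += total
    dataset_idx = bisect.bisect_right(cumulative_sizes, idx)
    if dataset_idx == 0:
        sample_idx = idx
    else:
        sample_idx = idx - cumulative_sizes[dataset_idx - 1]
    return dataset_idx, sample_idx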
def test_ade():
test_dataset = ADE20KDataset(
pipeline=[],
img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'))
assert len(test_dataset) == 5
# Test format_results
pseudo_results = []
for _ in range(len(test_dataset)):
h, w = (2, 2)
pseudo_results.append(np.random.randint(low=0, high=7, size=(h, w)))
file_paths = test_dataset.format_results(pseudo_results, '.format_ade')
assert len(file_paths) == len(test_dataset)
temp = np.array(Image.open(file_paths[0]))
assert np.allclose(temp, pseudo_results[0] + 1)
shutil.rmtree('.format_ade')
@pytest.mark.parametrize('separate_eval', [True, False])
def test_concat_ade(separate_eval):
test_dataset = ADE20KDataset(
pipeline=[],
img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'))
assert len(test_dataset) == 5
concat_dataset = ConcatDataset([test_dataset, test_dataset],
separate_eval=separate_eval)
assert len(concat_dataset) == 10
# Test format_results
pseudo_results = []
for _ in range(len(concat_dataset)):
h, w = (2, 2)
pseudo_results.append(np.random.randint(low=0, high=7, size=(h, w)))
# test format per image
file_paths = []
for i in range(len(pseudo_results)):
file_paths.extend(
concat_dataset.format_results([pseudo_results[i]],
'.format_ade',
indices=[i]))
assert len(file_paths) == len(concat_dataset)
temp = np.array(Image.open(file_paths[0]))
assert np.allclose(temp, pseudo_results[0] + 1)
shutil.rmtree('.format_ade')
# test default argument
file_paths = concat_dataset.format_results(pseudo_results, '.format_ade')
assert len(file_paths) == len(concat_dataset)
temp = np.array(Image.open(file_paths[0]))
assert np.allclose(temp, pseudo_results[0] + 1)
shutil.rmtree('.format_ade')
def test_cityscapes():
test_dataset = CityscapesDataset(
pipeline=[],
img_dir=osp.join(
osp.dirname(__file__),
'../data/pseudo_cityscapes_dataset/leftImg8bit'),
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_cityscapes_dataset/gtFine'))
assert len(test_dataset) == 1
gt_seg_maps = list(test_dataset.get_gt_seg_maps())
# Test format_results
pseudo_results = []
for idx in range(len(test_dataset)):
h, w = gt_seg_maps[idx].shape
pseudo_results.append(np.random.randint(low=0, high=19, size=(h, w)))
file_paths = test_dataset.format_results(pseudo_results, '.format_city')
assert len(file_paths) == len(test_dataset)
temp = np.array(Image.open(file_paths[0]))
assert np.allclose(temp,
test_dataset._convert_to_label_id(pseudo_results[0]))
# Test cityscapes evaluate
test_dataset.evaluate(
pseudo_results, metric='cityscapes', imgfile_prefix='.format_city')
shutil.rmtree('.format_city')
@pytest.mark.parametrize('separate_eval', [True, False])
def test_concat_cityscapes(separate_eval):
cityscape_dataset = CityscapesDataset(
pipeline=[],
img_dir=osp.join(
osp.dirname(__file__),
'../data/pseudo_cityscapes_dataset/leftImg8bit'),
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_cityscapes_dataset/gtFine'))
assert len(cityscape_dataset) == 1
with pytest.raises(NotImplementedError):
_ = ConcatDataset([cityscape_dataset, cityscape_dataset],
separate_eval=separate_eval)
ade_dataset = ADE20KDataset(
pipeline=[],
img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'))
assert len(ade_dataset) == 5
with pytest.raises(NotImplementedError):
_ = ConcatDataset([cityscape_dataset, ade_dataset],
separate_eval=separate_eval)
def test_loveda():
test_dataset = LoveDADataset(
pipeline=[],
img_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_loveda_dataset/img_dir'),
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_loveda_dataset/ann_dir'))
assert len(test_dataset) == 3
gt_seg_maps = list(test_dataset.get_gt_seg_maps())
# Test format_results
pseudo_results = []
for idx in range(len(test_dataset)):
h, w = gt_seg_maps[idx].shape
pseudo_results.append(np.random.randint(low=0, high=7, size=(h, w)))
file_paths = test_dataset.format_results(pseudo_results, '.format_loveda')
assert len(file_paths) == len(test_dataset)
# Test loveda evaluate
test_dataset.evaluate(
pseudo_results, metric='mIoU', imgfile_prefix='.format_loveda')
shutil.rmtree('.format_loveda')
def test_potsdam():
test_dataset = PotsdamDataset(
pipeline=[],
img_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_potsdam_dataset/img_dir'),
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_potsdam_dataset/ann_dir'))
assert len(test_dataset) == 1
def test_vaihingen():
test_dataset = ISPRSDataset(
pipeline=[],
img_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_vaihingen_dataset/img_dir'),
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_vaihingen_dataset/ann_dir'))
assert len(test_dataset) == 1
def test_isaid():
test_dataset = iSAIDDataset(
pipeline=[],
img_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_isaid_dataset/img_dir'),
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_isaid_dataset/ann_dir'))
assert len(test_dataset) == 2
isaid_info = test_dataset.load_annotations(
img_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_isaid_dataset/img_dir'),
img_suffix='.png',
ann_dir=osp.join(
osp.dirname(__file__), '../data/pseudo_isaid_dataset/ann_dir'),
seg_map_suffix='.png',
split=osp.join(
osp.dirname(__file__),
'../data/pseudo_isaid_dataset/splits/train.txt'))
assert len(isaid_info) == 1
@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__',
MagicMock(side_effect=lambda idx: idx))
@pytest.mark.parametrize('dataset, classes', [
('ADE20KDataset', ('wall', 'building')),
('CityscapesDataset', ('road', 'sidewalk')),
('CustomDataset', ('bus', 'car')),
('PascalVOCDataset', ('aeroplane', 'bicycle')),
])
def test_custom_classes_override_default(dataset, classes):
dataset_class = DATASETS.get(dataset)
original_classes = dataset_class.CLASSES
# Test setting classes as a tuple
custom_dataset = dataset_class(
pipeline=[],
img_dir=MagicMock(),
split=MagicMock(),
classes=classes,
test_mode=True)
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == classes
# Test setting classes as a list
custom_dataset = dataset_class(
pipeline=[],
img_dir=MagicMock(),
split=MagicMock(),
classes=list(classes),
test_mode=True)
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == list(classes)
    # Test overriding with a subset of the default classes
custom_dataset = dataset_class(
pipeline=[],
img_dir=MagicMock(),
split=MagicMock(),
classes=[classes[0]],
test_mode=True)
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == [classes[0]]
# Test default behavior
if dataset_class is CustomDataset:
with pytest.raises(AssertionError):
custom_dataset = dataset_class(
pipeline=[],
img_dir=MagicMock(),
split=MagicMock(),
classes=None,
test_mode=True)
else:
custom_dataset = dataset_class(
pipeline=[],
img_dir=MagicMock(),
split=MagicMock(),
classes=None,
test_mode=True)
assert custom_dataset.CLASSES == original_classes
@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__',
MagicMock(side_effect=lambda idx: idx))
def test_custom_dataset_random_palette_is_generated():
dataset = CustomDataset(
pipeline=[],
img_dir=MagicMock(),
split=MagicMock(),
classes=('bus', 'car'),
test_mode=True)
assert len(dataset.PALETTE) == 2
for class_color in dataset.PALETTE:
assert len(class_color) == 3
assert all(x >= 0 and x <= 255 for x in class_color)
@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__',
MagicMock(side_effect=lambda idx: idx))
def test_custom_dataset_custom_palette():
dataset = CustomDataset(
pipeline=[],
img_dir=MagicMock(),
split=MagicMock(),
classes=('bus', 'car'),
palette=[[100, 100, 100], [200, 200, 200]],
test_mode=True)
assert tuple(dataset.PALETTE) == tuple([[100, 100, 100], [200, 200, 200]])
| 33,787 | 35.926776 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_data/test_dataset_builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import os.path as osp
import pytest
from torch.utils.data import (DistributedSampler, RandomSampler,
SequentialSampler)
from mmseg.datasets import (DATASETS, ConcatDataset, MultiImageMixDataset,
build_dataloader, build_dataset)
@DATASETS.register_module()
class ToyDataset(object):
def __init__(self, cnt=0):
self.cnt = cnt
    def __getitem__(self, idx):
return idx
def __len__(self):
return 100
def test_build_dataset():
cfg = dict(type='ToyDataset')
dataset = build_dataset(cfg)
assert isinstance(dataset, ToyDataset)
assert dataset.cnt == 0
dataset = build_dataset(cfg, default_args=dict(cnt=1))
assert isinstance(dataset, ToyDataset)
assert dataset.cnt == 1
data_root = osp.join(osp.dirname(__file__), '../data/pseudo_dataset')
img_dir = 'imgs/'
ann_dir = 'gts/'
    # We use the same dir twice for simplicity
# with ann_dir
cfg = dict(
type='CustomDataset',
pipeline=[],
data_root=data_root,
img_dir=[img_dir, img_dir],
ann_dir=[ann_dir, ann_dir])
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert len(dataset) == 10
cfg = dict(type='MultiImageMixDataset', dataset=cfg, pipeline=[])
dataset = build_dataset(cfg)
assert isinstance(dataset, MultiImageMixDataset)
assert len(dataset) == 10
# with ann_dir, split
cfg = dict(
type='CustomDataset',
pipeline=[],
data_root=data_root,
img_dir=img_dir,
ann_dir=ann_dir,
split=['splits/train.txt', 'splits/val.txt'])
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert len(dataset) == 5
# with ann_dir, split
cfg = dict(
type='CustomDataset',
pipeline=[],
data_root=data_root,
img_dir=img_dir,
ann_dir=[ann_dir, ann_dir],
split=['splits/train.txt', 'splits/val.txt'])
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert len(dataset) == 5
# test mode
cfg = dict(
type='CustomDataset',
pipeline=[],
data_root=data_root,
img_dir=[img_dir, img_dir],
test_mode=True,
classes=('pseudo_class', ))
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert len(dataset) == 10
# test mode with splits
cfg = dict(
type='CustomDataset',
pipeline=[],
data_root=data_root,
img_dir=[img_dir, img_dir],
split=['splits/val.txt', 'splits/val.txt'],
test_mode=True,
classes=('pseudo_class', ))
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert len(dataset) == 2
# len(ann_dir) should be zero or len(img_dir) when len(img_dir) > 1
with pytest.raises(AssertionError):
cfg = dict(
type='CustomDataset',
pipeline=[],
data_root=data_root,
img_dir=[img_dir, img_dir],
ann_dir=[ann_dir, ann_dir, ann_dir])
build_dataset(cfg)
# len(splits) should be zero or len(img_dir) when len(img_dir) > 1
with pytest.raises(AssertionError):
cfg = dict(
type='CustomDataset',
pipeline=[],
data_root=data_root,
img_dir=[img_dir, img_dir],
split=['splits/val.txt', 'splits/val.txt', 'splits/val.txt'])
build_dataset(cfg)
    # len(split) must equal len(ann_dir) only when len(img_dir) == 1 and
    # len(ann_dir) > 1
with pytest.raises(AssertionError):
cfg = dict(
type='CustomDataset',
pipeline=[],
data_root=data_root,
img_dir=img_dir,
ann_dir=[ann_dir, ann_dir],
split=['splits/val.txt', 'splits/val.txt', 'splits/val.txt'])
build_dataset(cfg)
def test_build_dataloader():
dataset = ToyDataset()
samples_per_gpu = 3
# dist=True, shuffle=True, 1GPU
dataloader = build_dataloader(
dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert isinstance(dataloader.sampler, DistributedSampler)
assert dataloader.sampler.shuffle
# dist=True, shuffle=False, 1GPU
dataloader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=2,
shuffle=False)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert isinstance(dataloader.sampler, DistributedSampler)
assert not dataloader.sampler.shuffle
# dist=True, shuffle=True, 8GPU
dataloader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=2,
num_gpus=8)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert dataloader.num_workers == 2
# dist=False, shuffle=True, 1GPU
dataloader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=2,
dist=False)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert isinstance(dataloader.sampler, RandomSampler)
assert dataloader.num_workers == 2
# dist=False, shuffle=False, 1GPU
dataloader = build_dataloader(
dataset,
samples_per_gpu=3,
workers_per_gpu=2,
shuffle=False,
dist=False)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert isinstance(dataloader.sampler, SequentialSampler)
assert dataloader.num_workers == 2
# dist=False, shuffle=True, 8GPU
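    # without dist, a single dataloader feeds all GPUs, so batch size and
    # worker count scale with num_gpus: 3 * 8 = 24 and 2 * 8 = 16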
dataloader = build_dataloader(
dataset, samples_per_gpu=3, workers_per_gpu=2, num_gpus=8, dist=False)
assert dataloader.batch_size == samples_per_gpu * 8
assert len(dataloader) == int(
math.ceil(len(dataset) / samples_per_gpu / 8))
assert isinstance(dataloader.sampler, RandomSampler)
assert dataloader.num_workers == 16
| 6,416 | 30.925373 | 78 | py |
mmsegmentation | mmsegmentation-master/tests/test_data/test_loading.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmseg.datasets.pipelines import LoadAnnotations, LoadImageFromFile
class TestLoading(object):
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert results['pad_shape'] == (288, 512, 3)
assert results['scale_factor'] == 1.0
np.testing.assert_equal(results['img_norm_cfg']['mean'],
np.zeros(3, dtype=np.float32))
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False,color_type='color',imdecode_backend='cv2')"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
np.testing.assert_equal(results['img_norm_cfg']['mean'],
np.zeros(1, dtype=np.float32))
def test_load_seg(self):
results = dict(
seg_prefix=self.data_prefix,
ann_info=dict(seg_map='seg.png'),
seg_fields=[])
transform = LoadAnnotations()
results = transform(copy.deepcopy(results))
assert results['seg_fields'] == ['gt_semantic_seg']
assert results['gt_semantic_seg'].shape == (288, 512)
assert results['gt_semantic_seg'].dtype == np.uint8
assert repr(transform) == transform.__class__.__name__ + \
"(reduce_zero_label=False,imdecode_backend='pillow')"
# no img_prefix
results = dict(
seg_prefix=None,
ann_info=dict(seg_map='tests/data/seg.png'),
seg_fields=[])
transform = LoadAnnotations()
results = transform(copy.deepcopy(results))
assert results['gt_semantic_seg'].shape == (288, 512)
assert results['gt_semantic_seg'].dtype == np.uint8
# reduce_zero_label
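        # reduce_zero_label maps label 0 to the ignore index 255 and shifts
        # all remaining labels down by one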
transform = LoadAnnotations(reduce_zero_label=True)
results = transform(copy.deepcopy(results))
assert results['gt_semantic_seg'].shape == (288, 512)
assert results['gt_semantic_seg'].dtype == np.uint8
        # pillow backend
results = dict(
seg_prefix=self.data_prefix,
ann_info=dict(seg_map='seg.png'),
seg_fields=[])
transform = LoadAnnotations(imdecode_backend='pillow')
results = transform(copy.deepcopy(results))
# this image is saved by PIL
assert results['gt_semantic_seg'].shape == (288, 512)
assert results['gt_semantic_seg'].dtype == np.uint8
def test_load_seg_custom_classes(self):
test_img = np.random.rand(10, 10)
test_gt = np.zeros_like(test_img)
test_gt[2:4, 2:4] = 1
test_gt[2:4, 6:8] = 2
test_gt[6:8, 2:4] = 3
test_gt[6:8, 6:8] = 4
tmp_dir = tempfile.TemporaryDirectory()
img_path = osp.join(tmp_dir.name, 'img.jpg')
gt_path = osp.join(tmp_dir.name, 'gt.png')
mmcv.imwrite(test_img, img_path)
mmcv.imwrite(test_gt, gt_path)
# test only train with label with id 3
results = dict(
img_info=dict(filename=img_path),
ann_info=dict(seg_map=gt_path),
label_map={
0: 0,
1: 0,
2: 0,
3: 1,
4: 0
},
seg_fields=[])
load_imgs = LoadImageFromFile()
results = load_imgs(copy.deepcopy(results))
load_anns = LoadAnnotations()
results = load_anns(copy.deepcopy(results))
gt_array = results['gt_semantic_seg']
true_mask = np.zeros_like(gt_array)
true_mask[6:8, 2:4] = 1
assert results['seg_fields'] == ['gt_semantic_seg']
assert gt_array.shape == (10, 10)
assert gt_array.dtype == np.uint8
np.testing.assert_array_equal(gt_array, true_mask)
# test only train with label with id 4 and 3
results = dict(
img_info=dict(filename=img_path),
ann_info=dict(seg_map=gt_path),
label_map={
0: 0,
1: 0,
2: 0,
3: 2,
4: 1
},
seg_fields=[])
load_imgs = LoadImageFromFile()
results = load_imgs(copy.deepcopy(results))
load_anns = LoadAnnotations()
results = load_anns(copy.deepcopy(results))
gt_array = results['gt_semantic_seg']
true_mask = np.zeros_like(gt_array)
true_mask[6:8, 2:4] = 2
true_mask[6:8, 6:8] = 1
assert results['seg_fields'] == ['gt_semantic_seg']
assert gt_array.shape == (10, 10)
assert gt_array.dtype == np.uint8
np.testing.assert_array_equal(gt_array, true_mask)
# test with removing a class and reducing zero label simultaneously
results = dict(
img_info=dict(filename=img_path),
ann_info=dict(seg_map=gt_path),
# since reduce_zero_label is True, there are only 4 real classes.
# if the full set of classes is ["A", "B", "C", "D"], the
# following label map simulates the dataset option
# classes=["A", "C", "D"] which removes class "B".
label_map={
0: 0,
1: 255, # simulate removing class 1
2: 1,
3: 2
},
seg_fields=[])
load_imgs = LoadImageFromFile()
results = load_imgs(copy.deepcopy(results))
# reduce zero label
load_anns = LoadAnnotations(reduce_zero_label=True)
results = load_anns(copy.deepcopy(results))
gt_array = results['gt_semantic_seg']
true_mask = np.ones_like(gt_array) * 255 # all zeros get mapped to 255
true_mask[2:4, 2:4] = 0 # 1s are reduced to class 0 mapped to class 0
true_mask[2:4, 6:8] = 255 # 2s are reduced to class 1 which is removed
true_mask[6:8, 2:4] = 1 # 3s are reduced to class 2 mapped to class 1
true_mask[6:8, 6:8] = 2 # 4s are reduced to class 3 mapped to class 2
assert results['seg_fields'] == ['gt_semantic_seg']
assert gt_array.shape == (10, 10)
assert gt_array.dtype == np.uint8
np.testing.assert_array_equal(gt_array, true_mask)
# test no custom classes
results = dict(
img_info=dict(filename=img_path),
ann_info=dict(seg_map=gt_path),
seg_fields=[])
load_imgs = LoadImageFromFile()
results = load_imgs(copy.deepcopy(results))
load_anns = LoadAnnotations()
results = load_anns(copy.deepcopy(results))
gt_array = results['gt_semantic_seg']
assert results['seg_fields'] == ['gt_semantic_seg']
assert gt_array.shape == (10, 10)
assert gt_array.dtype == np.uint8
np.testing.assert_array_equal(gt_array, test_gt)
tmp_dir.cleanup()
| 9,151 | 34.890196 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_data/test_transform.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
from mmcv.utils import build_from_cfg
from PIL import Image
from mmseg.datasets.builder import PIPELINES
def test_resize_to_multiple():
transform = dict(type='ResizeToMultiple', size_divisor=32)
transform = build_from_cfg(transform, PIPELINES)
img = np.random.randn(213, 232, 3)
seg = np.random.randint(0, 19, (213, 232))
results = dict()
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['pad_shape'] = img.shape
results = transform(results)
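    # 213 x 232 is resized up to the nearest multiples of 32: 224 x 256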
assert results['img'].shape == (224, 256, 3)
assert results['gt_semantic_seg'].shape == (224, 256)
assert results['img_shape'] == (224, 256, 3)
assert results['pad_shape'] == (224, 256, 3)
def test_resize():
# test assertion if img_scale is a list
with pytest.raises(AssertionError):
transform = dict(type='Resize', img_scale=[1333, 800], keep_ratio=True)
build_from_cfg(transform, PIPELINES)
    # test assertion if len(img_scale) > 1 while ratio_range is not None
with pytest.raises(AssertionError):
transform = dict(
type='Resize',
img_scale=[(1333, 800), (1333, 600)],
ratio_range=(0.9, 1.1),
keep_ratio=True)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid multiscale_mode
with pytest.raises(AssertionError):
transform = dict(
type='Resize',
img_scale=[(1333, 800), (1333, 600)],
keep_ratio=True,
multiscale_mode='2333')
build_from_cfg(transform, PIPELINES)
transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
resize_module = build_from_cfg(transform, PIPELINES)
results = dict()
# (288, 512, 3)
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
resized_results = resize_module(results.copy())
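    # keep_ratio=True uses scale = min(1333 / 512, 800 / 288) ~= 2.60, so the
    # 288 x 512 image becomes 750 x 1333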
assert resized_results['img_shape'] == (750, 1333, 3)
# test keep_ratio=False
transform = dict(
type='Resize',
img_scale=(1280, 800),
multiscale_mode='value',
keep_ratio=False)
resize_module = build_from_cfg(transform, PIPELINES)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (800, 1280, 3)
# test multiscale_mode='range'
transform = dict(
type='Resize',
img_scale=[(1333, 400), (1333, 1200)],
multiscale_mode='range',
keep_ratio=True)
resize_module = build_from_cfg(transform, PIPELINES)
resized_results = resize_module(results.copy())
assert max(resized_results['img_shape'][:2]) <= 1333
assert min(resized_results['img_shape'][:2]) >= 400
assert min(resized_results['img_shape'][:2]) <= 1200
# test multiscale_mode='value'
transform = dict(
type='Resize',
img_scale=[(1333, 800), (1333, 400)],
multiscale_mode='value',
keep_ratio=True)
resize_module = build_from_cfg(transform, PIPELINES)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] in [(750, 1333, 3), (400, 711, 3)]
    # test a single img_scale with ratio_range
transform = dict(
type='Resize',
img_scale=(1333, 800),
ratio_range=(0.9, 1.1),
keep_ratio=True)
resize_module = build_from_cfg(transform, PIPELINES)
resized_results = resize_module(results.copy())
assert max(resized_results['img_shape'][:2]) <= 1333 * 1.1
# test img_scale=None and ratio_range is tuple.
# img shape: (288, 512, 3)
transform = dict(
type='Resize', img_scale=None, ratio_range=(0.5, 2.0), keep_ratio=True)
resize_module = build_from_cfg(transform, PIPELINES)
resized_results = resize_module(results.copy())
assert int(288 * 0.5) <= resized_results['img_shape'][0] <= 288 * 2.0
assert int(512 * 0.5) <= resized_results['img_shape'][1] <= 512 * 2.0
# test min_size=640
transform = dict(type='Resize', img_scale=(2560, 640), min_size=640)
resize_module = build_from_cfg(transform, PIPELINES)
resized_results = resize_module(results.copy())
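    # min_size=640 guarantees the short edge is at least 640 after resizing:
    # 288 -> 640 and 512 -> 1138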
assert resized_results['img_shape'] == (640, 1138, 3)
# test min_size=640 and img_scale=(512, 640)
transform = dict(type='Resize', img_scale=(512, 640), min_size=640)
resize_module = build_from_cfg(transform, PIPELINES)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (640, 1138, 3)
# test h > w
img = np.random.randn(512, 288, 3)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
transform = dict(type='Resize', img_scale=(2560, 640), min_size=640)
resize_module = build_from_cfg(transform, PIPELINES)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (1138, 640, 3)
def test_flip():
# test assertion for invalid prob
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', prob=1.5)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid direction
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', prob=1, direction='horizonta')
build_from_cfg(transform, PIPELINES)
transform = dict(type='RandomFlip', prob=1)
flip_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
original_seg = copy.deepcopy(seg)
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = flip_module(results)
flip_module = build_from_cfg(transform, PIPELINES)
results = flip_module(results)
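    # flipping twice with prob=1 restores the original image and seg map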
assert np.equal(original_img, results['img']).all()
assert np.equal(original_seg, results['gt_semantic_seg']).all()
def test_random_crop():
# test assertion for invalid random crop
with pytest.raises(AssertionError):
transform = dict(type='RandomCrop', crop_size=(-1, 0))
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
transform = dict(type='RandomCrop', crop_size=(h - 20, w - 20))
crop_module = build_from_cfg(transform, PIPELINES)
results = crop_module(results)
assert results['img'].shape[:2] == (h - 20, w - 20)
assert results['img_shape'][:2] == (h - 20, w - 20)
assert results['gt_semantic_seg'].shape[:2] == (h - 20, w - 20)
def test_pad():
# test assertion if both size_divisor and size is None
with pytest.raises(AssertionError):
transform = dict(type='Pad')
build_from_cfg(transform, PIPELINES)
transform = dict(type='Pad', size_divisor=32)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
# original img already divisible by 32
assert np.equal(results['img'], original_img).all()
img_shape = results['img'].shape
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
resize_transform = dict(
type='Resize', img_scale=(1333, 800), keep_ratio=True)
resize_module = build_from_cfg(resize_transform, PIPELINES)
results = resize_module(results)
results = transform(results)
img_shape = results['img'].shape
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
def test_rotate():
# test assertion degree should be tuple[float] or float
with pytest.raises(AssertionError):
transform = dict(type='RandomRotate', prob=0.5, degree=-10)
build_from_cfg(transform, PIPELINES)
# test assertion degree should be tuple[float] or float
with pytest.raises(AssertionError):
transform = dict(type='RandomRotate', prob=0.5, degree=(10., 20., 30.))
build_from_cfg(transform, PIPELINES)
transform = dict(type='RandomRotate', degree=10., prob=1.)
transform = build_from_cfg(transform, PIPELINES)
assert str(transform) == f'RandomRotate(' \
f'prob={1.}, ' \
f'degree=({-10.}, {10.}), ' \
f'pad_val={0}, ' \
f'seg_pad_val={255}, ' \
f'center={None}, ' \
f'auto_bound={False})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, _ = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape[:2] == (h, w)
assert results['gt_semantic_seg'].shape[:2] == (h, w)
def test_normalize():
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
transform = dict(type='Normalize', **img_norm_cfg)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
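    # to_rgb=True converts the BGR image loaded by mmcv to RGB before
    # normalization, hence the channel reversal ([..., ::-1]) below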
mean = np.array(img_norm_cfg['mean'])
std = np.array(img_norm_cfg['std'])
converted_img = (original_img[..., ::-1] - mean) / std
assert np.allclose(results['img'], converted_img)
def test_rgb2gray():
# test assertion out_channels should be greater than 0
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=-1)
build_from_cfg(transform, PIPELINES)
# test assertion weights should be tuple[float]
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=1, weights=1.1)
build_from_cfg(transform, PIPELINES)
# test out_channels is None
transform = dict(type='RGB2Gray')
transform = build_from_cfg(transform, PIPELINES)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={None}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, c)
assert results['img_shape'] == (h, w, c)
assert results['ori_shape'] == (h, w, c)
# test out_channels = 2
transform = dict(type='RGB2Gray', out_channels=2)
transform = build_from_cfg(transform, PIPELINES)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={2}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, 2)
assert results['img_shape'] == (h, w, 2)
assert results['ori_shape'] == (h, w, c)
def test_adjust_gamma():
# test assertion if gamma <= 0
with pytest.raises(AssertionError):
transform = dict(type='AdjustGamma', gamma=0)
build_from_cfg(transform, PIPELINES)
# test assertion if gamma is list
with pytest.raises(AssertionError):
transform = dict(type='AdjustGamma', gamma=[1.2])
build_from_cfg(transform, PIPELINES)
# test with gamma = 1.2
transform = dict(type='AdjustGamma', gamma=1.2)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
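    # gamma correction is implemented as a 256-entry lookup table:
    # out = ((in / 255) ** (1 / gamma)) * 255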
inv_gamma = 1.0 / 1.2
table = np.array([((i / 255.0)**inv_gamma) * 255
for i in np.arange(0, 256)]).astype('uint8')
converted_img = mmcv.lut_transform(
np.array(original_img, dtype=np.uint8), table)
assert np.allclose(results['img'], converted_img)
assert str(transform) == f'AdjustGamma(gamma={1.2})'
def test_rerange():
# test assertion if min_value or max_value is illegal
with pytest.raises(AssertionError):
transform = dict(type='Rerange', min_value=[0], max_value=[255])
build_from_cfg(transform, PIPELINES)
# test assertion if min_value >= max_value
with pytest.raises(AssertionError):
transform = dict(type='Rerange', min_value=1, max_value=1)
build_from_cfg(transform, PIPELINES)
# test assertion if img_min_value == img_max_value
with pytest.raises(AssertionError):
transform = dict(type='Rerange', min_value=0, max_value=1)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
results['img'] = np.array([[1, 1], [1, 1]])
transform(results)
img_rerange_cfg = dict()
transform = dict(type='Rerange', **img_rerange_cfg)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
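    # Rerange linearly maps intensities from [min(img), max(img)] to the
    # default output range [0, 255]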
min_value = np.min(original_img)
max_value = np.max(original_img)
converted_img = (original_img - min_value) / (max_value - min_value) * 255
assert np.allclose(results['img'], converted_img)
assert str(transform) == f'Rerange(min_value={0}, max_value={255})'
def test_CLAHE():
# test assertion if clip_limit is None
with pytest.raises(AssertionError):
transform = dict(type='CLAHE', clip_limit=None)
build_from_cfg(transform, PIPELINES)
# test assertion if tile_grid_size is illegal
with pytest.raises(AssertionError):
transform = dict(type='CLAHE', tile_grid_size=(8.0, 8.0))
build_from_cfg(transform, PIPELINES)
# test assertion if tile_grid_size is illegal
with pytest.raises(AssertionError):
transform = dict(type='CLAHE', tile_grid_size=(9, 9, 9))
build_from_cfg(transform, PIPELINES)
transform = dict(type='CLAHE', clip_limit=2)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
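    # CLAHE is applied to each channel independently with clip limit 2 and
    # an 8 x 8 tile grid, matching the reference computation below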
converted_img = np.empty(original_img.shape)
for i in range(original_img.shape[2]):
converted_img[:, :, i] = mmcv.clahe(
np.array(original_img[:, :, i], dtype=np.uint8), 2, (8, 8))
assert np.allclose(results['img'], converted_img)
assert str(transform) == f'CLAHE(clip_limit={2}, tile_grid_size={(8, 8)})'
def test_seg_rescale():
results = dict()
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
h, w = seg.shape
transform = dict(type='SegRescale', scale_factor=1. / 2)
rescale_module = build_from_cfg(transform, PIPELINES)
rescale_results = rescale_module(results.copy())
assert rescale_results['gt_semantic_seg'].shape == (h // 2, w // 2)
transform = dict(type='SegRescale', scale_factor=1)
rescale_module = build_from_cfg(transform, PIPELINES)
rescale_results = rescale_module(results.copy())
assert rescale_results['gt_semantic_seg'].shape == (h, w)
def test_cutout():
# test prob
with pytest.raises(AssertionError):
transform = dict(type='RandomCutOut', prob=1.5, n_holes=1)
build_from_cfg(transform, PIPELINES)
# test n_holes
with pytest.raises(AssertionError):
transform = dict(
type='RandomCutOut', prob=0.5, n_holes=(5, 3), cutout_shape=(8, 8))
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(
type='RandomCutOut',
prob=0.5,
n_holes=(3, 4, 5),
cutout_shape=(8, 8))
build_from_cfg(transform, PIPELINES)
# test cutout_shape and cutout_ratio
with pytest.raises(AssertionError):
transform = dict(
type='RandomCutOut', prob=0.5, n_holes=1, cutout_shape=8)
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(
type='RandomCutOut', prob=0.5, n_holes=1, cutout_ratio=0.2)
build_from_cfg(transform, PIPELINES)
    # exactly one of cutout_shape and cutout_ratio should be given
with pytest.raises(AssertionError):
transform = dict(type='RandomCutOut', prob=0.5, n_holes=1)
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(
type='RandomCutOut',
prob=0.5,
n_holes=1,
cutout_shape=(2, 2),
cutout_ratio=(0.4, 0.4))
build_from_cfg(transform, PIPELINES)
# test seg_fill_in
with pytest.raises(AssertionError):
transform = dict(
type='RandomCutOut',
prob=0.5,
n_holes=1,
cutout_shape=(8, 8),
seg_fill_in='a')
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(
type='RandomCutOut',
prob=0.5,
n_holes=1,
cutout_shape=(8, 8),
seg_fill_in=256)
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['pad_shape'] = img.shape
results['img_fields'] = ['img']
transform = dict(
type='RandomCutOut', prob=1, n_holes=1, cutout_shape=(10, 10))
cutout_module = build_from_cfg(transform, PIPELINES)
assert 'cutout_shape' in repr(cutout_module)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() < img.sum()
transform = dict(
type='RandomCutOut', prob=1, n_holes=1, cutout_ratio=(0.8, 0.8))
cutout_module = build_from_cfg(transform, PIPELINES)
assert 'cutout_ratio' in repr(cutout_module)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() < img.sum()
transform = dict(
type='RandomCutOut', prob=0, n_holes=1, cutout_ratio=(0.8, 0.8))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() == img.sum()
assert cutout_result['gt_semantic_seg'].sum() == seg.sum()
transform = dict(
type='RandomCutOut',
prob=1,
n_holes=(2, 4),
cutout_shape=[(10, 10), (15, 15)],
fill_in=(255, 255, 255),
seg_fill_in=None)
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() > img.sum()
assert cutout_result['gt_semantic_seg'].sum() == seg.sum()
transform = dict(
type='RandomCutOut',
prob=1,
n_holes=1,
cutout_ratio=(0.8, 0.8),
fill_in=(255, 255, 255),
seg_fill_in=255)
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() > img.sum()
assert cutout_result['gt_semantic_seg'].sum() > seg.sum()
def test_mosaic():
# test prob
with pytest.raises(AssertionError):
transform = dict(type='RandomMosaic', prob=1.5)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid img_scale
with pytest.raises(AssertionError):
transform = dict(type='RandomMosaic', prob=1, img_scale=640)
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
transform = dict(type='RandomMosaic', prob=1, img_scale=(10, 12))
mosaic_module = build_from_cfg(transform, PIPELINES)
assert 'Mosaic' in repr(mosaic_module)
# test assertion for invalid mix_results
with pytest.raises(AssertionError):
mosaic_module(results)
results['mix_results'] = [copy.deepcopy(results)] * 3
results = mosaic_module(results)
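    # the mosaic stitches 4 images into a 2 x 2 grid, so the output size is
    # twice img_scale: (2 * 10, 2 * 12) = (20, 24)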
assert results['img'].shape[:2] == (20, 24)
results = dict()
results['img'] = img[:, :, 0]
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
transform = dict(type='RandomMosaic', prob=0, img_scale=(10, 12))
mosaic_module = build_from_cfg(transform, PIPELINES)
results['mix_results'] = [copy.deepcopy(results)] * 3
results = mosaic_module(results)
assert results['img'].shape[:2] == img.shape[:2]
transform = dict(type='RandomMosaic', prob=1, img_scale=(10, 12))
mosaic_module = build_from_cfg(transform, PIPELINES)
results = mosaic_module(results)
assert results['img'].shape[:2] == (20, 24)
def test_albu_transform():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../data'),
img_info=dict(filename='color.jpg'))
# Define simple pipeline
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
albu_transform = dict(
type='Albu', transforms=[dict(type='ChannelShuffle', p=1)])
albu_transform = build_from_cfg(albu_transform, PIPELINES)
    # use a unit std to avoid dividing by zero inside the normalization
    normalize = dict(type='Normalize', mean=[0] * 3, std=[1] * 3, to_rgb=True)
normalize = build_from_cfg(normalize, PIPELINES)
# Execute transforms
results = load(results)
results = albu_transform(results)
results = normalize(results)
assert results['img'].dtype == np.float32
def test_albu_channel_order():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../data'),
img_info=dict(filename='color.jpg'))
# Define simple pipeline
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
# Transform is modifying B channel
albu_transform = dict(
type='Albu',
transforms=[
dict(
type='RGBShift',
r_shift_limit=0,
g_shift_limit=0,
b_shift_limit=200,
p=1)
])
albu_transform = build_from_cfg(albu_transform, PIPELINES)
# Execute transforms
results_load = load(results)
results_albu = albu_transform(results_load)
# assert only Green and Red channel are not modified
np.testing.assert_array_equal(results_albu['img'][..., 1:],
results_load['img'][..., 1:])
# assert Blue channel is modified
with pytest.raises(AssertionError):
np.testing.assert_array_equal(results_albu['img'][..., 0],
results_load['img'][..., 0])
| 26,888 | 34.804261 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_data/test_tta.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import pytest
from mmcv.utils import build_from_cfg
from mmseg.datasets.builder import PIPELINES
def test_multi_scale_flip_aug():
# test assertion if img_scale=None, img_ratios=1 (not float).
with pytest.raises(AssertionError):
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=None,
img_ratios=1,
transforms=[dict(type='Resize', keep_ratio=False)],
)
build_from_cfg(tta_transform, PIPELINES)
# test assertion if img_scale=None, img_ratios=None.
with pytest.raises(AssertionError):
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=None,
img_ratios=None,
transforms=[dict(type='Resize', keep_ratio=False)],
)
build_from_cfg(tta_transform, PIPELINES)
# test assertion if img_scale=(512, 512), img_ratios=1 (not float).
with pytest.raises(AssertionError):
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
img_ratios=1,
transforms=[dict(type='Resize', keep_ratio=False)],
)
build_from_cfg(tta_transform, PIPELINES)
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
img_ratios=[0.5, 1.0, 2.0],
flip=False,
transforms=[dict(type='Resize', keep_ratio=False)],
)
tta_module = build_from_cfg(tta_transform, PIPELINES)
results = dict()
# (288, 512, 3)
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
tta_results = tta_module(results.copy())
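    # each entry in img_ratios scales the base img_scale (512, 512), giving
    # one augmented sample per scale; flip=False adds no flipped copies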
assert tta_results['scale'] == [(256, 256), (512, 512), (1024, 1024)]
assert tta_results['flip'] == [False, False, False]
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
img_ratios=[0.5, 1.0, 2.0],
flip=True,
transforms=[dict(type='Resize', keep_ratio=False)],
)
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_results = tta_module(results.copy())
assert tta_results['scale'] == [(256, 256), (256, 256), (512, 512),
(512, 512), (1024, 1024), (1024, 1024)]
assert tta_results['flip'] == [False, True, False, True, False, True]
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
img_ratios=1.0,
flip=False,
transforms=[dict(type='Resize', keep_ratio=False)],
)
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_results = tta_module(results.copy())
assert tta_results['scale'] == [(512, 512)]
assert tta_results['flip'] == [False]
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
img_ratios=1.0,
flip=True,
transforms=[dict(type='Resize', keep_ratio=False)],
)
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_results = tta_module(results.copy())
assert tta_results['scale'] == [(512, 512), (512, 512)]
assert tta_results['flip'] == [False, True]
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=None,
img_ratios=[0.5, 1.0, 2.0],
flip=False,
transforms=[dict(type='Resize', keep_ratio=False)],
)
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_results = tta_module(results.copy())
assert tta_results['scale'] == [(256, 144), (512, 288), (1024, 576)]
assert tta_results['flip'] == [False, False, False]
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=None,
img_ratios=[0.5, 1.0, 2.0],
flip=True,
transforms=[dict(type='Resize', keep_ratio=False)],
)
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_results = tta_module(results.copy())
assert tta_results['scale'] == [(256, 144), (256, 144), (512, 288),
(512, 288), (1024, 576), (1024, 576)]
assert tta_results['flip'] == [False, True, False, True, False, True]
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=[(256, 256), (512, 512), (1024, 1024)],
img_ratios=None,
flip=False,
transforms=[dict(type='Resize', keep_ratio=False)],
)
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_results = tta_module(results.copy())
assert tta_results['scale'] == [(256, 256), (512, 512), (1024, 1024)]
assert tta_results['flip'] == [False, False, False]
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=[(256, 256), (512, 512), (1024, 1024)],
img_ratios=None,
flip=True,
transforms=[dict(type='Resize', keep_ratio=False)],
)
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_results = tta_module(results.copy())
assert tta_results['scale'] == [(256, 256), (256, 256), (512, 512),
(512, 512), (1024, 1024), (1024, 1024)]
assert tta_results['flip'] == [False, True, False, True, False, True]
# test assertion if flip is True and Pad executed before RandomFlip
with pytest.raises(AssertionError):
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=[(256, 256), (512, 512), (1024, 1024)],
img_ratios=None,
flip=True,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Pad', size_divisor=32),
dict(type='RandomFlip'),
])
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_transform = dict(
type='MultiScaleFlipAug',
img_scale=[(256, 256), (512, 512), (1024, 1024)],
img_ratios=None,
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32),
])
tta_module = build_from_cfg(tta_transform, PIPELINES)
tta_results = tta_module(results.copy())
assert tta_results['scale'] == [(256, 256), (256, 256), (512, 512),
(512, 512), (1024, 1024), (1024, 1024)]
assert tta_results['flip'] == [False, True, False, True, False, True]
assert tta_results['img_shape'] == [(144, 256, 3), (144, 256, 3),
(288, 512, 3), (288, 512, 3),
(576, 1024, 3), (576, 1024, 3)]
assert tta_results['pad_shape'] == [(160, 256, 3), (160, 256, 3),
(288, 512, 3), (288, 512, 3),
(576, 1024, 3), (576, 1024, 3)]
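    # Pad rounds each side up to a multiple of 32: 144 -> 160, while the
    # other shapes are already divisible by 32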
for i in range(len(tta_results['img'])):
assert tta_results['img'][i].shape == tta_results['pad_shape'][i]
| 7,146 | 36.615789 | 75 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
| 48 | 23.5 | 47 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_forward.py | # Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_forward.py."""
import copy
from os.path import dirname, exists, join
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.cnn.utils import revert_sync_batchnorm
def _demo_mm_inputs(input_shape=(2, 3, 8, 16), num_classes=10):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(
low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
'flip_direction': 'horizontal'
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs),
'img_metas': img_metas,
'gt_semantic_seg': torch.LongTensor(segs)
}
return mm_inputs
def _get_config_directory():
"""Find the predefined segmentor config directory."""
try:
# Assume we are running in the source mmsegmentation repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmseg
repo_dpath = dirname(dirname(dirname(mmseg.__file__)))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_segmentor_cfg(fname):
"""Grab configs necessary to create a segmentor.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def test_pspnet_forward():
_test_encoder_decoder_forward(
'pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py')
def test_fcn_forward():
_test_encoder_decoder_forward('fcn/fcn_r50-d8_512x1024_40k_cityscapes.py')
def test_deeplabv3_forward():
_test_encoder_decoder_forward(
'deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py')
def test_deeplabv3plus_forward():
_test_encoder_decoder_forward(
'deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py')
def test_gcnet_forward():
_test_encoder_decoder_forward(
'gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py')
def test_ann_forward():
_test_encoder_decoder_forward('ann/ann_r50-d8_512x1024_40k_cityscapes.py')
def test_ccnet_forward():
if not torch.cuda.is_available():
pytest.skip('CCNet requires CUDA')
_test_encoder_decoder_forward(
'ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py')
def test_danet_forward():
_test_encoder_decoder_forward(
'danet/danet_r50-d8_512x1024_40k_cityscapes.py')
def test_nonlocal_net_forward():
_test_encoder_decoder_forward(
'nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py')
def test_upernet_forward():
_test_encoder_decoder_forward(
'upernet/upernet_r50_512x1024_40k_cityscapes.py')
def test_hrnet_forward():
_test_encoder_decoder_forward('hrnet/fcn_hr18s_512x1024_40k_cityscapes.py')
def test_ocrnet_forward():
_test_encoder_decoder_forward(
'ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py')
def test_psanet_forward():
_test_encoder_decoder_forward(
'psanet/psanet_r50-d8_512x1024_40k_cityscapes.py')
def test_encnet_forward():
_test_encoder_decoder_forward(
'encnet/encnet_r50-d8_512x1024_40k_cityscapes.py')
def test_sem_fpn_forward():
_test_encoder_decoder_forward('sem_fpn/fpn_r50_512x1024_80k_cityscapes.py')
def test_point_rend_forward():
_test_encoder_decoder_forward(
'point_rend/pointrend_r50_512x1024_80k_cityscapes.py')
def test_mobilenet_v2_forward():
_test_encoder_decoder_forward(
'mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py')
def test_dnlnet_forward():
_test_encoder_decoder_forward(
'dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py')
def test_emanet_forward():
_test_encoder_decoder_forward(
'emanet/emanet_r50-d8_512x1024_80k_cityscapes.py')
def test_isanet_forward():
_test_encoder_decoder_forward(
'isanet/isanet_r50-d8_512x1024_40k_cityscapes.py')
def get_world_size(process_group):
return 1
def _check_input_dim(self, inputs):
pass
@patch('torch.nn.modules.batchnorm._BatchNorm._check_input_dim',
_check_input_dim)
@patch('torch.distributed.get_world_size', get_world_size)
def _test_encoder_decoder_forward(cfg_file):
model = _get_segmentor_cfg(cfg_file)
model['pretrained'] = None
model['test_cfg']['mode'] = 'whole'
from mmseg.models import build_segmentor
segmentor = build_segmentor(model)
segmentor.init_weights()
if isinstance(segmentor.decode_head, nn.ModuleList):
num_classes = segmentor.decode_head[-1].num_classes
else:
num_classes = segmentor.decode_head.num_classes
# batch_size=2 for BatchNorm
input_shape = (2, 3, 32, 32)
mm_inputs = _demo_mm_inputs(input_shape, num_classes=num_classes)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_semantic_seg = mm_inputs['gt_semantic_seg']
# convert to cuda Tensor if applicable
if torch.cuda.is_available():
segmentor = segmentor.cuda()
imgs = imgs.cuda()
gt_semantic_seg = gt_semantic_seg.cuda()
else:
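        # SyncBN needs a distributed process group; convert to plain BN so
        # the forward test can also run on CPU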
segmentor = revert_sync_batchnorm(segmentor)
# Test forward train
losses = segmentor.forward(
imgs, img_metas, gt_semantic_seg=gt_semantic_seg, return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
segmentor.eval()
# pack into lists
img_list = [img[None, :] for img in imgs]
img_meta_list = [[img_meta] for img_meta in img_metas]
segmentor.forward(img_list, img_meta_list, return_loss=False)
| 6,534 | 26.690678 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .utils import all_zeros, check_norm_state, is_block, is_norm
__all__ = ['is_norm', 'is_block', 'all_zeros', 'check_norm_state']
| 182 | 35.6 | 66 | py |