repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
IID_representation_learning | IID_representation_learning-master/restyle/models/e4e_modules/discriminator.py | from torch import nn
class LatentCodesDiscriminator(nn.Module):
def __init__(self, style_dim, n_mlp):
super().__init__()
self.style_dim = style_dim
layers = []
for i in range(n_mlp-1):
layers.append(
nn.Linear(style_dim, style_dim)
)
layers.append(nn.LeakyReLU(0.2))
        layers.append(nn.Linear(512, 1))  # final logit layer (assumes style_dim == 512)
self.mlp = nn.Sequential(*layers)
def forward(self, w):
return self.mlp(w)
| 496 | 22.666667 | 47 | py |
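
A minimal usage sketch for the discriminator above, assuming the `restyle` directory is on PYTHONPATH and using illustrative shapes (batch of 8 codes, 18 latents, style_dim 512):

```python
import torch
from models.e4e_modules.discriminator import LatentCodesDiscriminator

disc = LatentCodesDiscriminator(style_dim=512, n_mlp=4)
w = torch.randn(8, 18, 512)   # (batch, n_latent, style_dim) codes from an encoder
logits = disc(w)              # nn.Linear broadcasts over leading dims -> (8, 18, 1)
print(logits.shape)           # one realism logit per latent vector
```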
IID_representation_learning | IID_representation_learning-master/restyle/models/e4e_modules/latent_codes_pool.py | import random
import torch
class LatentCodesPool:
"""This class implements latent codes buffer that stores previously generated w latent codes.
This buffer enables us to update discriminators using a history of generated w's
rather than the ones produced by the latest encoder.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class
Parameters:
pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
"""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_ws = 0
self.ws = []
def query(self, ws):
"""Return w's from the pool.
Parameters:
ws: the latest generated w's from the generator
Returns w's from the buffer.
        With 50% probability, the buffer returns the input w's.
        With 50% probability, the buffer returns w's previously stored in the buffer
        and inserts the current w's into the buffer.
"""
if self.pool_size == 0: # if the buffer size is 0, do nothing
return ws
return_ws = []
for w in ws: # ws.shape: (batch, 512) or (batch, n_latent, 512)
# w = torch.unsqueeze(image.data, 0)
if w.ndim == 2:
                i = random.randint(0, len(w) - 1)  # pick a random latent index as a candidate
                w = w[i]
            self.handle_w(w, return_ws)
        return_ws = torch.stack(return_ws, 0)   # collect all the latent codes and return
return return_ws
def handle_w(self, w, return_ws):
        if self.num_ws < self.pool_size:  # if the buffer is not full, keep inserting current codes into the buffer
self.num_ws = self.num_ws + 1
self.ws.append(w)
return_ws.append(w)
else:
p = random.uniform(0, 1)
if p > 0.5: # by 50% chance, the buffer will return a previously stored latent code, and insert the current code into the buffer
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.ws[random_id].clone()
self.ws[random_id] = w
return_ws.append(tmp)
            else:  # by another 50% chance, the buffer will return the current latent code
return_ws.append(w)
| 2,349 | 40.964286 | 141 | py |
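
A small sketch of how the pool above is typically queried when training a latent discriminator (same PYTHONPATH assumption; shapes are illustrative). The 50/50 replacement behaviour only starts once the pool is full:

```python
import torch
from models.e4e_modules.latent_codes_pool import LatentCodesPool

pool = LatentCodesPool(pool_size=50)
fake_w = torch.randn(4, 18, 512)   # latest codes from the encoder: (batch, n_latent, 512)
w_for_disc = pool.query(fake_w)    # mix of fresh codes and codes stored on earlier steps
print(w_for_disc.shape)            # torch.Size([4, 512]): one randomly chosen latent per sample
```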
IID_representation_learning | IID_representation_learning-master/restyle/models/encoders/__init__.py | 0 | 0 | 0 | py |
|
IID_representation_learning | IID_representation_learning-master/restyle/models/encoders/fpn_encoders.py | import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
from torchvision.models.resnet import resnet34
from models.encoders.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE
from models.encoders.map2style import GradualStyleBlock
class GradualStyleEncoder(Module):
"""
    Original encoder architecture from pixel2style2pixel. This class uses an FPN-based architecture applied over
    a ResNet IRSE-50 backbone.
Note this class is designed to be used for the human facial domain.
"""
def __init__(self, num_layers, mode='ir', n_styles=18, opts=None):
super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
BatchNorm2d(64),
PReLU(64))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
self.style_count = n_styles
self.coarse_ind = 3
self.middle_ind = 7
for i in range(self.style_count):
if i < self.coarse_ind:
style = GradualStyleBlock(512, 512, 16)
elif i < self.middle_ind:
style = GradualStyleBlock(512, 512, 32)
else:
style = GradualStyleBlock(512, 512, 64)
self.styles.append(style)
self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
def _upsample_add(self, x, y):
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
def forward(self, x):
x = self.input_layer(x)
latents = []
modulelist = list(self.body._modules.values())
for i, l in enumerate(modulelist):
x = l(x)
if i == 6:
c1 = x
elif i == 20:
c2 = x
elif i == 23:
c3 = x
for j in range(self.coarse_ind):
latents.append(self.styles[j](c3))
p2 = self._upsample_add(c3, self.latlayer1(c2))
for j in range(self.coarse_ind, self.middle_ind):
latents.append(self.styles[j](p2))
p1 = self._upsample_add(p2, self.latlayer2(c1))
for j in range(self.middle_ind, self.style_count):
latents.append(self.styles[j](p1))
out = torch.stack(latents, dim=1)
return out
class ResNetGradualStyleEncoder(Module):
"""
    Original encoder architecture from pixel2style2pixel. This class uses an FPN-based architecture applied over
    a ResNet34 backbone.
"""
def __init__(self, n_styles=18, opts=None):
super(ResNetGradualStyleEncoder, self).__init__()
self.conv1 = nn.Conv2d(opts.input_nc, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = BatchNorm2d(64)
self.relu = PReLU(64)
resnet_basenet = resnet34(pretrained=True)
blocks = [
resnet_basenet.layer1,
resnet_basenet.layer2,
resnet_basenet.layer3,
resnet_basenet.layer4
]
modules = []
for block in blocks:
for bottleneck in block:
modules.append(bottleneck)
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
self.style_count = n_styles
self.coarse_ind = 3
self.middle_ind = 7
for i in range(self.style_count):
if i < self.coarse_ind:
style = GradualStyleBlock(512, 512, 16)
elif i < self.middle_ind:
style = GradualStyleBlock(512, 512, 32)
else:
style = GradualStyleBlock(512, 512, 64)
self.styles.append(style)
self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
def _upsample_add(self, x, y):
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
latents = []
modulelist = list(self.body._modules.values())
for i, l in enumerate(modulelist):
x = l(x)
if i == 6:
c1 = x
elif i == 12:
c2 = x
elif i == 15:
c3 = x
for j in range(self.coarse_ind):
latents.append(self.styles[j](c3))
p2 = self._upsample_add(c3, self.latlayer1(c2))
for j in range(self.coarse_ind, self.middle_ind):
latents.append(self.styles[j](p2))
p1 = self._upsample_add(p2, self.latlayer2(c1))
for j in range(self.middle_ind, self.style_count):
latents.append(self.styles[j](p1))
out = torch.stack(latents, dim=1)
return out
| 5,672 | 34.45625 | 114 | py |
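
A shape-check sketch for the FPN encoder above. It assumes the full restyle package is importable (GradualStyleBlock pulls in EqualLinear from models.stylegan2) and mocks `opts` with the single field read here:

```python
import torch
from types import SimpleNamespace
from models.encoders.fpn_encoders import GradualStyleEncoder

opts = SimpleNamespace(input_nc=3)               # only opts.input_nc is used by this class
encoder = GradualStyleEncoder(num_layers=50, mode='ir', n_styles=18, opts=opts)
x = torch.randn(2, 3, 256, 256)                  # 256x256 inputs, as used by ReStyle
w_plus = encoder(x)
print(w_plus.shape)                              # torch.Size([2, 18, 512]): one style vector per StyleGAN layer
```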
IID_representation_learning | IID_representation_learning-master/restyle/models/encoders/helpers.py | from collections import namedtuple
import torch
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
""" A named tuple describing a ResNet block. """
def get_block(in_channel, depth, num_units, stride=2):
return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
else:
raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
return blocks
class SEModule(Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_IR(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth)
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class bottleneck_IR_SE(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth)
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth),
SEModule(depth, 16)
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
| 3,556 | 28.641667 | 112 | py |
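
A small sketch of the helpers above: `get_blocks` describes the stage layout of the IR backbones, and each bottleneck module halves the spatial size when its stride is 2.

```python
import torch
from models.encoders.helpers import get_blocks, bottleneck_IR_SE

blocks = get_blocks(50)
print([len(stage) for stage in blocks])   # [3, 4, 14, 3] bottlenecks per stage for IR-50

unit = bottleneck_IR_SE(in_channel=64, depth=128, stride=2)
x = torch.randn(1, 64, 56, 56)
print(unit(x).shape)                      # torch.Size([1, 128, 28, 28])
```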
IID_representation_learning | IID_representation_learning-master/restyle/models/encoders/map2style.py | import numpy as np
from torch import nn
from torch.nn import Conv2d, Module
from models.stylegan2.model import EqualLinear
class GradualStyleBlock(Module):
def __init__(self, in_c, out_c, spatial):
super(GradualStyleBlock, self).__init__()
self.out_c = out_c
self.spatial = spatial
num_pools = int(np.log2(spatial))
modules = []
modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
nn.LeakyReLU()]
for i in range(num_pools - 1):
modules += [
Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1),
nn.LeakyReLU()
]
self.convs = nn.Sequential(*modules)
self.linear = EqualLinear(out_c, out_c, lr_mul=1)
self.norm = nn.LayerNorm([out_c], elementwise_affine=False)
def forward(self, x):
x = self.convs(x)
x = x.view(-1, self.out_c)
x = self.linear(x)
x = self.norm(x)
return x
class GradualNoiseBlock(Module):
def __init__(self, in_c, out_c, stride, affine):
super(GradualNoiseBlock, self).__init__()
self.conv = nn.Conv2d(in_c, out_c, kernel_size=3,
stride=stride, padding=1, bias=False)
self.norm = nn.InstanceNorm2d(out_c, affine=True)
self.relu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(out_c, 1, kernel_size=3,
stride=1, padding=1, bias=False)
self.norm1 = nn.InstanceNorm2d(1, affine=affine)
self.downsample = nn.Conv2d(in_c, 1, kernel_size=3,
stride=2, padding=1, bias=False)
def forward(self, x):
identity = self.downsample(x)
x = self.conv(x)
x = self.norm(x)
x = self.relu(x)
y = self.conv1(x) + identity
y = self.norm1(y)
return x, y
| 1,887 | 32.714286 | 76 | py |
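
Shape sketches for the two map2style blocks above (EqualLinear comes from models.stylegan2 in the same repo; all sizes are illustrative):

```python
import torch
from models.encoders.map2style import GradualStyleBlock, GradualNoiseBlock

# A 16x16x512 feature map is reduced to a single 512-d style vector by log2(16) = 4 stride-2 convs.
style_block = GradualStyleBlock(in_c=512, out_c=512, spatial=16)
feat = torch.randn(2, 512, 16, 16)
print(style_block(feat).shape)            # torch.Size([2, 512])

# The noise block downsamples the RGB input and emits a 1-channel noise map at each resolution.
noise_block = GradualNoiseBlock(in_c=3, out_c=32, stride=2, affine=True)
x = torch.randn(2, 3, 256, 256)
feats, noise_map = noise_block(x)
print(feats.shape, noise_map.shape)       # torch.Size([2, 32, 128, 128]) torch.Size([2, 1, 128, 128])
```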
IID_representation_learning | IID_representation_learning-master/restyle/models/encoders/model_irse.py | from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
from models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
BatchNorm2d(64),
PReLU(64))
if input_size == 112:
self.output_layer = Sequential(BatchNorm2d(512),
Dropout(drop_ratio),
Flatten(),
Linear(512 * 7 * 7, 512),
BatchNorm1d(512, affine=affine))
else:
self.output_layer = Sequential(BatchNorm2d(512),
Dropout(drop_ratio),
Flatten(),
Linear(512 * 14 * 14, 512),
BatchNorm1d(512, affine=affine))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
self.body = Sequential(*modules)
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
x = self.output_layer(x)
return l2_norm(x)
def IR_50(input_size):
"""Constructs a ir-50 model."""
model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
return model
def IR_101(input_size):
"""Constructs a ir-101 model."""
model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
return model
def IR_152(input_size):
"""Constructs a ir-152 model."""
model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
return model
def IR_SE_50(input_size):
"""Constructs a ir_se-50 model."""
model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
return model
def IR_SE_101(input_size):
"""Constructs a ir_se-101 model."""
model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
return model
def IR_SE_152(input_size):
"""Constructs a ir_se-152 model."""
model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
return model
| 2,836 | 32.376471 | 97 | py |
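
A minimal sketch of the backbone factories above. The network here is randomly initialized; in the repo a pretrained IR-SE-50 checkpoint is normally loaded (e.g. for the identity loss) before the embeddings are meaningful:

```python
import torch
from models.encoders.model_irse import IR_SE_50

net = IR_SE_50(input_size=112).eval()
x = torch.randn(1, 3, 112, 112)
with torch.no_grad():
    emb = net(x)
print(emb.shape)            # torch.Size([1, 512])
print(emb.norm(dim=1))      # ~1.0, since the output is L2-normalized
```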
IID_representation_learning | IID_representation_learning-master/restyle/models/encoders/restyle_e4e_encoders.py | from enum import Enum
from torch import nn
from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
from torchvision.models import resnet34
from models.encoders.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE
from models.encoders.map2style import GradualStyleBlock
class ProgressiveStage(Enum):
WTraining = 0
Delta1Training = 1
Delta2Training = 2
Delta3Training = 3
Delta4Training = 4
Delta5Training = 5
Delta6Training = 6
Delta7Training = 7
Delta8Training = 8
Delta9Training = 9
Delta10Training = 10
Delta11Training = 11
Delta12Training = 12
Delta13Training = 13
Delta14Training = 14
Delta15Training = 15
Delta16Training = 16
Delta17Training = 17
Inference = 18
class ProgressiveBackboneEncoder(Module):
"""
The simpler backbone architecture used by ReStyle where all style vectors are extracted from the final 16x16 feature
    map of the encoder. This class uses the simplified architecture applied over a ResNet IRSE-50 backbone with the
progressive training scheme from e4e_modules.
Note this class is designed to be used for the human facial domain.
"""
def __init__(self, num_layers, mode='ir', n_styles=18, opts=None):
super(ProgressiveBackboneEncoder, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
BatchNorm2d(64),
PReLU(64))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
self.style_count = n_styles
for i in range(self.style_count):
style = GradualStyleBlock(512, 512, 16)
self.styles.append(style)
self.progressive_stage = ProgressiveStage.Inference
def get_deltas_starting_dimensions(self):
        ''' Get a list of the starting dimensions of the deltas, i.e. the layers from which each delta is applied '''
        return list(range(self.style_count))  # each style dimension has a delta applied to it
def set_progressive_stage(self, new_stage: ProgressiveStage):
        # In this encoder we train the entire pyramid (at least as a first-stage experiment)
self.progressive_stage = new_stage
print('Changed progressive stage to: ', new_stage)
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
# get initial w0 from first map2style layer
w0 = self.styles[0](x)
w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
# learn the deltas up to the current stage
stage = self.progressive_stage.value
for i in range(1, min(stage + 1, self.style_count)):
delta_i = self.styles[i](x)
w[:, i] += delta_i
return w
class ResNetProgressiveBackboneEncoder(Module):
"""
The simpler backbone architecture used by ReStyle where all style vectors are extracted from the final 16x16 feature
    map of the encoder. This class uses the simplified architecture applied over a ResNet34 backbone with the
progressive training scheme from e4e_modules.
"""
def __init__(self, n_styles=18, opts=None):
super(ResNetProgressiveBackboneEncoder, self).__init__()
self.conv1 = nn.Conv2d(opts.input_nc, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = BatchNorm2d(64)
self.relu = PReLU(64)
resnet_basenet = resnet34(pretrained=True)
blocks = [
resnet_basenet.layer1,
resnet_basenet.layer2,
resnet_basenet.layer3,
resnet_basenet.layer4
]
modules = []
for block in blocks:
for bottleneck in block:
modules.append(bottleneck)
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
self.style_count = n_styles
for i in range(self.style_count):
style = GradualStyleBlock(512, 512, 16)
self.styles.append(style)
self.progressive_stage = ProgressiveStage.Inference
def get_deltas_starting_dimensions(self):
        ''' Get a list of the starting dimensions of the deltas, i.e. the layers from which each delta is applied '''
        return list(range(self.style_count))  # each style dimension has a delta applied to it
def set_progressive_stage(self, new_stage: ProgressiveStage):
        # In this encoder we train the entire pyramid (at least as a first-stage experiment)
self.progressive_stage = new_stage
print('Changed progressive stage to: ', new_stage)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.body(x)
# get initial w0 from first map2style layer
w0 = self.styles[0](x)
w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
# learn the deltas up to the current stage
stage = self.progressive_stage.value
for i in range(1, min(stage + 1, self.style_count)):
delta_i = self.styles[i](x)
w[:, i] += delta_i
return w
| 5,676 | 36.846667 | 120 | py |
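
A sketch of the progressive scheme above: at stage k only the first k delta heads contribute, so at stage 0 every latent equals w0. `opts` is mocked, and input_nc is set to 6 because ReStyle concatenates the input image with the current reconstruction:

```python
import torch
from types import SimpleNamespace
from models.encoders.restyle_e4e_encoders import ProgressiveBackboneEncoder, ProgressiveStage

opts = SimpleNamespace(input_nc=6)
encoder = ProgressiveBackboneEncoder(num_layers=50, mode='ir', n_styles=18, opts=opts)

encoder.set_progressive_stage(ProgressiveStage.WTraining)   # stage 0: only w0, no deltas
x = torch.randn(1, 6, 256, 256)
w = encoder(x)
print(w.shape)                                # torch.Size([1, 18, 512])
print(torch.equal(w[:, 0], w[:, 5]))          # True: every entry is still w0

encoder.set_progressive_stage(ProgressiveStage.Inference)   # all 17 deltas active
```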
IID_representation_learning | IID_representation_learning-master/restyle/models/encoders/restyle_psp_encoders.py | import torch
from torch import nn
from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
from torchvision.models.resnet import resnet34
from models.encoders.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE
from models.encoders.map2style import GradualStyleBlock, GradualNoiseBlock
class BackboneEncoder(Module):
"""
The simpler backbone architecture used by ReStyle where all style vectors are extracted from the final 16x16 feature
    map of the encoder. This class uses the simplified architecture applied over a ResNet IRSE-50 backbone.
Note this class is designed to be used for the human facial domain.
"""
def __init__(self, num_layers, mode='ir', n_styles=18, opts=None):
super(BackboneEncoder, self).__init__()
assert num_layers in [50, 100, 152], \
            'num_layers should be 50, 100, or 152'
assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
BatchNorm2d(64),
PReLU(64))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
self.style_count = n_styles
for i in range(self.style_count):
style = GradualStyleBlock(512, 512, 16)
self.styles.append(style)
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
latents = []
for j in range(self.style_count):
latents.append(self.styles[j](x))
out = torch.stack(latents, dim=1)
return out
class ResNetBackboneEncoder(Module):
"""
The simpler backbone architecture used by ReStyle where all style vectors are extracted from the final 16x16 feature
    map of the encoder. This class uses the simplified architecture applied over a ResNet34 backbone.
"""
def __init__(self, n_styles=18, opts=None, affine=True):
super(ResNetBackboneEncoder, self).__init__()
self.conv1 = nn.Conv2d(opts.input_nc, 64, kernel_size=7,
stride=2, padding=3, bias=False)
self.bn1 = nn.InstanceNorm2d(64, affine=True)
self.relu = PReLU(64)
resnet_basenet = resnet34(pretrained=False,
norm_layer=nn.InstanceNorm2d)
blocks = [
resnet_basenet.layer1,
resnet_basenet.layer2,
resnet_basenet.layer3,
resnet_basenet.layer4
]
modules = []
for block in blocks:
for bottleneck in block:
modules.append(bottleneck)
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
self.style_count = n_styles
for _ in range(self.style_count):
style = GradualStyleBlock(512, 512, 16)
self.styles.append(style)
self.noises = nn.ModuleList()
self.noises_cnt = (n_styles - 1) // 2 - 1
_inp_nc = 3
_out_nc = 32
_strd = 2
for i in range(self.noises_cnt):
noise = GradualNoiseBlock(_inp_nc, _out_nc, _strd, affine)
self.noises.append(noise)
_inp_nc = _out_nc
_out_nc *= 2
print(self.noises)
def forward(self, x):
sty = self.conv1(x)
sty = self.bn1(sty)
sty = self.relu(sty)
sty = self.body(sty)
styles = []
for j in range(self.style_count):
styles.append(self.styles[j](sty))
styles = torch.stack(styles, dim=1)
noise = x[:, :3].detach().clone()
noises = list([None, None])
for i in range(self.noises_cnt):
noise, noise_out = self.noises[i](noise)
noises = [noise_out, noise_out] + noises
noises = [None] + noises
return styles, noises
| 4,334 | 35.737288 | 120 | py |
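
A shape sketch for `ResNetBackboneEncoder` above, the variant extended in this repo to also predict per-resolution noise maps alongside the style codes (mocked `opts`; sizes are illustrative):

```python
import torch
from types import SimpleNamespace
from models.encoders.restyle_psp_encoders import ResNetBackboneEncoder

opts = SimpleNamespace(input_nc=6)            # image + current reconstruction
encoder = ResNetBackboneEncoder(n_styles=18, opts=opts)
x = torch.randn(1, 6, 256, 256)
styles, noises = encoder(x)
print(styles.shape)                           # torch.Size([1, 18, 512])
print(len(noises))                            # 17: None placeholders plus pairs of noise maps
print(noises[1].shape, noises[3].shape)       # smallest maps first; resolution doubles down the list
```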
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/__init__.py | 0 | 0 | 0 | py |
|
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn.py | import numpy as np
import torch
from PIL import Image
from models.mtcnn.mtcnn_pytorch.src.get_nets import PNet, RNet, ONet
from models.mtcnn.mtcnn_pytorch.src.box_utils import nms, calibrate_box, get_image_boxes, convert_to_square
from models.mtcnn.mtcnn_pytorch.src.first_stage import run_first_stage
from models.mtcnn.mtcnn_pytorch.src.align_trans import get_reference_facial_points, warp_and_crop_face
device = 'cuda:0'
class MTCNN():
def __init__(self):
print(device)
self.pnet = PNet().to(device)
self.rnet = RNet().to(device)
self.onet = ONet().to(device)
self.pnet.eval()
self.rnet.eval()
self.onet.eval()
self.refrence = get_reference_facial_points(default_square=True)
def align(self, img):
_, landmarks = self.detect_faces(img)
if len(landmarks) == 0:
return None, None
facial5points = [[landmarks[0][j], landmarks[0][j + 5]] for j in range(5)]
warped_face, tfm = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(112, 112))
return Image.fromarray(warped_face), tfm
def align_multi(self, img, limit=None, min_face_size=30.0):
boxes, landmarks = self.detect_faces(img, min_face_size)
if limit:
boxes = boxes[:limit]
landmarks = landmarks[:limit]
faces = []
tfms = []
for landmark in landmarks:
facial5points = [[landmark[j], landmark[j + 5]] for j in range(5)]
warped_face, tfm = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(112, 112))
faces.append(Image.fromarray(warped_face))
tfms.append(tfm)
return boxes, faces, tfms
def detect_faces(self, image, min_face_size=20.0,
thresholds=[0.15, 0.25, 0.35],
nms_thresholds=[0.7, 0.7, 0.7]):
"""
Arguments:
image: an instance of PIL.Image.
min_face_size: a float number.
thresholds: a list of length 3.
nms_thresholds: a list of length 3.
Returns:
        two float numpy arrays of shapes [n_boxes, 5] and [n_boxes, 10]:
        bounding boxes (with detection scores) and facial landmarks.
"""
# BUILD AN IMAGE PYRAMID
width, height = image.size
min_length = min(height, width)
min_detection_size = 12
factor = 0.707 # sqrt(0.5)
# scales for scaling the image
scales = []
# scales the image so that
# minimum size that we can detect equals to
# minimum face size that we want to detect
m = min_detection_size / min_face_size
min_length *= m
factor_count = 0
while min_length > min_detection_size:
scales.append(m * factor ** factor_count)
min_length *= factor
factor_count += 1
# STAGE 1
# it will be returned
bounding_boxes = []
with torch.no_grad():
# run P-Net on different scales
for s in scales:
boxes = run_first_stage(image, self.pnet, scale=s, threshold=thresholds[0])
bounding_boxes.append(boxes)
# collect boxes (and offsets, and scores) from different scales
bounding_boxes = [i for i in bounding_boxes if i is not None]
bounding_boxes = np.vstack(bounding_boxes)
keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0])
bounding_boxes = bounding_boxes[keep]
# use offsets predicted by pnet to transform bounding boxes
bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:])
# shape [n_boxes, 5]
bounding_boxes = convert_to_square(bounding_boxes)
bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
# STAGE 2
img_boxes = get_image_boxes(bounding_boxes, image, size=24)
img_boxes = torch.FloatTensor(img_boxes).to(device)
output = self.rnet(img_boxes)
offsets = output[0].cpu().data.numpy() # shape [n_boxes, 4]
probs = output[1].cpu().data.numpy() # shape [n_boxes, 2]
keep = np.where(probs[:, 1] > thresholds[1])[0]
bounding_boxes = bounding_boxes[keep]
bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
offsets = offsets[keep]
keep = nms(bounding_boxes, nms_thresholds[1])
bounding_boxes = bounding_boxes[keep]
bounding_boxes = calibrate_box(bounding_boxes, offsets[keep])
bounding_boxes = convert_to_square(bounding_boxes)
bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
# STAGE 3
img_boxes = get_image_boxes(bounding_boxes, image, size=48)
if len(img_boxes) == 0:
return [], []
img_boxes = torch.FloatTensor(img_boxes).to(device)
output = self.onet(img_boxes)
landmarks = output[0].cpu().data.numpy() # shape [n_boxes, 10]
offsets = output[1].cpu().data.numpy() # shape [n_boxes, 4]
probs = output[2].cpu().data.numpy() # shape [n_boxes, 2]
keep = np.where(probs[:, 1] > thresholds[2])[0]
bounding_boxes = bounding_boxes[keep]
bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
offsets = offsets[keep]
landmarks = landmarks[keep]
# compute landmark points
width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0
height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0
xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1]
landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1) * landmarks[:, 0:5]
landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1) * landmarks[:, 5:10]
bounding_boxes = calibrate_box(bounding_boxes, offsets)
keep = nms(bounding_boxes, nms_thresholds[2], mode='min')
bounding_boxes = bounding_boxes[keep]
landmarks = landmarks[keep]
return bounding_boxes, landmarks
| 6,220 | 38.624204 | 116 | py |
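
A usage sketch for the detector above. It needs a CUDA device (the module hard-codes device = 'cuda:0') and the bundled P/R/O-Net weights referenced via configs.paths_config; 'face.jpg' is a hypothetical input path:

```python
from PIL import Image
from models.mtcnn.mtcnn import MTCNN

mtcnn = MTCNN()                                # loads PNet / RNet / ONet onto cuda:0
img = Image.open('face.jpg').convert('RGB')
boxes, landmarks = mtcnn.detect_faces(img)     # per-face boxes (+ score) and 5-point landmarks
aligned, tfm = mtcnn.align(img)                # 112x112 crop warped onto the reference landmarks
if aligned is not None:
    aligned.save('face_aligned.jpg')
```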
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/__init__.py | 0 | 0 | 0 | py |
|
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/src/__init__.py | from .visualization_utils import show_bboxes
from .detector import detect_faces
| 80 | 26 | 44 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/src/align_trans.py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 15:43:29 2017
@author: zhaoy
"""
import numpy as np
import cv2
# from scipy.linalg import lstsq
# from scipy.ndimage import geometric_transform # , map_coordinates
from models.mtcnn.mtcnn_pytorch.src.matlab_cp2tform import get_similarity_transform_for_cv2
# reference facial points, a list of coordinates (x,y)
REFERENCE_FACIAL_POINTS = [
[30.29459953, 51.69630051],
[65.53179932, 51.50139999],
[48.02519989, 71.73660278],
[33.54930115, 92.3655014],
[62.72990036, 92.20410156]
]
DEFAULT_CROP_SIZE = (96, 112)
class FaceWarpException(Exception):
def __str__(self):
        return 'In File {}:{}'.format(__file__, super().__str__())
def get_reference_facial_points(output_size=None,
inner_padding_factor=0.0,
outer_padding=(0, 0),
default_square=False):
"""
Function:
----------
get reference 5 key points according to crop settings:
0. Set default crop_size:
if default_square:
crop_size = (112, 112)
else:
crop_size = (96, 112)
1. Pad the crop_size by inner_padding_factor in each side;
2. Resize crop_size into (output_size - outer_padding*2),
pad into output_size with outer_padding;
3. Output reference_5point;
Parameters:
----------
@output_size: (w, h) or None
size of aligned face image
    @inner_padding_factor: a float in [0, 1]
        padding factor for the inner (w, h) region
    @outer_padding: (w_pad, h_pad)
        padding added on each side of the output image
@default_square: True or False
if True:
default crop_size = (112, 112)
else:
default crop_size = (96, 112);
!!! make sure, if output_size is not None:
(output_size - outer_padding)
= some_scale * (default crop_size * (1.0 + inner_padding_factor))
Returns:
----------
@reference_5point: 5x2 np.array
each row is a pair of transformed coordinates (x, y)
"""
# print('\n===> get_reference_facial_points():')
# print('---> Params:')
# print(' output_size: ', output_size)
# print(' inner_padding_factor: ', inner_padding_factor)
# print(' outer_padding:', outer_padding)
# print(' default_square: ', default_square)
tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
tmp_crop_size = np.array(DEFAULT_CROP_SIZE)
# 0) make the inner region a square
if default_square:
size_diff = max(tmp_crop_size) - tmp_crop_size
tmp_5pts += size_diff / 2
tmp_crop_size += size_diff
# print('---> default:')
# print(' crop_size = ', tmp_crop_size)
# print(' reference_5pts = ', tmp_5pts)
if (output_size and
output_size[0] == tmp_crop_size[0] and
output_size[1] == tmp_crop_size[1]):
# print('output_size == DEFAULT_CROP_SIZE {}: return default reference points'.format(tmp_crop_size))
return tmp_5pts
if (inner_padding_factor == 0 and
outer_padding == (0, 0)):
if output_size is None:
# print('No paddings to do: return default reference points')
return tmp_5pts
else:
raise FaceWarpException(
'No paddings to do, output_size must be None or {}'.format(tmp_crop_size))
# check output size
if not (0 <= inner_padding_factor <= 1.0):
raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')
if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0)
and output_size is None):
        output_size = (tmp_crop_size *
                       (1 + inner_padding_factor * 2)).astype(np.int32)
output_size += np.array(outer_padding)
# print(' deduced from paddings, output_size = ', output_size)
if not (outer_padding[0] < output_size[0]
and outer_padding[1] < output_size[1]):
raise FaceWarpException('Not (outer_padding[0] < output_size[0]'
'and outer_padding[1] < output_size[1])')
# 1) pad the inner region according inner_padding_factor
# print('---> STEP1: pad the inner region according inner_padding_factor')
if inner_padding_factor > 0:
size_diff = tmp_crop_size * inner_padding_factor * 2
tmp_5pts += size_diff / 2
tmp_crop_size += np.round(size_diff).astype(np.int32)
# print(' crop_size = ', tmp_crop_size)
# print(' reference_5pts = ', tmp_5pts)
# 2) resize the padded inner region
# print('---> STEP2: resize the padded inner region')
size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2
# print(' crop_size = ', tmp_crop_size)
# print(' size_bf_outer_pad = ', size_bf_outer_pad)
if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
raise FaceWarpException('Must have (output_size - outer_padding)'
'= some_scale * (crop_size * (1.0 + inner_padding_factor)')
scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
# print(' resize scale_factor = ', scale_factor)
tmp_5pts = tmp_5pts * scale_factor
# size_diff = tmp_crop_size * (scale_factor - min(scale_factor))
# tmp_5pts = tmp_5pts + size_diff / 2
tmp_crop_size = size_bf_outer_pad
# print(' crop_size = ', tmp_crop_size)
# print(' reference_5pts = ', tmp_5pts)
# 3) add outer_padding to make output_size
reference_5point = tmp_5pts + np.array(outer_padding)
tmp_crop_size = output_size
# print('---> STEP3: add outer_padding to make output_size')
# print(' crop_size = ', tmp_crop_size)
# print(' reference_5pts = ', tmp_5pts)
# print('===> end get_reference_facial_points\n')
return reference_5point
def get_affine_transform_matrix(src_pts, dst_pts):
"""
Function:
----------
get affine transform matrix 'tfm' from src_pts to dst_pts
Parameters:
----------
@src_pts: Kx2 np.array
source points matrix, each row is a pair of coordinates (x, y)
@dst_pts: Kx2 np.array
destination points matrix, each row is a pair of coordinates (x, y)
Returns:
----------
@tfm: 2x3 np.array
transform matrix from src_pts to dst_pts
"""
tfm = np.float32([[1, 0, 0], [0, 1, 0]])
n_pts = src_pts.shape[0]
ones = np.ones((n_pts, 1), src_pts.dtype)
src_pts_ = np.hstack([src_pts, ones])
dst_pts_ = np.hstack([dst_pts, ones])
# #print(('src_pts_:\n' + str(src_pts_))
# #print(('dst_pts_:\n' + str(dst_pts_))
    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=None)
# #print(('np.linalg.lstsq return A: \n' + str(A))
# #print(('np.linalg.lstsq return res: \n' + str(res))
# #print(('np.linalg.lstsq return rank: \n' + str(rank))
# #print(('np.linalg.lstsq return s: \n' + str(s))
if rank == 3:
tfm = np.float32([
[A[0, 0], A[1, 0], A[2, 0]],
[A[0, 1], A[1, 1], A[2, 1]]
])
elif rank == 2:
tfm = np.float32([
[A[0, 0], A[1, 0], 0],
[A[0, 1], A[1, 1], 0]
])
return tfm
def warp_and_crop_face(src_img,
facial_pts,
reference_pts=None,
crop_size=(96, 112),
                       align_type='similarity'):
"""
Function:
----------
apply affine transform 'trans' to uv
Parameters:
----------
    @src_img: HxWx3 np.array
input image
@facial_pts: could be
1)a list of K coordinates (x,y)
or
2) Kx2 or 2xK np.array
each row or col is a pair of coordinates (x, y)
@reference_pts: could be
1) a list of K coordinates (x,y)
or
2) Kx2 or 2xK np.array
each row or col is a pair of coordinates (x, y)
or
3) None
if None, use default reference facial points
@crop_size: (w, h)
output face image size
@align_type: transform type, could be one of
1) 'similarity': use similarity transform
2) 'cv2_affine': use the first 3 points to do affine transform,
by calling cv2.getAffineTransform()
3) 'affine': use all points to do affine transform
Returns:
----------
@face_img: output face image with size (w, h) = @crop_size
"""
if reference_pts is None:
if crop_size[0] == 96 and crop_size[1] == 112:
reference_pts = REFERENCE_FACIAL_POINTS
else:
default_square = False
inner_padding_factor = 0
outer_padding = (0, 0)
output_size = crop_size
reference_pts = get_reference_facial_points(output_size,
inner_padding_factor,
outer_padding,
default_square)
ref_pts = np.float32(reference_pts)
ref_pts_shp = ref_pts.shape
if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
raise FaceWarpException(
'reference_pts.shape must be (K,2) or (2,K) and K>2')
if ref_pts_shp[0] == 2:
ref_pts = ref_pts.T
src_pts = np.float32(facial_pts)
src_pts_shp = src_pts.shape
if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
raise FaceWarpException(
'facial_pts.shape must be (K,2) or (2,K) and K>2')
if src_pts_shp[0] == 2:
src_pts = src_pts.T
# #print('--->src_pts:\n', src_pts
# #print('--->ref_pts\n', ref_pts
if src_pts.shape != ref_pts.shape:
raise FaceWarpException(
'facial_pts and reference_pts must have the same shape')
    if align_type == 'cv2_affine':
tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
# #print(('cv2.getAffineTransform() returns tfm=\n' + str(tfm))
    elif align_type == 'affine':
tfm = get_affine_transform_matrix(src_pts, ref_pts)
# #print(('get_affine_transform_matrix() returns tfm=\n' + str(tfm))
else:
tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)
# #print(('get_similarity_transform_for_cv2() returns tfm=\n' + str(tfm))
# #print('--->Transform matrix: '
# #print(('type(tfm):' + str(type(tfm)))
# #print(('tfm.dtype:' + str(tfm.dtype))
# #print( tfm
face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))
return face_img, tfm
| 11,036 | 35.186885 | 109 | py |
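
A sketch of the alignment helpers above: build the reference layout for a square 112x112 crop, then warp a hypothetical set of five detected landmarks onto it (the black stand-in image just makes the call runnable):

```python
import numpy as np
from models.mtcnn.mtcnn_pytorch.src.align_trans import get_reference_facial_points, warp_and_crop_face

ref_pts = get_reference_facial_points(default_square=True)
print(ref_pts.shape)                  # (5, 2) reference (x, y) positions inside a 112x112 crop

# Hypothetical landmarks (left eye, right eye, nose, mouth corners) detected in a 400x400 image.
facial5points = [[120.0, 150.0], [180.0, 148.0], [152.0, 185.0], [128.0, 220.0], [176.0, 218.0]]
src_img = np.zeros((400, 400, 3), dtype=np.uint8)
warped, tfm = warp_and_crop_face(src_img, facial5points, ref_pts, crop_size=(112, 112))
print(warped.shape, tfm.shape)        # (112, 112, 3) (2, 3)
```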
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/src/box_utils.py | import numpy as np
from PIL import Image
def nms(boxes, overlap_threshold=0.5, mode='union'):
"""Non-maximum suppression.
Arguments:
boxes: a float numpy array of shape [n, 5],
where each row is (xmin, ymin, xmax, ymax, score).
overlap_threshold: a float number.
mode: 'union' or 'min'.
Returns:
list with indices of the selected boxes
"""
# if there are no boxes, return the empty list
if len(boxes) == 0:
return []
# list of picked indices
pick = []
# grab the coordinates of the bounding boxes
x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
area = (x2 - x1 + 1.0) * (y2 - y1 + 1.0)
ids = np.argsort(score) # in increasing order
while len(ids) > 0:
# grab index of the largest value
last = len(ids) - 1
i = ids[last]
pick.append(i)
# compute intersections
# of the box with the largest score
# with the rest of boxes
# left top corner of intersection boxes
ix1 = np.maximum(x1[i], x1[ids[:last]])
iy1 = np.maximum(y1[i], y1[ids[:last]])
# right bottom corner of intersection boxes
ix2 = np.minimum(x2[i], x2[ids[:last]])
iy2 = np.minimum(y2[i], y2[ids[:last]])
# width and height of intersection boxes
w = np.maximum(0.0, ix2 - ix1 + 1.0)
h = np.maximum(0.0, iy2 - iy1 + 1.0)
# intersections' areas
inter = w * h
if mode == 'min':
overlap = inter / np.minimum(area[i], area[ids[:last]])
elif mode == 'union':
# intersection over union (IoU)
overlap = inter / (area[i] + area[ids[:last]] - inter)
# delete all boxes where overlap is too big
ids = np.delete(
ids,
np.concatenate([[last], np.where(overlap > overlap_threshold)[0]])
)
return pick
def convert_to_square(bboxes):
"""Convert bounding boxes to a square form.
Arguments:
bboxes: a float numpy array of shape [n, 5].
Returns:
a float numpy array of shape [n, 5],
squared bounding boxes.
"""
square_bboxes = np.zeros_like(bboxes)
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
h = y2 - y1 + 1.0
w = x2 - x1 + 1.0
max_side = np.maximum(h, w)
square_bboxes[:, 0] = x1 + w * 0.5 - max_side * 0.5
square_bboxes[:, 1] = y1 + h * 0.5 - max_side * 0.5
square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0
square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0
return square_bboxes
def calibrate_box(bboxes, offsets):
"""Transform bounding boxes to be more like true bounding boxes.
'offsets' is one of the outputs of the nets.
Arguments:
bboxes: a float numpy array of shape [n, 5].
offsets: a float numpy array of shape [n, 4].
Returns:
a float numpy array of shape [n, 5].
"""
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
w = x2 - x1 + 1.0
h = y2 - y1 + 1.0
w = np.expand_dims(w, 1)
h = np.expand_dims(h, 1)
# this is what happening here:
# tx1, ty1, tx2, ty2 = [offsets[:, i] for i in range(4)]
# x1_true = x1 + tx1*w
# y1_true = y1 + ty1*h
# x2_true = x2 + tx2*w
# y2_true = y2 + ty2*h
# below is just more compact form of this
# are offsets always such that
# x1 < x2 and y1 < y2 ?
translation = np.hstack([w, h, w, h]) * offsets
bboxes[:, 0:4] = bboxes[:, 0:4] + translation
return bboxes
def get_image_boxes(bounding_boxes, img, size=24):
"""Cut out boxes from the image.
Arguments:
bounding_boxes: a float numpy array of shape [n, 5].
img: an instance of PIL.Image.
size: an integer, size of cutouts.
Returns:
a float numpy array of shape [n, 3, size, size].
"""
num_boxes = len(bounding_boxes)
width, height = img.size
[dy, edy, dx, edx, y, ey, x, ex, w, h] = correct_bboxes(bounding_boxes, width, height)
img_boxes = np.zeros((num_boxes, 3, size, size), 'float32')
for i in range(num_boxes):
img_box = np.zeros((h[i], w[i], 3), 'uint8')
img_array = np.asarray(img, 'uint8')
img_box[dy[i]:(edy[i] + 1), dx[i]:(edx[i] + 1), :] = \
img_array[y[i]:(ey[i] + 1), x[i]:(ex[i] + 1), :]
# resize
img_box = Image.fromarray(img_box)
img_box = img_box.resize((size, size), Image.BILINEAR)
img_box = np.asarray(img_box, 'float32')
img_boxes[i, :, :, :] = _preprocess(img_box)
return img_boxes
def correct_bboxes(bboxes, width, height):
"""Crop boxes that are too big and get coordinates
with respect to cutouts.
Arguments:
bboxes: a float numpy array of shape [n, 5],
where each row is (xmin, ymin, xmax, ymax, score).
width: a float number.
height: a float number.
Returns:
        dy, dx, edy, edx: int numpy arrays of shape [n],
            coordinates of the boxes with respect to the cutouts.
        y, x, ey, ex: int numpy arrays of shape [n],
            corrected ymin, xmin, ymax, xmax.
        h, w: int numpy arrays of shape [n],
            just heights and widths of boxes.
in the following order:
[dy, edy, dx, edx, y, ey, x, ex, w, h].
"""
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
w, h = x2 - x1 + 1.0, y2 - y1 + 1.0
num_boxes = bboxes.shape[0]
# 'e' stands for end
# (x, y) -> (ex, ey)
x, y, ex, ey = x1, y1, x2, y2
# we need to cut out a box from the image.
# (x, y, ex, ey) are corrected coordinates of the box
# in the image.
# (dx, dy, edx, edy) are coordinates of the box in the cutout
# from the image.
dx, dy = np.zeros((num_boxes,)), np.zeros((num_boxes,))
edx, edy = w.copy() - 1.0, h.copy() - 1.0
# if box's bottom right corner is too far right
ind = np.where(ex > width - 1.0)[0]
edx[ind] = w[ind] + width - 2.0 - ex[ind]
ex[ind] = width - 1.0
# if box's bottom right corner is too low
ind = np.where(ey > height - 1.0)[0]
edy[ind] = h[ind] + height - 2.0 - ey[ind]
ey[ind] = height - 1.0
# if box's top left corner is too far left
ind = np.where(x < 0.0)[0]
dx[ind] = 0.0 - x[ind]
x[ind] = 0.0
# if box's top left corner is too high
ind = np.where(y < 0.0)[0]
dy[ind] = 0.0 - y[ind]
y[ind] = 0.0
return_list = [dy, edy, dx, edx, y, ey, x, ex, w, h]
return_list = [i.astype('int32') for i in return_list]
return return_list
def _preprocess(img):
"""Preprocessing step before feeding the network.
Arguments:
img: a float numpy array of shape [h, w, c].
Returns:
a float numpy array of shape [1, c, h, w].
"""
img = img.transpose((2, 0, 1))
img = np.expand_dims(img, 0)
img = (img - 127.5) * 0.0078125
return img
| 6,936 | 28.025105 | 90 | py |
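
A self-contained sketch of the NMS helper above on three toy boxes; the second box is suppressed because its IoU with the top-scoring box is about 0.83:

```python
import numpy as np
from models.mtcnn.mtcnn_pytorch.src.box_utils import nms, convert_to_square

boxes = np.array([
    [10, 10, 50, 50, 0.95],      # highest score, kept
    [12, 12, 52, 52, 0.80],      # heavy overlap with the first box, suppressed
    [200, 200, 240, 260, 0.90],  # far away, kept
], dtype=np.float32)
print(nms(boxes, overlap_threshold=0.5))   # indices of the kept boxes: 0 and 2
print(convert_to_square(boxes)[2][:4])     # the tall third box padded into a square
```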
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/src/detector.py | import numpy as np
import torch
from .get_nets import PNet, RNet, ONet
from .box_utils import nms, calibrate_box, get_image_boxes, convert_to_square
from .first_stage import run_first_stage
def detect_faces(image, min_face_size=20.0,
thresholds=[0.6, 0.7, 0.8],
nms_thresholds=[0.7, 0.7, 0.7]):
"""
Arguments:
image: an instance of PIL.Image.
min_face_size: a float number.
thresholds: a list of length 3.
nms_thresholds: a list of length 3.
Returns:
        two float numpy arrays of shapes [n_boxes, 5] and [n_boxes, 10]:
        bounding boxes (with detection scores) and facial landmarks.
"""
# LOAD MODELS
pnet = PNet()
rnet = RNet()
onet = ONet()
onet.eval()
# BUILD AN IMAGE PYRAMID
width, height = image.size
min_length = min(height, width)
min_detection_size = 12
factor = 0.707 # sqrt(0.5)
# scales for scaling the image
scales = []
# scales the image so that
# minimum size that we can detect equals to
# minimum face size that we want to detect
m = min_detection_size / min_face_size
min_length *= m
factor_count = 0
while min_length > min_detection_size:
scales.append(m * factor ** factor_count)
min_length *= factor
factor_count += 1
# STAGE 1
# it will be returned
bounding_boxes = []
with torch.no_grad():
# run P-Net on different scales
for s in scales:
boxes = run_first_stage(image, pnet, scale=s, threshold=thresholds[0])
bounding_boxes.append(boxes)
# collect boxes (and offsets, and scores) from different scales
bounding_boxes = [i for i in bounding_boxes if i is not None]
bounding_boxes = np.vstack(bounding_boxes)
keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0])
bounding_boxes = bounding_boxes[keep]
# use offsets predicted by pnet to transform bounding boxes
bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:])
# shape [n_boxes, 5]
bounding_boxes = convert_to_square(bounding_boxes)
bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
# STAGE 2
img_boxes = get_image_boxes(bounding_boxes, image, size=24)
img_boxes = torch.FloatTensor(img_boxes)
output = rnet(img_boxes)
offsets = output[0].data.numpy() # shape [n_boxes, 4]
probs = output[1].data.numpy() # shape [n_boxes, 2]
keep = np.where(probs[:, 1] > thresholds[1])[0]
bounding_boxes = bounding_boxes[keep]
bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
offsets = offsets[keep]
keep = nms(bounding_boxes, nms_thresholds[1])
bounding_boxes = bounding_boxes[keep]
bounding_boxes = calibrate_box(bounding_boxes, offsets[keep])
bounding_boxes = convert_to_square(bounding_boxes)
bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
# STAGE 3
img_boxes = get_image_boxes(bounding_boxes, image, size=48)
if len(img_boxes) == 0:
return [], []
img_boxes = torch.FloatTensor(img_boxes)
output = onet(img_boxes)
landmarks = output[0].data.numpy() # shape [n_boxes, 10]
offsets = output[1].data.numpy() # shape [n_boxes, 4]
probs = output[2].data.numpy() # shape [n_boxes, 2]
keep = np.where(probs[:, 1] > thresholds[2])[0]
bounding_boxes = bounding_boxes[keep]
bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
offsets = offsets[keep]
landmarks = landmarks[keep]
# compute landmark points
width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0
height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0
xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1]
landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1) * landmarks[:, 0:5]
landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1) * landmarks[:, 5:10]
bounding_boxes = calibrate_box(bounding_boxes, offsets)
keep = nms(bounding_boxes, nms_thresholds[2], mode='min')
bounding_boxes = bounding_boxes[keep]
landmarks = landmarks[keep]
return bounding_boxes, landmarks
| 4,333 | 33.396825 | 101 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/src/first_stage.py | import torch
import math
from PIL import Image
import numpy as np
from .box_utils import nms, _preprocess
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = 'cuda:0'
def run_first_stage(image, net, scale, threshold):
"""Run P-Net, generate bounding boxes, and do NMS.
Arguments:
image: an instance of PIL.Image.
net: an instance of pytorch's nn.Module, P-Net.
scale: a float number,
scale width and height of the image by this number.
threshold: a float number,
threshold on the probability of a face when generating
bounding boxes from predictions of the net.
Returns:
a float numpy array of shape [n_boxes, 9],
bounding boxes with scores and offsets (4 + 1 + 4).
"""
# scale the image and convert it to a float array
width, height = image.size
sw, sh = math.ceil(width * scale), math.ceil(height * scale)
img = image.resize((sw, sh), Image.BILINEAR)
img = np.asarray(img, 'float32')
img = torch.FloatTensor(_preprocess(img)).to(device)
with torch.no_grad():
output = net(img)
probs = output[1].cpu().data.numpy()[0, 1, :, :]
offsets = output[0].cpu().data.numpy()
# probs: probability of a face at each sliding window
# offsets: transformations to true bounding boxes
boxes = _generate_bboxes(probs, offsets, scale, threshold)
if len(boxes) == 0:
return None
keep = nms(boxes[:, 0:5], overlap_threshold=0.5)
return boxes[keep]
def _generate_bboxes(probs, offsets, scale, threshold):
"""Generate bounding boxes at places
where there is probably a face.
Arguments:
probs: a float numpy array of shape [n, m].
offsets: a float numpy array of shape [1, 4, n, m].
scale: a float number,
width and height of the image were scaled by this number.
threshold: a float number.
Returns:
a float numpy array of shape [n_boxes, 9]
"""
# applying P-Net is equivalent, in some sense, to
# moving 12x12 window with stride 2
stride = 2
cell_size = 12
# indices of boxes where there is probably a face
inds = np.where(probs > threshold)
if inds[0].size == 0:
return np.array([])
# transformations of bounding boxes
tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
# they are defined as:
# w = x2 - x1 + 1
# h = y2 - y1 + 1
# x1_true = x1 + tx1*w
# x2_true = x2 + tx2*w
# y1_true = y1 + ty1*h
# y2_true = y2 + ty2*h
offsets = np.array([tx1, ty1, tx2, ty2])
score = probs[inds[0], inds[1]]
# P-Net is applied to scaled images
# so we need to rescale bounding boxes back
bounding_boxes = np.vstack([
np.round((stride * inds[1] + 1.0) / scale),
np.round((stride * inds[0] + 1.0) / scale),
np.round((stride * inds[1] + 1.0 + cell_size) / scale),
np.round((stride * inds[0] + 1.0 + cell_size) / scale),
score, offsets
])
# why one is added?
return bounding_boxes.T
| 3,147 | 30.168317 | 76 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/src/get_nets.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import numpy as np
from configs.paths_config import model_paths
PNET_PATH = model_paths["mtcnn_pnet"]
ONET_PATH = model_paths["mtcnn_onet"]
RNET_PATH = model_paths["mtcnn_rnet"]
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, c, h, w].
Returns:
a float tensor with shape [batch_size, c*h*w].
"""
        # without this transpose, the pretrained model doesn't work
x = x.transpose(3, 2).contiguous()
return x.view(x.size(0), -1)
class PNet(nn.Module):
def __init__(self):
super().__init__()
# suppose we have input with size HxW, then
# after first layer: H - 2,
# after pool: ceil((H - 2)/2),
# after second conv: ceil((H - 2)/2) - 2,
# after last conv: ceil((H - 2)/2) - 4,
# and the same for W
self.features = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(3, 10, 3, 1)),
('prelu1', nn.PReLU(10)),
('pool1', nn.MaxPool2d(2, 2, ceil_mode=True)),
('conv2', nn.Conv2d(10, 16, 3, 1)),
('prelu2', nn.PReLU(16)),
('conv3', nn.Conv2d(16, 32, 3, 1)),
('prelu3', nn.PReLU(32))
]))
self.conv4_1 = nn.Conv2d(32, 2, 1, 1)
self.conv4_2 = nn.Conv2d(32, 4, 1, 1)
weights = np.load(PNET_PATH, allow_pickle=True)[()]
for n, p in self.named_parameters():
p.data = torch.FloatTensor(weights[n])
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, 3, h, w].
Returns:
b: a float tensor with shape [batch_size, 4, h', w'].
a: a float tensor with shape [batch_size, 2, h', w'].
"""
x = self.features(x)
a = self.conv4_1(x)
b = self.conv4_2(x)
a = F.softmax(a, dim=-1)
return b, a
class RNet(nn.Module):
def __init__(self):
super().__init__()
self.features = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(3, 28, 3, 1)),
('prelu1', nn.PReLU(28)),
('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),
('conv2', nn.Conv2d(28, 48, 3, 1)),
('prelu2', nn.PReLU(48)),
('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),
('conv3', nn.Conv2d(48, 64, 2, 1)),
('prelu3', nn.PReLU(64)),
('flatten', Flatten()),
('conv4', nn.Linear(576, 128)),
('prelu4', nn.PReLU(128))
]))
self.conv5_1 = nn.Linear(128, 2)
self.conv5_2 = nn.Linear(128, 4)
weights = np.load(RNET_PATH, allow_pickle=True)[()]
for n, p in self.named_parameters():
p.data = torch.FloatTensor(weights[n])
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, 3, h, w].
Returns:
b: a float tensor with shape [batch_size, 4].
a: a float tensor with shape [batch_size, 2].
"""
x = self.features(x)
a = self.conv5_1(x)
b = self.conv5_2(x)
a = F.softmax(a, dim=-1)
return b, a
class ONet(nn.Module):
def __init__(self):
super().__init__()
self.features = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(3, 32, 3, 1)),
('prelu1', nn.PReLU(32)),
('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),
('conv2', nn.Conv2d(32, 64, 3, 1)),
('prelu2', nn.PReLU(64)),
('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),
('conv3', nn.Conv2d(64, 64, 3, 1)),
('prelu3', nn.PReLU(64)),
('pool3', nn.MaxPool2d(2, 2, ceil_mode=True)),
('conv4', nn.Conv2d(64, 128, 2, 1)),
('prelu4', nn.PReLU(128)),
('flatten', Flatten()),
('conv5', nn.Linear(1152, 256)),
('drop5', nn.Dropout(0.25)),
('prelu5', nn.PReLU(256)),
]))
self.conv6_1 = nn.Linear(256, 2)
self.conv6_2 = nn.Linear(256, 4)
self.conv6_3 = nn.Linear(256, 10)
weights = np.load(ONET_PATH, allow_pickle=True)[()]
for n, p in self.named_parameters():
p.data = torch.FloatTensor(weights[n])
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, 3, h, w].
Returns:
c: a float tensor with shape [batch_size, 10].
b: a float tensor with shape [batch_size, 4].
a: a float tensor with shape [batch_size, 2].
"""
x = self.features(x)
a = self.conv6_1(x)
b = self.conv6_2(x)
c = self.conv6_3(x)
a = F.softmax(a, dim=-1)
return c, b, a
| 4,995 | 28.046512 | 65 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/src/matlab_cp2tform.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 06:54:28 2017
@author: zhaoyafei
"""
import numpy as np
from numpy.linalg import inv, norm, lstsq
from numpy.linalg import matrix_rank as rank
class MatlabCp2tormException(Exception):
def __str__(self):
        return 'In File {}:{}'.format(__file__, super().__str__())
def tformfwd(trans, uv):
"""
Function:
----------
apply affine transform 'trans' to uv
Parameters:
----------
@trans: 3x3 np.array
transform matrix
@uv: Kx2 np.array
each row is a pair of coordinates (x, y)
Returns:
----------
@xy: Kx2 np.array
each row is a pair of transformed coordinates (x, y)
"""
uv = np.hstack((
uv, np.ones((uv.shape[0], 1))
))
xy = np.dot(uv, trans)
xy = xy[:, 0:-1]
return xy
def tforminv(trans, uv):
"""
Function:
----------
apply the inverse of affine transform 'trans' to uv
Parameters:
----------
@trans: 3x3 np.array
transform matrix
@uv: Kx2 np.array
each row is a pair of coordinates (x, y)
Returns:
----------
@xy: Kx2 np.array
each row is a pair of inverse-transformed coordinates (x, y)
"""
Tinv = inv(trans)
xy = tformfwd(Tinv, uv)
return xy
def findNonreflectiveSimilarity(uv, xy, options=None):
options = {'K': 2}
K = options['K']
M = xy.shape[0]
x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector
y = xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector
# print('--->x, y:\n', x, y
tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1))))
tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1))))
X = np.vstack((tmp1, tmp2))
# print('--->X.shape: ', X.shape
# print('X:\n', X
u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector
v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector
U = np.vstack((u, v))
# print('--->U.shape: ', U.shape
# print('U:\n', U
# We know that X * r = U
if rank(X) >= 2 * K:
r, _, _, _ = lstsq(X, U, rcond=None) # Make sure this is what I want
r = np.squeeze(r)
else:
raise Exception('cp2tform:twoUniquePointsReq')
# print('--->r:\n', r
sc = r[0]
ss = r[1]
tx = r[2]
ty = r[3]
Tinv = np.array([
[sc, -ss, 0],
[ss, sc, 0],
[tx, ty, 1]
])
# print('--->Tinv:\n', Tinv
T = inv(Tinv)
# print('--->T:\n', T
T[:, 2] = np.array([0, 0, 1])
return T, Tinv
def findSimilarity(uv, xy, options=None):
options = {'K': 2}
# uv = np.array(uv)
# xy = np.array(xy)
# Solve for trans1
trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options)
# Solve for trans2
# manually reflect the xy data across the Y-axis
    xyR = xy.copy()  # work on a copy so the reflection below does not mutate the caller's xy
xyR[:, 0] = -1 * xyR[:, 0]
trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options)
# manually reflect the tform to undo the reflection done on xyR
TreflectY = np.array([
[-1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
trans2 = np.dot(trans2r, TreflectY)
# Figure out if trans1 or trans2 is better
xy1 = tformfwd(trans1, uv)
norm1 = norm(xy1 - xy)
xy2 = tformfwd(trans2, uv)
norm2 = norm(xy2 - xy)
if norm1 <= norm2:
return trans1, trans1_inv
else:
trans2_inv = inv(trans2)
return trans2, trans2_inv
def get_similarity_transform(src_pts, dst_pts, reflective=True):
"""
Function:
----------
Find Similarity Transform Matrix 'trans':
u = src_pts[:, 0]
v = src_pts[:, 1]
x = dst_pts[:, 0]
y = dst_pts[:, 1]
[x, y, 1] = [u, v, 1] * trans
Parameters:
----------
@src_pts: Kx2 np.array
source points, each row is a pair of coordinates (x, y)
@dst_pts: Kx2 np.array
destination points, each row is a pair of transformed
coordinates (x, y)
@reflective: True or False
if True:
use reflective similarity transform
else:
use non-reflective similarity transform
Returns:
----------
@trans: 3x3 np.array
transform matrix from uv to xy
trans_inv: 3x3 np.array
inverse of trans, transform matrix from xy to uv
"""
if reflective:
trans, trans_inv = findSimilarity(src_pts, dst_pts)
else:
trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts)
return trans, trans_inv
def cvt_tform_mat_for_cv2(trans):
"""
Function:
----------
Convert Transform Matrix 'trans' into 'cv2_trans' which could be
directly used by cv2.warpAffine():
u = src_pts[:, 0]
v = src_pts[:, 1]
x = dst_pts[:, 0]
y = dst_pts[:, 1]
[x, y].T = cv_trans * [u, v, 1].T
Parameters:
----------
@trans: 3x3 np.array
transform matrix from uv to xy
Returns:
----------
@cv2_trans: 2x3 np.array
transform matrix from src_pts to dst_pts, could be directly used
for cv2.warpAffine()
"""
cv2_trans = trans[:, 0:2].T
return cv2_trans
def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True):
"""
Function:
----------
Find Similarity Transform Matrix 'cv2_trans' which could be
directly used by cv2.warpAffine():
u = src_pts[:, 0]
v = src_pts[:, 1]
x = dst_pts[:, 0]
y = dst_pts[:, 1]
[x, y].T = cv_trans * [u, v, 1].T
Parameters:
----------
@src_pts: Kx2 np.array
source points, each row is a pair of coordinates (x, y)
@dst_pts: Kx2 np.array
destination points, each row is a pair of transformed
coordinates (x, y)
reflective: True or False
if True:
use reflective similarity transform
else:
use non-reflective similarity transform
Returns:
----------
@cv2_trans: 2x3 np.array
transform matrix from src_pts to dst_pts, could be directly used
for cv2.warpAffine()
"""
trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective)
cv2_trans = cvt_tform_mat_for_cv2(trans)
return cv2_trans
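# Usage sketch (illustrative, not part of the original module): the 2x3 matrix
# returned above is meant to be passed straight to cv2.warpAffine. The variable
# names, crop size and landmark arrays below are assumptions.
#
#   import cv2
#   tfm = get_similarity_transform_for_cv2(detected_landmarks, reference_landmarks)
#   aligned = cv2.warpAffine(src_img, tfm, (crop_width, crop_height))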
if __name__ == '__main__':
"""
u = [0, 6, -2]
v = [0, 3, 5]
x = [-1, 0, 4]
y = [-1, -10, 4]
# In Matlab, run:
#
# uv = [u'; v'];
# xy = [x'; y'];
# tform_sim=cp2tform(uv,xy,'similarity');
#
# trans = tform_sim.tdata.T
# ans =
# -0.0764 -1.6190 0
# 1.6190 -0.0764 0
# -3.2156 0.0290 1.0000
# trans_inv = tform_sim.tdata.Tinv
# ans =
#
# -0.0291 0.6163 0
# -0.6163 -0.0291 0
# -0.0756 1.9826 1.0000
# xy_m=tformfwd(tform_sim, u,v)
#
# xy_m =
#
# -3.2156 0.0290
# 1.1833 -9.9143
# 5.0323 2.8853
# uv_m=tforminv(tform_sim, x,y)
#
# uv_m =
#
# 0.5698 1.3953
# 6.0872 2.2733
# -2.6570 4.3314
"""
u = [0, 6, -2]
v = [0, 3, 5]
x = [-1, 0, 4]
y = [-1, -10, 4]
uv = np.array((u, v)).T
xy = np.array((x, y)).T
print('\n--->uv:')
print(uv)
print('\n--->xy:')
print(xy)
trans, trans_inv = get_similarity_transform(uv, xy)
print('\n--->trans matrix:')
print(trans)
print('\n--->trans_inv matrix:')
print(trans_inv)
print('\n---> apply transform to uv')
print('\nxy_m = uv_augmented * trans')
uv_aug = np.hstack((
uv, np.ones((uv.shape[0], 1))
))
xy_m = np.dot(uv_aug, trans)
print(xy_m)
print('\nxy_m = tformfwd(trans, uv)')
xy_m = tformfwd(trans, uv)
print(xy_m)
print('\n---> apply inverse transform to xy')
print('\nuv_m = xy_augmented * trans_inv')
xy_aug = np.hstack((
xy, np.ones((xy.shape[0], 1))
))
uv_m = np.dot(xy_aug, trans_inv)
print(uv_m)
print('\nuv_m = tformfwd(trans_inv, xy)')
uv_m = tformfwd(trans_inv, xy)
print(uv_m)
uv_m = tforminv(trans, xy)
print('\nuv_m = tforminv(trans, xy)')
print(uv_m)
| 8,562 | 23.396011 | 77 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/mtcnn/mtcnn_pytorch/src/visualization_utils.py | from PIL import ImageDraw
def show_bboxes(img, bounding_boxes, facial_landmarks=[]):
"""Draw bounding boxes and facial landmarks.
Arguments:
img: an instance of PIL.Image.
bounding_boxes: a float numpy array of shape [n, 5].
facial_landmarks: a float numpy array of shape [n, 10].
Returns:
an instance of PIL.Image.
"""
img_copy = img.copy()
draw = ImageDraw.Draw(img_copy)
for b in bounding_boxes:
draw.rectangle([
(b[0], b[1]), (b[2], b[3])
], outline='white')
for p in facial_landmarks:
for i in range(5):
draw.ellipse([
(p[i] - 1.0, p[i + 5] - 1.0),
(p[i] + 1.0, p[i + 5] + 1.0)
], outline='blue')
return img_copy
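# Usage sketch (illustrative only; `detect_faces` refers to the detector shipped
# elsewhere in this mtcnn_pytorch package, and the image path is a placeholder):
#
#   from PIL import Image
#   img = Image.open('example.jpg')
#   bounding_boxes, landmarks = detect_faces(img)
#   show_bboxes(img, bounding_boxes, landmarks).show()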
| 786 | 23.59375 | 63 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/stylegan2/__init__.py | 0 | 0 | 0 | py |
|
IID_representation_learning | IID_representation_learning-master/restyle/models/stylegan2/model.py | import math
import random
import torch
from torch import nn
from torch.nn import functional as F
from models.stylegan2.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * (factor ** 2)
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = (pad0, pad1)
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
return out
class Downsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel)
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2
pad1 = p // 2
self.pad = (pad0, pad1)
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
return out
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * (upsample_factor ** 2)
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class EqualConv2d(nn.Module):
def __init__(
self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
):
super().__init__()
self.weight = nn.Parameter(
torch.randn(out_channel, in_channel, kernel_size, kernel_size)
)
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
def forward(self, input):
out = F.conv2d(
input,
self.weight * self.scale,
bias=self.bias,
stride=self.stride,
padding=self.padding,
)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
)
class EqualLinear(nn.Module):
def __init__(
self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = (1 / math.sqrt(in_dim)) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(
input, self.weight * self.scale, bias=self.bias * self.lr_mul
)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class ScaledLeakyReLU(nn.Module):
def __init__(self, negative_slope=0.2):
super().__init__()
self.negative_slope = negative_slope
def forward(self, input):
out = F.leaky_relu(input, negative_slope=self.negative_slope)
return out * math.sqrt(2)
class ModulatedConv2d(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
style_dim,
demodulate=True,
upsample=False,
downsample=False,
blur_kernel=[1, 3, 3, 1],
):
super().__init__()
self.eps = 1e-8
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = (len(blur_kernel) - factor) - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(
torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
)
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
f'upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(
batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(
batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
)
weight = weight.transpose(1, 2).reshape(
batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
)
out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class ConstantInput(nn.Module):
def __init__(self, channel, size=4):
super().__init__()
self.input = nn.Parameter(torch.randn(1, channel, size, size))
def forward(self, input):
batch = input.shape[0]
out = self.input.repeat(batch, 1, 1, 1)
return out
class StyledConv(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
style_dim,
upsample=False,
blur_kernel=[1, 3, 3, 1],
demodulate=True,
):
super().__init__()
self.conv = ModulatedConv2d(
in_channel,
out_channel,
kernel_size,
style_dim,
upsample=upsample,
blur_kernel=blur_kernel,
demodulate=demodulate,
)
self.noise = NoiseInjection()
# self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
# self.activate = ScaledLeakyReLU(0.2)
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input, style, noise=None):
out = self.conv(input, style)
out = self.noise(out, noise=noise)
# out = out + self.bias
out = self.activate(out)
return out
class ToRGB(nn.Module):
def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
def forward(self, input, style, skip=None):
out = self.conv(input, style)
out = out + self.bias
if skip is not None:
skip = self.upsample(skip)
out = out + skip
return out
class Generator(nn.Module):
def __init__(
self,
size,
style_dim,
n_mlp,
channel_multiplier=2,
blur_kernel=[1, 3, 3, 1],
lr_mlp=0.01,
):
super().__init__()
self.size = size
self.style_dim = style_dim
layers = [PixelNorm()]
for i in range(n_mlp):
layers.append(
EqualLinear(
style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
)
)
self.style = nn.Sequential(*layers)
self.channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
self.input = ConstantInput(self.channels[4])
self.conv1 = StyledConv(
self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
)
self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
self.log_size = int(math.log(size, 2))
self.num_layers = (self.log_size - 2) * 2 + 1
self.convs = nn.ModuleList()
self.upsamples = nn.ModuleList()
self.to_rgbs = nn.ModuleList()
self.noises = nn.Module()
in_channel = self.channels[4]
for layer_idx in range(self.num_layers):
res = (layer_idx + 5) // 2
shape = [1, 1, 2 ** res, 2 ** res]
self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
for i in range(3, self.log_size + 1):
out_channel = self.channels[2 ** i]
self.convs.append(
StyledConv(
in_channel,
out_channel,
3,
style_dim,
upsample=True,
blur_kernel=blur_kernel,
)
)
self.convs.append(
StyledConv(
out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
)
)
self.to_rgbs.append(ToRGB(out_channel, style_dim))
in_channel = out_channel
self.n_latent = self.log_size * 2 - 2
def make_noise(self):
device = self.input.input.device
noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
for i in range(3, self.log_size + 1):
for _ in range(2):
noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
return noises
def mean_latent(self, n_latent):
latent_in = torch.randn(
n_latent, self.style_dim, device=self.input.input.device
)
latent = self.style(latent_in).mean(0, keepdim=True)
return latent
def get_latent(self, input):
return self.style(input)
def forward(
self,
styles,
return_latents=False,
return_features=False,
inject_index=None,
truncation=1,
truncation_latent=None,
input_is_latent=False,
noise=None,
randomize_noise=True,
):
if not input_is_latent:
styles = [self.style(s) for s in styles]
if noise is None:
if randomize_noise:
noise = [None] * self.num_layers
else:
noise = [
getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
]
if truncation < 1:
style_t = []
for style in styles:
style_t.append(
truncation_latent + truncation * (style - truncation_latent)
)
styles = style_t
if len(styles) < 2:
inject_index = self.n_latent
if styles[0].ndim < 3:
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else:
latent = styles[0]
else:
if inject_index is None:
inject_index = random.randint(1, self.n_latent - 1)
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
latent = torch.cat([latent, latent2], 1)
out = self.input(latent)
out = self.conv1(out, latent[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
):
out = conv1(out, latent[:, i], noise=noise1)
out = conv2(out, latent[:, i + 1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip)
i += 2
image = skip
if return_latents:
return image, latent
elif return_features:
return image, out
else:
return image, None
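# Usage sketch (illustrative, not part of the original file): sampling from the
# generator with truncation. The resolution, latent size and truncation value are
# assumptions, and a CUDA device is required by the fused ops used above.
#
#   g = Generator(1024, 512, 8).cuda().eval()
#   with torch.no_grad():
#       z = torch.randn(4, 512, device='cuda')
#       w_mean = g.mean_latent(4096)
#       images, _ = g([z], truncation=0.7, truncation_latent=w_mean)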
class ConvLayer(nn.Sequential):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
downsample=False,
blur_kernel=[1, 3, 3, 1],
bias=True,
activate=True,
):
layers = []
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
stride = 2
self.padding = 0
else:
stride = 1
self.padding = kernel_size // 2
layers.append(
EqualConv2d(
in_channel,
out_channel,
kernel_size,
padding=self.padding,
stride=stride,
bias=bias and not activate,
)
)
if activate:
if bias:
layers.append(FusedLeakyReLU(out_channel))
else:
layers.append(ScaledLeakyReLU(0.2))
super().__init__(*layers)
class ResBlock(nn.Module):
def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
super().__init__()
self.conv1 = ConvLayer(in_channel, in_channel, 3)
self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
self.skip = ConvLayer(
in_channel, out_channel, 1, downsample=True, activate=False, bias=False
)
def forward(self, input):
out = self.conv1(input)
out = self.conv2(out)
skip = self.skip(input)
out = (out + skip) / math.sqrt(2)
return out
class Discriminator(nn.Module):
def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
super().__init__()
channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
convs = [ConvLayer(3, channels[size], 1)]
log_size = int(math.log(size, 2))
in_channel = channels[size]
for i in range(log_size, 2, -1):
out_channel = channels[2 ** (i - 1)]
convs.append(ResBlock(in_channel, out_channel, blur_kernel))
in_channel = out_channel
self.convs = nn.Sequential(*convs)
self.stddev_group = 4
self.stddev_feat = 1
self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
self.final_linear = nn.Sequential(
EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
EqualLinear(channels[4], 1),
)
def forward(self, input):
out = self.convs(input)
batch, channel, height, width = out.shape
group = min(batch, self.stddev_group)
stddev = out.view(
group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
)
stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
stddev = stddev.repeat(group, 1, height, width)
out = torch.cat([out, stddev], 1)
out = self.final_conv(out)
out = out.view(batch, -1)
out = self.final_linear(out)
return out
| 18,559 | 26.537092 | 100 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/stylegan2/op/__init__.py | from .fused_act import FusedLeakyReLU, fused_leaky_relu
from .upfirdn2d import upfirdn2d
| 89 | 29 | 55 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/stylegan2/op/fused_act.py | import os
import torch
from torch import nn
from torch.autograd import Function
from torch.utils.cpp_extension import load
module_path = os.path.dirname(__file__)
fused = load(
'fused',
sources=[
os.path.join(module_path, 'fused_bias_act.cpp'),
os.path.join(module_path, 'fused_bias_act_kernel.cu'),
],
)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(
grad_output, empty, out, 3, 1, negative_slope, scale
)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(
gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale
)
return grad_input, grad_bias, None, None
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
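# Minimal usage sketch (assumes a CUDA device, since the extension compiled above
# is CUDA-only). FusedLeakyReLU adds a learned per-channel bias and then applies a
# scaled leaky ReLU in a single fused kernel; the tensor shape below is arbitrary.
#
#   act = FusedLeakyReLU(64).cuda()
#   y = act(torch.randn(8, 64, 32, 32, device='cuda'))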
| 2,378 | 26.662791 | 83 | py |
IID_representation_learning | IID_representation_learning-master/restyle/models/stylegan2/op/fused_bias_act.cpp | #include <torch/extension.h>
torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
int act, int grad, float alpha, float scale);
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
int act, int grad, float alpha, float scale) {
CHECK_CUDA(input);
CHECK_CUDA(bias);
return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
} | 826 | 38.380952 | 114 | cpp |
IID_representation_learning | IID_representation_learning-master/restyle/models/stylegan2/op/upfirdn2d.cpp | #include <torch/extension.h>
torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
int up_x, int up_y, int down_x, int down_y,
int pad_x0, int pad_x1, int pad_y0, int pad_y1);
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
int up_x, int up_y, int down_x, int down_y,
int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
CHECK_CUDA(input);
CHECK_CUDA(kernel);
return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
} | 966 | 41.043478 | 99 | cpp |
IID_representation_learning | IID_representation_learning-master/restyle/models/stylegan2/op/upfirdn2d.py | import os
import torch
from torch.nn import functional as F  # used by upfirdn2d_native below
from torch.autograd import Function
from torch.utils.cpp_extension import load
module_path = os.path.dirname(__file__)
upfirdn2d_op = load(
'upfirdn2d',
sources=[
os.path.join(module_path, 'upfirdn2d.cpp'),
os.path.join(module_path, 'upfirdn2d_kernel.cu'),
],
)
class UpFirDn2dBackward(Function):
@staticmethod
def forward(
ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(
grad_output,
grad_kernel,
down_x,
down_y,
up_x,
up_y,
g_pad_x0,
g_pad_x1,
g_pad_y0,
g_pad_y1,
)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(
gradgrad_input,
kernel,
ctx.up_x,
ctx.up_y,
ctx.down_x,
ctx.down_y,
ctx.pad_x0,
ctx.pad_x1,
ctx.pad_y0,
ctx.pad_y1,
)
# gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
gradgrad_out = gradgrad_out.view(
ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
)
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = (out_h, out_w)
ctx.up = (up_x, up_y)
ctx.down = (down_x, down_y)
ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
out = upfirdn2d_op.upfirdn2d(
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
)
# out = out.view(major, out_h, out_w, minor)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(
grad_output,
kernel,
grad_kernel,
ctx.up,
ctx.down,
ctx.pad,
ctx.g_pad,
ctx.in_size,
ctx.out_size,
)
return grad_input, None, None, None, None
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = UpFirDn2d.apply(
input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
)
return out
def upfirdn2d_native(
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(
out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
)
out = out[
:,
max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0),
max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0),
:,
]
out = out.permute(0, 3, 1, 2)
out = out.reshape(
[-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
)
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(
-1,
minor,
in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
)
out = out.permute(0, 2, 3, 1)
return out[:, ::down_y, ::down_x, :]
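# Usage sketch (illustrative, not part of the original file): a 2x upsample with
# the [1, 3, 3, 1] binomial filter, mirroring what the Upsample module in
# models.stylegan2.model does. Requires a CUDA device; the input tensor and the
# gain factor are assumptions.
#
#   k = torch.tensor([1., 3., 3., 1.])
#   k = k[None, :] * k[:, None]
#   k = k / k.sum() * 4  # gain of factor**2 preserves brightness after upsampling
#   x = torch.randn(1, 3, 64, 64, device='cuda')
#   y = upfirdn2d(x, k.cuda(), up=2, down=1, pad=(2, 1))  # -> (1, 3, 128, 128)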
| 5,203 | 27.12973 | 108 | py |
IID_representation_learning | IID_representation_learning-master/restyle/options/__init__.py | 0 | 0 | 0 | py |
|
IID_representation_learning | IID_representation_learning-master/restyle/options/e4e_train_options.py | from options.train_options import TrainOptions
class e4eTrainOptions(TrainOptions):
def __init__(self):
super(e4eTrainOptions, self).__init__()
def initialize(self):
super(e4eTrainOptions, self).initialize()
self.parser.add_argument('--w_discriminator_lambda', default=0, type=float,
help='Dw loss multiplier')
self.parser.add_argument('--w_discriminator_lr', default=2e-5, type=float,
help='Dw learning rate')
self.parser.add_argument("--r1", type=float, default=10,
help="weight of the r1 regularization")
self.parser.add_argument("--d_reg_every", type=int, default=16,
help="interval for applying r1 regularization")
self.parser.add_argument('--use_w_pool', action='store_true',
                                 help='Whether to store a latent codes pool for the discriminator\'s training')
self.parser.add_argument("--w_pool_size", type=int, default=50,
help="W\'s pool size, depends on --use_w_pool")
# e4e_modules specific
self.parser.add_argument('--delta_norm', type=int, default=2,
help="norm type of the deltas")
self.parser.add_argument('--delta_norm_lambda', type=float, default=2e-4,
help="lambda for delta norm loss")
# Progressive training
self.parser.add_argument('--progressive_steps', nargs='+', type=int, default=None,
help="The training steps of training new deltas. steps[i] starts the delta_i training")
self.parser.add_argument('--progressive_start', type=int, default=None,
help="The training step to start training the deltas, overrides progressive_steps")
self.parser.add_argument('--progressive_step_every', type=int, default=2_000,
help="Amount of training steps for each progressive step")
# Save additional training info to enable future training continuation from produced checkpoints
self.parser.add_argument('--save_training_data', action='store_true',
help='Save intermediate training data to resume training from the checkpoint')
self.parser.add_argument('--sub_exp_dir', default=None, type=str,
help='Name of sub experiment directory')
self.parser.add_argument('--resume_training_from_ckpt', default=None, type=str,
help='Path to training checkpoint, works when --save_training_data was set to True')
self.parser.add_argument('--update_param_list', nargs='+', type=str, default=None,
help="Name of training parameters to update the loaded training checkpoint")
def parse(self):
opts = self.parser.parse_args()
return opts
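# Example invocation (sketch; the experiment paths and loss weights are
# placeholders, and scripts/train_restyle_e4e.py is the entry point that consumes
# these options):
#
#   python scripts/train_restyle_e4e.py \
#       --dataset_type=ffhq_encode \
#       --exp_dir=experiments/restyle_e4e_ffhq \
#       --stylegan_weights=pretrained_models/stylegan2-ffhq.pt \
#       --start_from_latent_avg \
#       --lpips_lambda=0.8 --l2_lambda=1 --id_lambda=0.1 \
#       --w_discriminator_lambda=0.1 --progressive_start=20000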
| 3,016 | 58.156863 | 120 | py |
IID_representation_learning | IID_representation_learning-master/restyle/options/test_options.py | from argparse import ArgumentParser
class TestOptions:
def __init__(self):
self.parser = ArgumentParser()
self.initialize()
def initialize(self):
# arguments for inference script
self.parser.add_argument('--exp_dir', type=str,
help='Path to experiment output directory')
self.parser.add_argument('--checkpoint_path', default=None, type=str,
help='Path to ReStyle model checkpoint')
self.parser.add_argument('--data_path', type=str, default='gt_images',
help='Path to directory of images to evaluate')
self.parser.add_argument('--resize_outputs', action='store_true',
help='Whether to resize outputs to 256x256 or keep at original output resolution')
self.parser.add_argument('--test_batch_size', default=2, type=int,
help='Batch size for testing and inference')
self.parser.add_argument('--test_workers', default=2, type=int,
help='Number of test/inference dataloader workers')
self.parser.add_argument('--n_images', type=int, default=None,
help='Number of images to output. If None, run on all data')
# arguments for iterative inference
self.parser.add_argument('--n_iters_per_batch', default=5, type=int,
help='Number of forward passes per batch during training.')
# arguments for encoder bootstrapping
self.parser.add_argument('--model_1_checkpoint_path', default=None, type=str,
help='Path to encoder used to initialize encoder bootstrapping inference.')
self.parser.add_argument('--model_2_checkpoint_path', default=None, type=str,
help='Path to encoder used to iteratively translate images following '
'model 1\'s initialization.')
# arguments for editing
self.parser.add_argument('--edit_directions', type=str, default='age,smile,pose',
                                 help='comma-separated list of which edit directions to perform.')
self.parser.add_argument('--factor_ranges', type=str, default='5,5,5',
help='comma-separated list of max ranges for each corresponding edit.')
def parse(self):
opts = self.parser.parse_args()
return opts
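# Example invocation (sketch; paths are placeholders, and
# scripts/inference_iterative.py is one of the scripts that consumes these options):
#
#   python scripts/inference_iterative.py \
#       --exp_dir=experiments/inference_ffhq \
#       --checkpoint_path=pretrained_models/restyle_psp_ffhq.pt \
#       --data_path=data/test_images \
#       --test_batch_size=4 --test_workers=4 --n_iters_per_batch=5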
| 2,530 | 51.729167 | 115 | py |
IID_representation_learning | IID_representation_learning-master/restyle/options/train_options.py | from argparse import ArgumentParser
class TrainOptions:
def __init__(self):
self.parser = ArgumentParser()
self.initialize()
def initialize(self):
# general setup
self.parser.add_argument('--exp_dir', type=str,
help='Path to experiment output directory')
self.parser.add_argument('--dataset_type', default='ffhq_encode', type=str,
help='Type of dataset/experiment to run')
self.parser.add_argument('--encoder_type', default='BackboneEncoder', type=str,
help='Which encoder to use')
self.parser.add_argument('--input_nc', default=6, type=int,
help='Number of input image channels to the ReStyle encoder. Should be set to 6.')
self.parser.add_argument('--output_size', default=1024, type=int,
help='Output size of generator')
# batch size and dataloader works
self.parser.add_argument('--batch_size', default=4, type=int,
help='Batch size for training')
self.parser.add_argument('--test_batch_size', default=2, type=int,
help='Batch size for testing and inference')
self.parser.add_argument('--workers', default=4, type=int,
help='Number of train dataloader workers')
self.parser.add_argument('--test_workers', default=2, type=int,
help='Number of test/inference dataloader workers')
# optimizers
self.parser.add_argument('--learning_rate', default=0.0001, type=float,
help='Optimizer learning rate')
self.parser.add_argument('--optim_name', default='ranger', type=str,
help='Which optimizer to use')
self.parser.add_argument('--train_decoder', default=False, type=bool,
help='Whether to train the decoder model')
self.parser.add_argument('--start_from_latent_avg', action='store_true',
help='Whether to add average latent vector to generate codes from encoder.')
# loss lambdas
self.parser.add_argument('--lpips_lambda', default=0, type=float,
help='LPIPS loss multiplier factor')
self.parser.add_argument('--id_lambda', default=0, type=float,
help='ID loss multiplier factor')
self.parser.add_argument('--l2_lambda', default=0, type=float,
help='L2 loss multiplier factor')
self.parser.add_argument('--w_norm_lambda', default=0, type=float,
help='W-norm loss multiplier factor')
self.parser.add_argument('--moco_lambda', default=0, type=float,
help='Moco feature loss multiplier factor')
# weights and checkpoint paths
self.parser.add_argument('--stylegan_weights', default=None, type=str,
help='Path to StyleGAN model weights')
self.parser.add_argument('--checkpoint_path', default=None, type=str,
help='Path to ReStyle model checkpoint')
# intervals for logging, validation, and saving
self.parser.add_argument('--max_steps', default=500000, type=int,
help='Maximum number of training steps')
self.parser.add_argument('--image_interval', default=1000, type=int,
help='Interval for logging train images during training')
self.parser.add_argument('--board_interval', default=50, type=int,
help='Interval for logging metrics to tensorboard')
self.parser.add_argument('--val_interval', default=1000, type=int,
help='Validation interval')
self.parser.add_argument('--save_interval', default=None, type=int,
help='Model checkpoint interval')
# arguments for iterative encoding
self.parser.add_argument('--n_iters_per_batch', default=5, type=int,
help='Number of forward passes per batch during training')
# data split
self.parser.add_argument('--split_scheme', default='official', type=str,
choices=['official', '012', '120', '201'],
help='The split scheme used in wilds')
def parse(self):
opts = self.parser.parse_args()
return opts
| 4,690 | 54.188235 | 115 | py |
IID_representation_learning | IID_representation_learning-master/restyle/scripts/align_faces_parallel.py | """
brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset)
author: lzhbrian (https://lzhbrian.me)
date: 2020.1.5
note: code is heavily borrowed from
https://github.com/NVlabs/ffhq-dataset
http://dlib.net/face_landmark_detection.py.html
requirements:
apt install cmake
conda install Pillow numpy scipy
pip install dlib
# download face landmark model from:
# http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
"""
from argparse import ArgumentParser
import time
import numpy as np
import PIL
import PIL.Image
import os
import scipy
import scipy.ndimage
import dlib
import multiprocessing as mp
import math
from configs.paths_config import model_paths
SHAPE_PREDICTOR_PATH = model_paths["shape_predictor"]
def get_landmark(filepath, predictor):
"""get landmark with dlib
:return: np.array shape=(68, 2)
"""
detector = dlib.get_frontal_face_detector()
img = dlib.load_rgb_image(filepath)
dets = detector(img, 1)
shape = None
for k, d in enumerate(dets):
shape = predictor(img, d)
if not shape:
raise Exception("Could not find face in image! Please try another image!")
t = list(shape.parts())
a = []
for tt in t:
a.append([tt.x, tt.y])
lm = np.array(a)
return lm
def align_face(filepath, predictor, output_size=256, transform_size=256):
"""
:param filepath: str
:return: PIL Image
"""
lm = get_landmark(filepath, predictor)
lm_chin = lm[0: 17] # left-right
lm_eyebrow_left = lm[17: 22] # left-right
lm_eyebrow_right = lm[22: 27] # left-right
lm_nose = lm[27: 31] # top-down
lm_nostrils = lm[31: 36] # top-down
lm_eye_left = lm[36: 42] # left-clockwise
lm_eye_right = lm[42: 48] # left-clockwise
lm_mouth_outer = lm[48: 60] # left-clockwise
lm_mouth_inner = lm[60: 68] # left-clockwise
# Calculate auxiliary vectors.
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle.
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = np.flipud(x) * [-1, 1]
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
qsize = np.hypot(*x) * 2
# read image
img = PIL.Image.open(filepath)
enable_padding = True
# Shrink.
shrink = int(np.floor(qsize / output_size * 0.5))
if shrink > 1:
rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
img = img.resize(rsize, PIL.Image.ANTIALIAS)
quad /= shrink
qsize /= shrink
# Crop.
border = max(int(np.rint(qsize * 0.1)), 3)
crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
int(np.ceil(max(quad[:, 1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Pad.
pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
int(np.ceil(max(quad[:, 1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
max(pad[3] - img.size[1] + border, 0))
if enable_padding and max(pad) > border - 4:
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
blur = qsize * 0.02
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
quad += pad[:2]
# Transform.
img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
if output_size < transform_size:
img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
# Save aligned image.
return img
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def extract_on_paths(file_paths):
predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH)
pid = mp.current_process().name
print('\t{} is starting to extract on #{} images'.format(pid, len(file_paths)))
tot_count = len(file_paths)
count = 0
for file_path, res_path in file_paths:
count += 1
if count % 100 == 0:
print('{} done with {}/{}'.format(pid, count, tot_count))
try:
res = align_face(file_path, predictor)
res = res.convert('RGB')
os.makedirs(os.path.dirname(res_path), exist_ok=True)
res.save(res_path)
except Exception:
continue
print('\tDone!')
def parse_args():
parser = ArgumentParser(add_help=False)
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--root_path', type=str, default='')
args = parser.parse_args()
return args
def run(args):
root_path = args.root_path
out_crops_path = root_path + '_crops'
if not os.path.exists(out_crops_path):
os.makedirs(out_crops_path, exist_ok=True)
file_paths = []
for root, dirs, files in os.walk(root_path):
for file in files:
file_path = os.path.join(root, file)
fname = os.path.join(out_crops_path, os.path.relpath(file_path, root_path))
res_path = '{}.jpg'.format(os.path.splitext(fname)[0])
if os.path.splitext(file_path)[1] == '.txt' or os.path.exists(res_path):
continue
file_paths.append((file_path, res_path))
file_chunks = list(chunks(file_paths, int(math.ceil(len(file_paths) / args.num_threads))))
print(len(file_chunks))
pool = mp.Pool(args.num_threads)
print('Running on {} paths\nHere we goooo'.format(len(file_paths)))
tic = time.time()
pool.map(extract_on_paths, file_chunks)
toc = time.time()
print('Mischief managed in {}s'.format(toc - tic))
if __name__ == '__main__':
args = parse_args()
run(args)
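# Example invocation (sketch; the root path is a placeholder). Aligned crops are
# written next to the input folder with a '_crops' suffix:
#
#   python scripts/align_faces_parallel.py --num_threads 4 --root_path /path/to/raw_images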
| 6,988 | 32.927184 | 117 | py |
IID_representation_learning | IID_representation_learning-master/restyle/scripts/calc_id_loss_parallel.py | from argparse import ArgumentParser
import time
import numpy as np
import os
import json
import sys
from PIL import Image
import multiprocessing as mp
import math
import torch
import torchvision.transforms as trans
sys.path.append(".")
sys.path.append("..")
from models.mtcnn.mtcnn import MTCNN
from models.encoders.model_irse import IR_101
from configs.paths_config import model_paths
CIRCULAR_FACE_PATH = model_paths['circular_face']
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def extract_on_paths(file_paths):
facenet = IR_101(input_size=112)
facenet.load_state_dict(torch.load(CIRCULAR_FACE_PATH))
facenet.cuda()
facenet.eval()
mtcnn = MTCNN()
id_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
pid = mp.current_process().name
print('\t{} is starting to extract on {} images'.format(pid, len(file_paths)))
tot_count = len(file_paths)
count = 0
scores_dict = {}
for res_path, gt_path in file_paths:
count += 1
if count % 100 == 0:
print('{} done with {}/{}'.format(pid, count, tot_count))
if True:
input_im = Image.open(res_path)
input_im, _ = mtcnn.align(input_im)
if input_im is None:
print('{} skipping {}'.format(pid, res_path))
continue
input_id = facenet(id_transform(input_im).unsqueeze(0).cuda())[0]
result_im = Image.open(gt_path)
result_im, _ = mtcnn.align(result_im)
if result_im is None:
print('{} skipping {}'.format(pid, gt_path))
continue
result_id = facenet(id_transform(result_im).unsqueeze(0).cuda())[0]
score = float(input_id.dot(result_id))
scores_dict[os.path.basename(gt_path)] = score
return scores_dict
def parse_args():
parser = ArgumentParser(add_help=False)
parser.add_argument('--num_threads', type=int, default=4)
parser.add_argument('--output_path', type=str, default='inference_results', help='path to inference outputs')
parser.add_argument('--gt_path', type=str, default='gt_images', help='path to gt images')
args = parser.parse_args()
return args
def run(args):
for step in sorted(os.listdir(args.output_path)):
if not step.isdigit():
continue
step_outputs_path = os.path.join(args.output_path, step)
if os.path.isdir(step_outputs_path):
print('#' * 80)
print(f'Running on step: {step}')
print('#' * 80)
run_on_step_output(step=step, args=args)
def run_on_step_output(step, args):
file_paths = []
step_outputs_path = os.path.join(args.output_path, step)
for f in os.listdir(step_outputs_path):
image_path = os.path.join(step_outputs_path, f)
gt_path = os.path.join(args.gt_path, f)
if f.endswith(".jpg") or f.endswith('.png') or f.endswith('.jpeg'):
file_paths.append([image_path, gt_path.replace('.png', '.jpg')])
file_chunks = list(chunks(file_paths, int(math.ceil(len(file_paths) / args.num_threads))))
pool = mp.Pool(args.num_threads)
print('Running on {} paths\nHere we goooo'.format(len(file_paths)))
tic = time.time()
results = pool.map(extract_on_paths, file_chunks)
scores_dict = {}
for d in results:
scores_dict.update(d)
all_scores = list(scores_dict.values())
mean = np.mean(all_scores)
std = np.std(all_scores)
result_str = 'New Average score is {:.2f}+-{:.2f}'.format(mean, std)
print(result_str)
out_path = os.path.join(os.path.dirname(args.output_path), 'inference_metrics')
if not os.path.exists(out_path):
os.makedirs(out_path)
with open(os.path.join(out_path, f'stat_id_step_{step}.txt'), 'w') as f:
f.write(result_str)
with open(os.path.join(out_path, f'scores_id_step_{step}.json'), 'w') as f:
json.dump(scores_dict, f)
toc = time.time()
print('Mischief managed in {}s'.format(toc - tic))
if __name__ == '__main__':
args = parse_args()
run(args)
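# Example invocation (sketch; paths are placeholders). --output_path should point
# at the per-step folders written by the inference script, and --gt_path at the
# corresponding source images:
#
#   python scripts/calc_id_loss_parallel.py \
#       --output_path=experiments/inference_ffhq/inference_results \
#       --gt_path=data/test_images --num_threads 4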
| 3,804 | 27.609023 | 111 | py |
IID_representation_learning | IID_representation_learning-master/restyle/scripts/calc_losses_on_images.py | from argparse import ArgumentParser
import os
import json
import sys
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
sys.path.append(".")
sys.path.append("..")
from criteria.lpips.lpips import LPIPS
from datasets.gt_res_dataset import GTResDataset
def parse_args():
parser = ArgumentParser(add_help=False)
parser.add_argument('--mode', type=str, default='lpips', choices=['lpips', 'l2'])
parser.add_argument('--output_path', type=str, default='results')
parser.add_argument('--gt_path', type=str, default='gt_images')
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--batch_size', type=int, default=4)
args = parser.parse_args()
return args
def run(args):
for step in sorted(os.listdir(args.output_path)):
if not step.isdigit():
continue
step_outputs_path = os.path.join(args.output_path, step)
if os.path.isdir(step_outputs_path):
print('#' * 80)
print(f'Running on step: {step}')
print('#' * 80)
run_on_step_output(step=step, args=args)
def run_on_step_output(step, args):
transform = transforms.Compose([transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
step_outputs_path = os.path.join(args.output_path, step)
print('Loading dataset')
dataset = GTResDataset(root_path=step_outputs_path,
gt_dir=args.gt_path,
transform=transform)
dataloader = DataLoader(dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=int(args.workers),
drop_last=True)
if args.mode == 'lpips':
loss_func = LPIPS(net_type='alex')
elif args.mode == 'l2':
loss_func = torch.nn.MSELoss()
else:
raise Exception('Not a valid mode!')
loss_func.cuda()
global_i = 0
scores_dict = {}
all_scores = []
for result_batch, gt_batch in tqdm(dataloader):
for i in range(args.batch_size):
loss = float(loss_func(result_batch[i:i+1].cuda(), gt_batch[i:i+1].cuda()))
all_scores.append(loss)
im_path = dataset.pairs[global_i][0]
scores_dict[os.path.basename(im_path)] = loss
global_i += 1
all_scores = list(scores_dict.values())
mean = np.mean(all_scores)
std = np.std(all_scores)
result_str = 'Average loss is {:.2f}+-{:.2f}'.format(mean, std)
print('Finished with ', step_outputs_path)
print(result_str)
out_path = os.path.join(os.path.dirname(args.output_path), 'inference_metrics')
if not os.path.exists(out_path):
os.makedirs(out_path)
with open(os.path.join(out_path, f'stat_{args.mode}_step_{step}.txt'), 'w') as f:
f.write(result_str)
with open(os.path.join(out_path, f'scores_{args.mode}_step_{step}.json'), 'w') as f:
json.dump(scores_dict, f)
if __name__ == '__main__':
args = parse_args()
run(args)
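# Example invocation (sketch; paths are placeholders):
#
#   python scripts/calc_losses_on_images.py \
#       --mode lpips \
#       --output_path=experiments/inference_ffhq/inference_results \
#       --gt_path=data/test_images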
| 2,822 | 27.515152 | 85 | py |
IID_representation_learning | IID_representation_learning-master/restyle/scripts/encoder_bootstrapping_inference.py | import os
from argparse import Namespace
from tqdm import tqdm
import time
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
import sys
from utils.inference_utils import get_average_image
sys.path.append(".")
sys.path.append("..")
from configs import data_configs
from datasets.inference_dataset import InferenceDataset
from options.test_options import TestOptions
from models.psp import pSp
from models.e4e import e4e
from utils.model_utils import ENCODER_TYPES
from utils.common import tensor2im
def run():
test_opts = TestOptions().parse()
out_path_results = os.path.join(test_opts.exp_dir, 'inference_results')
os.makedirs(out_path_results, exist_ok=True)
# load model used for initializing encoder bootstrapping
ckpt = torch.load(test_opts.model_1_checkpoint_path, map_location='cpu')
opts = ckpt['opts']
opts.update(vars(test_opts))
opts['checkpoint_path'] = test_opts.model_1_checkpoint_path
opts = Namespace(**opts)
if opts.encoder_type in ENCODER_TYPES['pSp']:
net1 = pSp(opts)
else:
net1 = e4e(opts)
net1.eval()
net1.cuda()
# load model used for translating input image after initialization
ckpt = torch.load(test_opts.model_2_checkpoint_path, map_location='cpu')
opts = ckpt['opts']
opts.update(vars(test_opts))
opts['checkpoint_path'] = test_opts.model_2_checkpoint_path
opts = Namespace(**opts)
if opts.encoder_type in ENCODER_TYPES['pSp']:
net2 = pSp(opts)
else:
net2 = e4e(opts)
net2.eval()
net2.cuda()
print('Loading dataset for {}'.format(opts.dataset_type))
dataset_args = data_configs.DATASETS[opts.dataset_type]
transforms_dict = dataset_args['transforms'](opts).get_transforms()
dataset = InferenceDataset(root=opts.data_path,
transform=transforms_dict['transform_inference'],
opts=opts)
dataloader = DataLoader(dataset,
batch_size=opts.test_batch_size,
shuffle=False,
num_workers=int(opts.test_workers),
drop_last=False)
if opts.n_images is None:
opts.n_images = len(dataset)
# get the image corresponding to the latent average
avg_image = get_average_image(net1, opts)
resize_amount = (256, 256) if opts.resize_outputs else (opts.output_size, opts.output_size)
global_i = 0
global_time = []
for input_batch in tqdm(dataloader):
if global_i >= opts.n_images:
break
with torch.no_grad():
input_cuda = input_batch.cuda().float()
tic = time.time()
result_batch = run_on_batch(input_cuda, net1, net2, opts, avg_image)
toc = time.time()
global_time.append(toc - tic)
for i in range(input_batch.shape[0]):
results = [tensor2im(result_batch[i][iter_idx]) for iter_idx in range(opts.n_iters_per_batch + 1)]
im_path = dataset.paths[global_i]
input_im = tensor2im(input_batch[i])
# save step-by-step results side-by-side
res = np.array(results[0].resize(resize_amount))
for idx, result in enumerate(results[1:]):
res = np.concatenate([res, np.array(result.resize(resize_amount))], axis=1)
res = np.concatenate([res, input_im.resize(resize_amount)], axis=1)
Image.fromarray(res).save(os.path.join(out_path_results, os.path.basename(im_path)))
global_i += 1
stats_path = os.path.join(opts.exp_dir, 'stats.txt')
result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time))
print(result_str)
with open(stats_path, 'w') as f:
f.write(result_str)
def run_on_batch(inputs, net1, net2, opts, avg_image):
y_hat, latent = None, None
results_batch = {idx: [] for idx in range(inputs.shape[0])}
# initialize using the first net
avg_image_for_batch = avg_image.unsqueeze(0).repeat(inputs.shape[0], 1, 1, 1)
x_input = torch.cat([inputs, avg_image_for_batch], dim=1)
y_hat, latent = net1.forward(x_input,
latent=latent,
randomize_noise=False,
return_latents=True,
resize=opts.resize_outputs)
for idx in range(inputs.shape[0]):
results_batch[idx].append(y_hat[idx])
y_hat = net1.face_pool(y_hat)
# iteratively translate using the resulting latent and generated image
for iter in range(opts.n_iters_per_batch):
x_input = torch.cat([inputs, y_hat], dim=1)
y_hat, latent = net2.forward(x_input,
latent=latent,
randomize_noise=False,
return_latents=True,
resize=opts.resize_outputs)
for idx in range(inputs.shape[0]):
results_batch[idx].append(y_hat[idx])
y_hat = net1.face_pool(y_hat)
return results_batch
if __name__ == '__main__':
run()
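# Example invocation (sketch; checkpoint and data paths are placeholders). Model 1
# provides the initialization (e.g. an FFHQ encoder) and model 2 is then applied
# iteratively on top of it (e.g. a toonified-domain encoder):
#
#   python scripts/encoder_bootstrapping_inference.py \
#       --exp_dir=experiments/bootstrapping \
#       --model_1_checkpoint_path=pretrained_models/restyle_psp_ffhq.pt \
#       --model_2_checkpoint_path=pretrained_models/restyle_psp_toonify.pt \
#       --data_path=data/test_images --test_batch_size=4 --n_iters_per_batch=1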
| 5,208 | 34.678082 | 110 | py |
IID_representation_learning | IID_representation_learning-master/restyle/scripts/inference_iterative.py | import os
from argparse import Namespace
from tqdm import tqdm
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
import sys
sys.path.append(".")
sys.path.append("..")
from configs import data_configs
from datasets.inference_dataset import InferenceDataset
from options.test_options import TestOptions
from models.psp import pSp
from models.e4e import e4e
from utils.model_utils import ENCODER_TYPES
from utils.common import tensor2im
from utils.inference_utils import run_on_batch, get_average_image
def run():
test_opts = TestOptions().parse()
out_path_results = os.path.join(test_opts.exp_dir, 'inference_results')
os.makedirs(out_path_results, exist_ok=True)
# update test options with options used during training
ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
opts = ckpt['opts']
opts.update(vars(test_opts))
opts = Namespace(**opts)
if opts.encoder_type in ENCODER_TYPES['pSp']:
net = pSp(opts)
else:
net = e4e(opts)
net.eval()
net.cuda()
print('Loading dataset for {}'.format(opts.dataset_type))
dataset_args = data_configs.DATASETS[opts.dataset_type]
transforms_dict = dataset_args['transforms'](opts).get_transforms()
dataset = InferenceDataset(root=opts.data_path,
transform=transforms_dict['transform_inference'],
opts=opts)
dataloader = DataLoader(dataset,
batch_size=opts.test_batch_size,
shuffle=False,
num_workers=int(opts.test_workers),
drop_last=False)
if opts.n_images is None:
opts.n_images = len(dataset)
# get the image corresponding to the latent average
avg_image = get_average_image(net, opts)
if opts.dataset_type == "cars_encode":
resize_amount = (256, 192) if opts.resize_outputs else (512, 384)
else:
resize_amount = (256, 256) if opts.resize_outputs else (opts.output_size, opts.output_size)
global_i = 0
global_time = []
all_latents = {}
for input_batch in tqdm(dataloader):
if global_i >= opts.n_images:
break
with torch.no_grad():
input_cuda = input_batch.cuda().float()
tic = time.time()
result_batch, result_latents = run_on_batch(input_cuda, net, opts, avg_image)
toc = time.time()
global_time.append(toc - tic)
for i in range(input_batch.shape[0]):
results = [tensor2im(result_batch[i][iter_idx]) for iter_idx in range(opts.n_iters_per_batch)]
im_path = dataset.paths[global_i]
# save step-by-step results side-by-side
for idx, result in enumerate(results):
save_dir = os.path.join(out_path_results, str(idx))
os.makedirs(save_dir, exist_ok=True)
result.resize(resize_amount).save(os.path.join(save_dir, os.path.basename(im_path)))
# store all latents with dict pairs (image_name, latents)
all_latents[os.path.basename(im_path)] = result_latents[i]
global_i += 1
stats_path = os.path.join(opts.exp_dir, 'stats.txt')
result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time))
print(result_str)
with open(stats_path, 'w') as f:
f.write(result_str)
# save all latents as npy file
np.save(os.path.join(test_opts.exp_dir, 'latents.npy'), all_latents)
if __name__ == '__main__':
run()
| 3,601 | 32.351852 | 106 | py |
IID_representation_learning | IID_representation_learning-master/restyle/scripts/inference_iterative_save_coupled.py | import os
from argparse import Namespace
from tqdm import tqdm
import time
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
import sys
sys.path.append(".")
sys.path.append("..")
from configs import data_configs
from datasets.inference_dataset import InferenceDataset
from options.test_options import TestOptions
from models.psp import pSp
from models.e4e import e4e
from utils.model_utils import ENCODER_TYPES
from utils.common import tensor2im
from utils.inference_utils import run_on_batch, get_average_image
def run():
test_opts = TestOptions().parse()
out_path_coupled = os.path.join(test_opts.exp_dir, 'inference_coupled')
os.makedirs(out_path_coupled, exist_ok=True)
# update test options with options used during training
ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
opts = ckpt['opts']
opts.update(vars(test_opts))
opts = Namespace(**opts)
if opts.encoder_type in ENCODER_TYPES['pSp']:
net = pSp(opts)
else:
net = e4e(opts)
net.eval()
net.cuda()
print('Loading dataset for {}'.format(opts.dataset_type))
dataset_args = data_configs.DATASETS[opts.dataset_type]
transforms_dict = dataset_args['transforms'](opts).get_transforms()
dataset = InferenceDataset(root=opts.data_path,
transform=transforms_dict['transform_inference'],
opts=opts)
dataloader = DataLoader(dataset,
batch_size=opts.test_batch_size,
shuffle=False,
num_workers=int(opts.test_workers),
drop_last=False)
if opts.n_images is None:
opts.n_images = len(dataset)
# get the image corresponding to the latent average
avg_image = get_average_image(net, opts)
if opts.dataset_type == "cars_encode":
resize_amount = (256, 192) if opts.resize_outputs else (512, 384)
else:
resize_amount = (256, 256) if opts.resize_outputs else (opts.output_size, opts.output_size)
global_i = 0
global_time = []
for input_batch in tqdm(dataloader):
if global_i >= opts.n_images:
break
with torch.no_grad():
input_cuda = input_batch.cuda().float()
tic = time.time()
result_batch, result_latents = run_on_batch(input_cuda, net, opts, avg_image)
toc = time.time()
global_time.append(toc - tic)
for i in range(input_batch.shape[0]):
results = [tensor2im(result_batch[i][iter_idx]) for iter_idx in range(opts.n_iters_per_batch)]
im_path = dataset.paths[global_i]
# save step-by-step results side-by-side
input_im = tensor2im(input_batch[i])
res = np.array(results[0].resize(resize_amount))
for idx, result in enumerate(results[1:]):
res = np.concatenate([res, np.array(result.resize(resize_amount))], axis=1)
res = np.concatenate([res, input_im.resize(resize_amount)], axis=1)
Image.fromarray(res).save(os.path.join(out_path_coupled, os.path.basename(im_path)))
global_i += 1
stats_path = os.path.join(opts.exp_dir, 'stats.txt')
result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time))
print(result_str)
with open(stats_path, 'w') as f:
f.write(result_str)
if __name__ == '__main__':
run()
| 3,513 | 32.466667 | 106 | py |
IID_representation_learning | IID_representation_learning-master/restyle/scripts/train_restyle_e4e.py | """
This file runs the main training/val loop
"""
import os
import json
import math
import sys
import pprint
import torch
from argparse import Namespace
sys.path.append(".")
sys.path.append("..")
from options.e4e_train_options import e4eTrainOptions
from training.coach_restyle_e4e import Coach
def main():
opts = e4eTrainOptions().parse()
previous_train_ckpt = None
if opts.resume_training_from_ckpt:
opts, previous_train_ckpt = load_train_checkpoint(opts)
else:
setup_progressive_steps(opts)
create_initial_experiment_dir(opts)
coach = Coach(opts, previous_train_ckpt)
coach.train()
def load_train_checkpoint(opts):
train_ckpt_path = opts.resume_training_from_ckpt
previous_train_ckpt = torch.load(opts.resume_training_from_ckpt, map_location='cpu')
new_opts_dict = vars(opts)
opts = previous_train_ckpt['opts']
opts['resume_training_from_ckpt'] = train_ckpt_path
update_new_configs(opts, new_opts_dict)
pprint.pprint(opts)
opts = Namespace(**opts)
if opts.sub_exp_dir is not None:
sub_exp_dir = opts.sub_exp_dir
opts.exp_dir = os.path.join(opts.exp_dir, sub_exp_dir)
create_initial_experiment_dir(opts)
return opts, previous_train_ckpt
def setup_progressive_steps(opts):
log_size = int(math.log(opts.output_size, 2))
num_style_layers = 2 * log_size - 2
num_deltas = num_style_layers - 1
if opts.progressive_start is not None: # If progressive delta training
opts.progressive_steps = [0]
next_progressive_step = opts.progressive_start
for i in range(num_deltas):
opts.progressive_steps.append(next_progressive_step)
next_progressive_step += opts.progressive_step_every
assert opts.progressive_steps is None or is_valid_progressive_steps(opts, num_style_layers), \
"Invalid progressive training input"
def is_valid_progressive_steps(opts, num_style_layers):
return len(opts.progressive_steps) == num_style_layers and opts.progressive_steps[0] == 0
def create_initial_experiment_dir(opts):
os.makedirs(opts.exp_dir, exist_ok=True)
opts_dict = vars(opts)
pprint.pprint(opts_dict)
with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f:
json.dump(opts_dict, f, indent=4, sort_keys=True)
def update_new_configs(ckpt_opts, new_opts):
for k, v in new_opts.items():
if k not in ckpt_opts:
ckpt_opts[k] = v
if new_opts['update_param_list']:
for param in new_opts['update_param_list']:
ckpt_opts[param] = new_opts[param]
if __name__ == '__main__':
main()
| 2,438 | 27.360465 | 95 | py |
IID_representation_learning | IID_representation_learning-master/restyle/scripts/train_restyle_psp.py | """
This file runs the main training/val loop
"""
import os
import json
import sys
import pprint
sys.path.append(".")
sys.path.append("..")
from options.train_options import TrainOptions
from training.coach_restyle_psp import Coach
def main():
opts = TrainOptions().parse()
os.makedirs(opts.exp_dir, exist_ok=True)
opts_dict = vars(opts)
pprint.pprint(opts_dict)
with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f:
json.dump(opts_dict, f, indent=4, sort_keys=True)
coach = Coach(opts)
coach.train()
if __name__ == '__main__':
main()
| 560 | 17.096774 | 61 | py |
IID_representation_learning | IID_representation_learning-master/restyle/training/__init__.py | 0 | 0 | 0 | py |
|
IID_representation_learning | IID_representation_learning-master/restyle/training/coach_restyle_e4e.py | from models.encoders.restyle_e4e_encoders import ProgressiveStage
from models.e4e_modules.discriminator import LatentCodesDiscriminator
from models.e4e_modules.latent_codes_pool import LatentCodesPool
from training.ranger import Ranger
from models.e4e import e4e
from criteria.lpips.lpips import LPIPS
from datasets.images_dataset import ImagesDataset
from configs import data_configs
from criteria import id_loss, moco_loss
from utils import common, train_utils
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch import nn, autograd
import torch
import os
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
class Coach:
def __init__(self, opts, prev_train_checkpoint=None):
self.opts = opts
self.global_step = 0
self.device = 'cuda:0'
self.opts.device = self.device
# Initialize network
self.net = e4e(self.opts).to(self.device)
# Estimate latent_avg via dense sampling if latent_avg is not available
if self.net.latent_avg is None:
self.net.latent_avg = self.net.decoder.mean_latent(int(1e5))[
0].detach()
# get the image corresponding to the latent average
self.avg_image = self.net(self.net.latent_avg.unsqueeze(0),
input_code=True,
randomize_noise=False,
return_latents=False,
average_code=True)[0]
self.avg_image = self.avg_image.to(self.device).float().detach()
if self.opts.dataset_type == "cars_encode":
self.avg_image = self.avg_image[:, 32:224, :]
common.tensor2im(self.avg_image).save(
os.path.join(self.opts.exp_dir, 'avg_image.jpg'))
# Initialize loss
if self.opts.id_lambda > 0 and self.opts.moco_lambda > 0:
raise ValueError(
'Both ID and MoCo loss have lambdas > 0! Please select only one to have non-zero lambda!')
self.mse_loss = nn.MSELoss().to(self.device).eval()
if self.opts.lpips_lambda > 0:
self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
if self.opts.id_lambda > 0:
self.id_loss = id_loss.IDLoss().to(self.device).eval()
if self.opts.moco_lambda > 0:
self.moco_loss = moco_loss.MocoLoss()
# Initialize optimizer
self.optimizer = self.configure_optimizers()
# Initialize discriminator
if self.opts.w_discriminator_lambda > 0:
self.discriminator = LatentCodesDiscriminator(
512, 4).to(self.device)
self.discriminator_optimizer = torch.optim.Adam(
list(self.discriminator.parameters()), lr=opts.w_discriminator_lr)
self.real_w_pool = LatentCodesPool(self.opts.w_pool_size)
self.fake_w_pool = LatentCodesPool(self.opts.w_pool_size)
# Initialize dataset
self.train_dataset, self.test_dataset = self.configure_datasets()
self.train_dataloader = DataLoader(self.train_dataset,
batch_size=self.opts.batch_size,
shuffle=True,
num_workers=int(self.opts.workers),
drop_last=True)
self.test_dataloader = DataLoader(self.test_dataset,
batch_size=self.opts.test_batch_size,
shuffle=False,
num_workers=int(
self.opts.test_workers),
drop_last=True)
# Initialize logger
log_dir = os.path.join(opts.exp_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
self.logger = SummaryWriter(log_dir=log_dir)
# Initialize checkpoint dir
self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
os.makedirs(self.checkpoint_dir, exist_ok=True)
self.best_val_loss = None
if self.opts.save_interval is None:
self.opts.save_interval = self.opts.max_steps
if prev_train_checkpoint is not None:
self.load_from_train_checkpoint(prev_train_checkpoint)
prev_train_checkpoint = None
def load_from_train_checkpoint(self, ckpt):
print('Loading previous training data...')
self.global_step = ckpt['global_step'] + 1
self.best_val_loss = ckpt['best_val_loss']
self.net.load_state_dict(ckpt['state_dict'])
if self.opts.w_discriminator_lambda > 0:
self.discriminator.load_state_dict(
ckpt['discriminator_state_dict'])
self.discriminator_optimizer.load_state_dict(
ckpt['discriminator_optimizer_state_dict'])
if self.opts.progressive_steps:
self.check_for_progressive_training_update(
is_resume_from_ckpt=True)
print(f'Resuming training from step {self.global_step}')
def compute_discriminator_loss(self, x):
avg_image_for_batch = self.avg_image.unsqueeze(
0).repeat(x.shape[0], 1, 1, 1)
avg_image_for_batch.clone().detach().requires_grad_(True)
x_input = torch.cat([x, avg_image_for_batch], dim=1)
disc_loss_dict = {}
if self.is_training_discriminator():
disc_loss_dict = self.train_discriminator(x_input)
return disc_loss_dict
def perform_train_iteration_on_batch(self, x, y):
y_hat, latent = None, None
loss_dict, id_logs = None, None
y_hats = {idx: [] for idx in range(x.shape[0])}
for iter in range(self.opts.n_iters_per_batch):
if iter == 0:
avg_image_for_batch = self.avg_image.unsqueeze(
0).repeat(x.shape[0], 1, 1, 1)
x_input = torch.cat([x, avg_image_for_batch], dim=1)
y_hat, latent = self.net.forward(
x_input, latent=None, return_latents=True)
else:
y_hat_clone = y_hat.clone().detach().requires_grad_(True)
latent_clone = latent.clone().detach().requires_grad_(True)
x_input = torch.cat([x, y_hat_clone], dim=1)
y_hat, latent = self.net.forward(
x_input, latent=latent_clone, return_latents=True)
if self.opts.dataset_type == "cars_encode":
y_hat = y_hat[:, :, 32:224, :]
loss, loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
loss.backward()
# store intermediate outputs
for idx in range(x.shape[0]):
y_hats[idx].append([y_hat[idx], id_logs[idx]['diff_target']])
return y_hats, loss_dict, id_logs
def train(self):
self.net.train()
if self.opts.progressive_steps:
self.check_for_progressive_training_update()
while self.global_step < self.opts.max_steps:
for batch_idx, batch in enumerate(self.train_dataloader):
x, y = batch
x, y = x.to(self.device).float(), y.to(self.device).float()
disc_loss_dict = self.compute_discriminator_loss(x)
self.optimizer.zero_grad()
y_hats, encoder_loss_dict, id_logs = self.perform_train_iteration_on_batch(
x, y)
self.optimizer.step()
loss_dict = {**disc_loss_dict, **encoder_loss_dict}
# Logging related
if self.global_step % self.opts.image_interval == 0 or (self.global_step < 1000 and self.global_step % 100 == 0):
self.parse_and_log_images(
id_logs, x, y, y_hats, title='images/train')
if self.global_step % self.opts.board_interval == 0:
self.print_metrics(loss_dict, prefix='train')
self.log_metrics(loss_dict, prefix='train')
# Validation related
val_loss_dict = None
if self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps:
val_loss_dict = self.validate()
if val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss):
self.best_val_loss = val_loss_dict['loss']
self.checkpoint_me(val_loss_dict, is_best=True)
if self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps:
if val_loss_dict is not None:
self.checkpoint_me(val_loss_dict, is_best=False)
else:
self.checkpoint_me(loss_dict, is_best=False)
if self.global_step == self.opts.max_steps:
print('OMG, finished training!')
break
self.global_step += 1
if self.opts.progressive_steps:
self.check_for_progressive_training_update()
def perform_val_iteration_on_batch(self, x, y):
y_hat, latent = None, None
cur_loss_dict, id_logs = None, None
y_hats = {idx: [] for idx in range(x.shape[0])}
for iter in range(self.opts.n_iters_per_batch):
if iter == 0:
avg_image_for_batch = self.avg_image.unsqueeze(
0).repeat(x.shape[0], 1, 1, 1)
x_input = torch.cat([x, avg_image_for_batch], dim=1)
else:
x_input = torch.cat([x, y_hat], dim=1)
y_hat, latent = self.net.forward(
x_input, latent=latent, return_latents=True)
if self.opts.dataset_type == "cars_encode":
y_hat = y_hat[:, :, 32:224, :]
loss, cur_loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
# store intermediate outputs
for idx in range(x.shape[0]):
y_hats[idx].append([y_hat[idx], id_logs[idx]['diff_target']])
return y_hats, cur_loss_dict, id_logs
def validate(self):
self.net.eval()
agg_loss_dict = []
for batch_idx, batch in enumerate(self.test_dataloader):
x, y = batch
x, y = x.to(self.device).float(), y.to(self.device).float()
# validate discriminator on batch
avg_image_for_batch = self.avg_image.unsqueeze(
0).repeat(x.shape[0], 1, 1, 1)
x_input = torch.cat([x, avg_image_for_batch], dim=1)
cur_disc_loss_dict = {}
if self.is_training_discriminator():
cur_disc_loss_dict = self.validate_discriminator(x_input)
# validate encoder on batch
with torch.no_grad():
y_hats, cur_enc_loss_dict, id_logs = self.perform_val_iteration_on_batch(
x, y)
cur_loss_dict = {**cur_disc_loss_dict, **cur_enc_loss_dict}
agg_loss_dict.append(cur_loss_dict)
# Logging related
self.parse_and_log_images(id_logs, x, y, y_hats,
title='images/test',
subscript='{:04d}'.format(batch_idx))
# For first step just do sanity test on small amount of data
if self.global_step == 0 and batch_idx >= 4:
self.net.train()
return None # Do not log, inaccurate in first batch
loss_dict = train_utils.aggregate_loss_dict(agg_loss_dict)
self.log_metrics(loss_dict, prefix='test')
self.print_metrics(loss_dict, prefix='test')
self.net.train()
return loss_dict
def checkpoint_me(self, loss_dict, is_best):
save_name = 'best_model.pt' if is_best else 'iteration_{}.pt'.format(
self.global_step)
save_dict = self.__get_save_dict()
checkpoint_path = os.path.join(self.checkpoint_dir, save_name)
torch.save(save_dict, checkpoint_path)
with open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f:
if is_best:
f.write('**Best**: Step - {}, Loss - {:.3f} \n{}\n'.format(
self.global_step, self.best_val_loss, loss_dict))
else:
f.write('Step - {}, \n{}\n'.format(self.global_step, loss_dict))
def configure_optimizers(self):
params = list(self.net.encoder.parameters())
if self.opts.train_decoder:
params += list(self.net.decoder.parameters())
else:
self.requires_grad(self.net.decoder, False)
if self.opts.optim_name == 'adam':
optimizer = torch.optim.Adam(params, lr=self.opts.learning_rate)
else:
optimizer = Ranger(params, lr=self.opts.learning_rate)
return optimizer
def configure_datasets(self):
if self.opts.dataset_type not in data_configs.DATASETS.keys():
raise Exception('{} is not a valid dataset_type'.format(
self.opts.dataset_type))
print('Loading dataset for {}'.format(self.opts.dataset_type))
dataset_args = data_configs.DATASETS[self.opts.dataset_type]
transforms_dict = dataset_args['transforms'](
self.opts).get_transforms()
train_dataset = ImagesDataset(source_root=dataset_args['train_source_root'],
target_root=dataset_args['train_target_root'],
source_transform=transforms_dict['transform_source'],
target_transform=transforms_dict['transform_gt_train'],
opts=self.opts)
test_dataset = ImagesDataset(source_root=dataset_args['test_source_root'],
target_root=dataset_args['test_target_root'],
source_transform=transforms_dict['transform_source'],
target_transform=transforms_dict['transform_test'],
opts=self.opts)
print("Number of training samples: {}".format(len(train_dataset)))
print("Number of test samples: {}".format(len(test_dataset)))
return train_dataset, test_dataset
def calc_loss(self, x, y, y_hat, latent):
loss_dict = {}
loss = 0.0
id_logs = None
# Adversarial loss
if self.is_training_discriminator():
loss_disc = self.compute_adversarial_loss(latent, loss_dict)
loss += self.opts.w_discriminator_lambda * loss_disc
# delta regularization loss
if self.opts.progressive_steps and self.net.encoder.progressive_stage.value != 18:
total_delta_loss = self.compute_delta_regularization_loss(
latent, loss_dict)
loss += self.opts.delta_norm_lambda * total_delta_loss
# similarity losses
if self.opts.id_lambda > 0:
loss_id, sim_improvement, id_logs = self.id_loss(y_hat, y, x)
loss_dict['loss_id'] = float(loss_id)
loss_dict['id_improve'] = float(sim_improvement)
loss += loss_id * self.opts.id_lambda
if self.opts.l2_lambda > 0:
loss_l2 = F.mse_loss(y_hat, y)
loss_dict['loss_l2'] = float(loss_l2)
loss += loss_l2 * self.opts.l2_lambda
if self.opts.lpips_lambda > 0:
loss_lpips = self.lpips_loss(y_hat, y)
loss_dict['loss_lpips'] = float(loss_lpips)
loss += loss_lpips * self.opts.lpips_lambda
if self.opts.moco_lambda > 0:
loss_moco, sim_improvement, id_logs = self.moco_loss(y_hat, y, x)
loss_dict['loss_moco'] = float(loss_moco)
loss_dict['id_improve'] = float(sim_improvement)
loss += loss_moco * self.opts.moco_lambda
loss_dict['loss'] = float(loss)
return loss, loss_dict, id_logs
def compute_adversarial_loss(self, latent, loss_dict):
loss_disc = 0.
dims_to_discriminate = self.get_dims_to_discriminate() if self.is_progressive_training() else \
list(range(self.net.decoder.n_latent))
for i in dims_to_discriminate:
w = latent[:, i, :]
fake_pred = self.discriminator(w)
loss_disc += F.softplus(-fake_pred).mean()
loss_disc /= len(dims_to_discriminate)
loss_dict['encoder_discriminator_loss'] = float(loss_disc)
return loss_disc
def compute_delta_regularization_loss(self, latent, loss_dict):
total_delta_loss = 0
deltas_latent_dims = self.net.encoder.get_deltas_starting_dimensions()
first_w = latent[:, 0, :]
for i in range(1, self.net.encoder.progressive_stage.value + 1):
curr_dim = deltas_latent_dims[i]
delta = latent[:, curr_dim, :] - first_w
delta_loss = torch.norm(delta, self.opts.delta_norm, dim=1).mean()
loss_dict[f"delta{i}_loss"] = float(delta_loss)
total_delta_loss += delta_loss
loss_dict['total_delta_loss'] = float(total_delta_loss)
return total_delta_loss
def log_metrics(self, metrics_dict, prefix):
for key, value in metrics_dict.items():
self.logger.add_scalar(
'{}/{}'.format(prefix, key), value, self.global_step)
def print_metrics(self, metrics_dict, prefix):
print('Metrics for {}, step {}'.format(prefix, self.global_step))
for key, value in metrics_dict.items():
print('\t{} = '.format(key), value)
def parse_and_log_images(self, id_logs, x, y, y_hat, title, subscript=None, display_count=2):
im_data = []
for i in range(display_count):
if type(y_hat) == dict:
output_face = [
[common.tensor2im(y_hat[i][iter_idx][0]),
y_hat[i][iter_idx][1]]
for iter_idx in range(len(y_hat[i]))
]
else:
output_face = [common.tensor2im(y_hat[i])]
cur_im_data = {
'input_face': common.tensor2im(x[i]),
'target_face': common.tensor2im(y[i]),
'output_face': output_face,
}
if id_logs is not None:
for key in id_logs[i]:
cur_im_data[key] = id_logs[i][key]
im_data.append(cur_im_data)
self.log_images(title, im_data=im_data, subscript=subscript)
def log_images(self, name, im_data, subscript=None, log_latest=False):
fig = common.vis_faces(im_data)
step = self.global_step
if log_latest:
step = 0
if subscript:
path = os.path.join(self.logger.log_dir, name,
'{}_{:04d}.jpg'.format(subscript, step))
else:
path = os.path.join(self.logger.log_dir, name,
'{:04d}.jpg'.format(step))
os.makedirs(os.path.dirname(path), exist_ok=True)
fig.savefig(path)
plt.close(fig)
def __get_save_dict(self):
save_dict = {
'state_dict': self.net.state_dict(),
'opts': vars(self.opts),
'global_step': self.global_step,
'optimizer': self.optimizer.state_dict(),
'best_val_loss': self.best_val_loss,
'latent_avg': self.net.latent_avg
}
if self.opts.w_discriminator_lambda > 0:
save_dict['discriminator_state_dict'] = self.discriminator.state_dict()
save_dict['discriminator_optimizer_state_dict'] = self.discriminator_optimizer.state_dict()
return save_dict
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Util Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def get_dims_to_discriminate(self):
deltas_starting_dimensions = self.net.encoder.get_deltas_starting_dimensions()
return deltas_starting_dimensions[:self.net.encoder.progressive_stage.value + 1]
def is_progressive_training(self):
return self.opts.progressive_steps is not None
def check_for_progressive_training_update(self, is_resume_from_ckpt=False):
for i in range(len(self.opts.progressive_steps)):
# Case checkpoint
if is_resume_from_ckpt and self.global_step >= self.opts.progressive_steps[i]:
self.net.encoder.set_progressive_stage(ProgressiveStage(i))
# Case training reached progressive step
if self.global_step == self.opts.progressive_steps[i]:
self.net.encoder.set_progressive_stage(ProgressiveStage(i))
@staticmethod
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Discriminator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def is_training_discriminator(self):
return self.opts.w_discriminator_lambda > 0
@staticmethod
def discriminator_loss(real_pred, fake_pred, loss_dict):
real_loss = F.softplus(-real_pred).mean()
fake_loss = F.softplus(fake_pred).mean()
loss_dict['d_real_loss'] = float(real_loss)
loss_dict['d_fake_loss'] = float(fake_loss)
return real_loss + fake_loss
@staticmethod
def discriminator_r1_loss(real_pred, real_w):
grad_real, = autograd.grad(
outputs=real_pred.sum(), inputs=real_w, create_graph=True)
grad_penalty = grad_real.pow(2).reshape(
grad_real.shape[0], -1).sum(1).mean()
return grad_penalty
def train_discriminator(self, x):
loss_dict = {}
self.requires_grad(self.discriminator, True)
with torch.no_grad():
real_w, fake_w = self.sample_real_and_fake_latents(x)
real_pred = self.discriminator(real_w)
fake_pred = self.discriminator(fake_w)
loss = self.discriminator_loss(real_pred, fake_pred, loss_dict)
loss_dict['discriminator_loss'] = float(loss)
self.discriminator_optimizer.zero_grad()
loss.backward()
self.discriminator_optimizer.step()
# r1 regularization
d_regularize = self.global_step % self.opts.d_reg_every == 0
if d_regularize:
real_w = real_w.detach()
real_w.requires_grad = True
real_pred = self.discriminator(real_w)
r1_loss = self.discriminator_r1_loss(real_pred, real_w)
self.discriminator.zero_grad()
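            # Lazy R1 regularization: the penalty is scaled by d_reg_every to compensate
            # for being applied only once every d_reg_every steps, and `0 * real_pred[0]`
            # keeps the discriminator output attached to the loss graph (a standard
            # StyleGAN2 training trick).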
r1_final_loss = self.opts.r1 / 2 * r1_loss * \
self.opts.d_reg_every + 0 * real_pred[0]
r1_final_loss.backward()
self.discriminator_optimizer.step()
loss_dict['discriminator_r1_loss'] = float(r1_final_loss)
# Reset to previous state
self.requires_grad(self.discriminator, False)
return loss_dict
def validate_discriminator(self, x):
with torch.no_grad():
loss_dict = {}
real_w, fake_w = self.sample_real_and_fake_latents(x)
real_pred = self.discriminator(real_w)
fake_pred = self.discriminator(fake_w)
loss = self.discriminator_loss(real_pred, fake_pred, loss_dict)
loss_dict['discriminator_loss'] = float(loss)
return loss_dict
def sample_real_and_fake_latents(self, x):
sample_z = torch.randn(self.opts.batch_size, 512, device=self.device)
real_w = self.net.decoder.get_latent(sample_z)
fake_w = self.net.encoder(x)
if self.is_progressive_training(): # When progressive training, feed only unique w's
dims_to_discriminate = self.get_dims_to_discriminate()
fake_w = fake_w[:, dims_to_discriminate, :]
if self.opts.use_w_pool:
real_w = self.real_w_pool.query(real_w)
fake_w = self.fake_w_pool.query(fake_w)
if fake_w.ndim == 3:
fake_w = fake_w[:, 0, :]
return real_w, fake_w
| 24,241 | 43.318099 | 129 | py |
IID_representation_learning | IID_representation_learning-master/restyle/training/coach_restyle_psp.py | from training.ranger import Ranger
from models.psp import pSp
from criteria.lpips.lpips import LPIPS
from wilds import get_dataset
from wilds.common.data_loaders import get_train_loader, get_eval_loader
from configs import data_configs
from criteria import id_loss, w_norm, moco_loss
from utils import common, train_utils, data_utils
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch import nn
import torch
import os
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
class Coach:
def __init__(self, opts):
self.opts = opts
self.global_step = 0
self.device = 'cuda:0'
self.opts.device = self.device
# Initialize network
self.net = pSp(self.opts).to(self.device)
# Estimate latent_avg via dense sampling if latent_avg is not available
if self.net.latent_avg is None:
self.net.latent_avg = self.net.decoder.mean_latent(int(1e5))[
0].detach()
# get the image corresponding to the latent average
self.avg_image = self.net(self.net.latent_avg.unsqueeze(0),
input_code=True,
randomize_noise=False,
return_latents=False,
average_code=True)[0]
self.avg_image = self.avg_image.to(self.device).float().detach()
if self.opts.dataset_type == "cars_encode":
self.avg_image = self.avg_image[:, 32:224, :]
common.tensor2im(self.avg_image).save(
os.path.join(self.opts.exp_dir, 'avg_image.jpg'))
# Initialize loss
if self.opts.id_lambda > 0 and self.opts.moco_lambda > 0:
raise ValueError(
'Both ID and MoCo loss have lambdas > 0! Please select only one to have non-zero lambda!')
self.mse_loss = nn.MSELoss().to(self.device).eval()
if self.opts.lpips_lambda > 0:
self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
if self.opts.id_lambda > 0:
self.id_loss = id_loss.IDLoss().to(self.device).eval()
if self.opts.w_norm_lambda > 0:
self.w_norm_loss = w_norm.WNormLoss(
start_from_latent_avg=self.opts.start_from_latent_avg)
if self.opts.moco_lambda > 0:
self.moco_loss = moco_loss.MocoLoss()
# Initialize optimizer
self.optimizer = self.configure_optimizers()
# Initialize dataset
self.train_dataset, self.test_dataset = self.configure_datasets()
self.train_dataloader = get_train_loader('standard',
self.train_dataset,
batch_size=self.opts.batch_size,
**{'num_workers': int(self.opts.workers),
'drop_last': True})
self.test_dataloader = get_eval_loader('standard',
self.test_dataset,
batch_size=self.opts.test_batch_size,
**{'num_workers': int(self.opts.test_workers),
'drop_last': False})
# Initialize logger
log_dir = os.path.join(opts.exp_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
self.logger = SummaryWriter(log_dir=log_dir)
# Initialize checkpoint dir
self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
os.makedirs(self.checkpoint_dir, exist_ok=True)
self.best_val_loss = None
if self.opts.save_interval is None:
self.opts.save_interval = self.opts.max_steps
def perform_train_iteration_on_batch(self, x, y):
y_hat, latent = None, None
loss_dict, id_logs = None, None
y_hats = {idx: [] for idx in range(x.shape[0])}
for iter in range(self.opts.n_iters_per_batch):
if iter == 0:
avg_image_for_batch = self.avg_image.unsqueeze(
0).repeat(x.shape[0], 1, 1, 1)
x_input = torch.cat([x, avg_image_for_batch], dim=1)
y_hat, latent = self.net.forward(x_input,
latent=None,
return_latents=True)
else:
y_hat_clone = y_hat.clone().detach().requires_grad_(True)
latent_clone = latent.clone().detach().requires_grad_(True)
x_input = torch.cat([x, y_hat_clone], dim=1)
y_hat, latent = self.net.forward(x_input,
latent=latent_clone,
return_latents=True)
if self.opts.dataset_type == "cars_encode":
y_hat = y_hat[:, :, 32:224, :]
loss, loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
loss.backward()
# store intermediate outputs
for idx in range(x.shape[0]):
y_hats[idx].append([y_hat[idx], id_logs[idx]['diff_target']])
return y_hats, loss_dict, id_logs
def train(self):
self.net.train()
while self.global_step < self.opts.max_steps:
for batch_idx, (x, _, _) in enumerate(self.train_dataloader):
self.optimizer.zero_grad()
x, y = x.to(self.device).float(), x.to(self.device).float()
y_hats, loss_dict, id_logs = self.perform_train_iteration_on_batch(
x, y)
self.optimizer.step()
# Logging related
if self.global_step % self.opts.image_interval == 0 or (self.global_step < 1000 and self.global_step % 100 == 0):
self.parse_and_log_images(
id_logs, x, y, y_hats, title='images/train')
if self.global_step % self.opts.board_interval == 0:
self.print_metrics(loss_dict, prefix='train')
self.log_metrics(loss_dict, prefix='train')
# Validation related
val_loss_dict = None
if self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps:
val_loss_dict = self.validate()
if val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss):
self.best_val_loss = val_loss_dict['loss']
self.checkpoint_me(val_loss_dict, is_best=True)
if self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps:
if val_loss_dict is not None:
self.checkpoint_me(val_loss_dict, is_best=False)
else:
self.checkpoint_me(loss_dict, is_best=False)
if self.global_step == self.opts.max_steps:
print('OMG, finished training!')
break
self.global_step += 1
def perform_val_iteration_on_batch(self, x, y):
y_hat, latent = None, None
cur_loss_dict, id_logs = None, None
y_hats = {idx: [] for idx in range(x.shape[0])}
for iter in range(self.opts.n_iters_per_batch):
if iter == 0:
avg_image_for_batch = self.avg_image.unsqueeze(
0).repeat(x.shape[0], 1, 1, 1)
x_input = torch.cat([x, avg_image_for_batch], dim=1)
else:
x_input = torch.cat([x, y_hat], dim=1)
y_hat, latent = self.net.forward(
x_input, latent=latent, return_latents=True)
if self.opts.dataset_type == "cars_encode":
y_hat = y_hat[:, :, 32:224, :]
loss, cur_loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
# store intermediate outputs
for idx in range(x.shape[0]):
y_hats[idx].append([y_hat[idx], id_logs[idx]['diff_target']])
return y_hats, cur_loss_dict, id_logs
def validate(self):
self.net.eval()
agg_loss_dict = []
for batch_idx, (x, lab, meta) in enumerate(self.test_dataloader):
with torch.no_grad():
x, y = x.to(self.device).float(), x.to(self.device).float()
y_hats, cur_loss_dict, id_logs = self.perform_val_iteration_on_batch(
x, y)
agg_loss_dict.append(cur_loss_dict)
# Logging related
self.parse_and_log_images(id_logs, x, y, y_hats, title='images/test', subscript='{:04d}'.format(batch_idx),
display_count=x.shape[0])
# For first step just do sanity test on small amount of data
if self.global_step == 0 and batch_idx >= 4:
self.net.train()
return None # Do not log, inaccurate in first batch
if batch_idx >= 8:
break
loss_dict = train_utils.aggregate_loss_dict(agg_loss_dict)
self.log_metrics(loss_dict, prefix='test')
self.print_metrics(loss_dict, prefix='test')
self.net.train()
return loss_dict
def checkpoint_me(self, loss_dict, is_best):
save_name = 'best_model.pt' if is_best else 'iteration_{}.pt'.format(
self.global_step)
save_dict = self.__get_save_dict()
checkpoint_path = os.path.join(self.checkpoint_dir, save_name)
torch.save(save_dict, checkpoint_path)
with open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f:
if is_best:
f.write('**Best**: Step - {}, Loss - {:.3f} \n{}\n'.format(
self.global_step, self.best_val_loss, loss_dict))
else:
f.write('Step - {}, \n{}\n'.format(self.global_step, loss_dict))
def configure_optimizers(self):
params = list(self.net.encoder.parameters())
if self.opts.train_decoder:
params += list(self.net.decoder.parameters())
if self.opts.optim_name == 'adam':
optimizer = torch.optim.Adam(params, lr=self.opts.learning_rate)
else:
optimizer = Ranger(params, lr=self.opts.learning_rate)
return optimizer
def configure_datasets(self):
if self.opts.dataset_type not in data_configs.DATASETS.keys():
raise Exception('{} is not a valid dataset_type'.format(
self.opts.dataset_type))
print('Loading dataset for {}'.format(self.opts.dataset_type))
dataset_args = data_configs.DATASETS[self.opts.dataset_type]
transforms_dict = dataset_args['transforms'](
self.opts).get_transforms()
dataset = get_dataset(dataset=self.opts.dataset_type,
root_dir=dataset_args['train_source_root'],
split_scheme=self.opts.split_scheme)
train_dataset = dataset.get_subset('train',
transform=transforms_dict['transform_gt_train'])
test_dataset = dataset.get_subset('train',
transform=transforms_dict['transform_test'])
print("Number of training samples: {}".format(len(train_dataset)))
print("Number of test samples: {}".format(len(test_dataset)))
return train_dataset, test_dataset
def calc_loss(self, x, y, y_hat, latent):
loss_dict = {}
loss = 0.0
id_logs = None
if self.opts.id_lambda > 0:
loss_id, sim_improvement, id_logs = self.id_loss(y_hat, y, x)
loss_dict['loss_id'] = float(loss_id)
loss_dict['id_improve'] = float(sim_improvement)
loss = loss_id * self.opts.id_lambda
if self.opts.l2_lambda > 0:
loss_l2 = F.mse_loss(y_hat, y)
loss_dict['loss_l2'] = float(loss_l2)
loss += loss_l2 * self.opts.l2_lambda
if self.opts.lpips_lambda > 0:
loss_lpips = self.lpips_loss(y_hat, y)
loss_dict['loss_lpips'] = float(loss_lpips)
loss += loss_lpips * self.opts.lpips_lambda
if self.opts.w_norm_lambda > 0:
loss_w_norm = self.w_norm_loss(latent, self.net.latent_avg)
loss_dict['loss_w_norm'] = float(loss_w_norm)
loss += loss_w_norm * self.opts.w_norm_lambda
if self.opts.moco_lambda > 0:
loss_moco, sim_improvement, id_logs = self.moco_loss(y_hat, y, x)
loss_dict['loss_moco'] = float(loss_moco)
loss_dict['id_improve'] = float(sim_improvement)
loss += loss_moco * self.opts.moco_lambda
loss_dict['loss'] = float(loss)
return loss, loss_dict, id_logs
def log_metrics(self, metrics_dict, prefix):
for key, value in metrics_dict.items():
self.logger.add_scalar(
'{}/{}'.format(prefix, key), value, self.global_step)
def print_metrics(self, metrics_dict, prefix):
print('Metrics for {}, step {}'.format(prefix, self.global_step))
for key, value in metrics_dict.items():
print('\t{} = '.format(key), value)
def parse_and_log_images(self, id_logs, x, y, y_hat, title, subscript=None, display_count=2):
im_data = []
for i in range(display_count):
if type(y_hat) == dict:
output_face = [
[common.tensor2im(y_hat[i][iter_idx][0]),
y_hat[i][iter_idx][1]]
for iter_idx in range(len(y_hat[i]))
]
else:
output_face = [common.tensor2im(y_hat[i])]
cur_im_data = {
'input_face': common.tensor2im(x[i]),
'target_face': common.tensor2im(y[i]),
'output_face': output_face,
}
if id_logs is not None:
for key in id_logs[i]:
cur_im_data[key] = id_logs[i][key]
im_data.append(cur_im_data)
self.log_images(title, im_data=im_data, subscript=subscript)
def log_images(self, name, im_data, subscript=None, log_latest=False):
fig = common.vis_faces(im_data)
step = self.global_step
if log_latest:
step = 0
if subscript:
path = os.path.join(self.logger.log_dir, name,
'{}_{:04d}.jpg'.format(subscript, step))
else:
path = os.path.join(self.logger.log_dir, name,
'{:04d}.jpg'.format(step))
os.makedirs(os.path.dirname(path), exist_ok=True)
fig.savefig(path)
plt.close(fig)
def __get_save_dict(self):
save_dict = {
'state_dict': self.net.state_dict(),
'opts': vars(self.opts),
'latent_avg': self.net.latent_avg
}
return save_dict
| 15,114 | 42.811594 | 129 | py |
IID_representation_learning | IID_representation_learning-master/restyle/training/ranger.py | # Ranger deep learning optimizer - RAdam + Lookahead + Gradient Centralization, combined into one optimizer.
# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
# and/or
# https://github.com/lessw2020/Best-Deep-Learning-Optimizers
# Ranger has now been used to capture 12 records on the FastAI leaderboard.
# This version = 20.4.11
# Credits:
# Gradient Centralization --> https://arxiv.org/abs/2004.01461v2 (a new optimization technique for DNNs), github: https://github.com/Yonghongwei/Gradient-Centralization
# RAdam --> https://github.com/LiyuanLucasLiu/RAdam
# Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code.
# Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610
# summary of changes:
# 4/11/20 - add gradient centralization option. Set new testing benchmark for accuracy with it, toggle with use_gc flag at init.
# full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights),
# supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues.
# changes 8/31/19 - fix references to *self*.N_sma_threshold;
# changed eps to 1e-5 as better default than 1e-8.
import math
import torch
from torch.optim.optimizer import Optimizer
class Ranger(Optimizer):
def __init__(self, params, lr=1e-3, # lr
alpha=0.5, k=6, N_sma_threshhold=5, # Ranger options
betas=(.95, 0.999), eps=1e-5, weight_decay=0, # Adam options
use_gc=True, gc_conv_only=False
# Gradient centralization on or off, applied to conv layers only or conv + fc layers
):
# parameter checks
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
if not lr > 0:
raise ValueError(f'Invalid Learning Rate: {lr}')
if not eps > 0:
raise ValueError(f'Invalid eps: {eps}')
# parameter comments:
# beta1 (momentum) of .95 seems to work better than .90...
# N_sma_threshold of 5 seems better in testing than 4.
# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
# prep defaults and init torch.optim base
defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold,
eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
# adjustable threshold
self.N_sma_threshhold = N_sma_threshhold
# look ahead params
self.alpha = alpha
self.k = k
# radam buffer for state
self.radam_buffer = [[None, None, None] for ind in range(10)]
# gc on or off
self.use_gc = use_gc
# level of gradient centralization
self.gc_gradient_threshold = 3 if gc_conv_only else 1
def __setstate__(self, state):
super(Ranger, self).__setstate__(state)
def step(self, closure=None):
loss = None
# Evaluate averages and grad, update param tensors
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ranger optimizer does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p] # get state dict for this param
if len(state) == 0: # if first time to run...init dictionary with our desired entries
# if self.first_run_check==0:
# self.first_run_check=1
# print("Initializing slow buffer...should not see this at load from saved model!")
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
# look ahead weight storage now in state dict
state['slow_buffer'] = torch.empty_like(p.data)
state['slow_buffer'].copy_(p.data)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
# begin computations
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# GC operation for Conv layers and FC layers
if grad.dim() > self.gc_gradient_threshold:
grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))
state['step'] += 1
# compute variance mov avg
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# compute mean moving avg
exp_avg.mul_(beta1).add_(1 - beta1, grad)
buffered = self.radam_buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma > self.N_sma_threshhold:
step_size = math.sqrt(
(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# apply lr
if N_sma > self.N_sma_threshhold:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
# integrated look ahead...
# we do it at the param level instead of group level
if state['step'] % group['k'] == 0:
slow_p = state['slow_buffer'] # get access to slow param tensor
slow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha
p.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor
return loss | 5,899 | 34.97561 | 169 | py |
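# Minimal usage sketch (illustrative only; the model and hyper-parameter values below are
# assumptions, not recommendations from the original authors). Ranger is constructed and
# stepped like any torch.optim optimizer:
if __name__ == '__main__':
    _model = torch.nn.Linear(10, 2)
    _optimizer = Ranger(_model.parameters(), lr=1e-3, alpha=0.5, k=6)
    _loss = _model(torch.randn(4, 10)).sum()
    _optimizer.zero_grad()
    _loss.backward()
    _optimizer.step()  # RAdam update; slow/fast weights are interpolated every k steps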
IID_representation_learning | IID_representation_learning-master/restyle/utils/__init__.py | 0 | 0 | 0 | py |
|
IID_representation_learning | IID_representation_learning-master/restyle/utils/common.py | from PIL import Image
import matplotlib.pyplot as plt
def tensor2im(var):
var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
var = ((var + 1) / 2)
var[var < 0] = 0
var[var > 1] = 1
var = var * 255
return Image.fromarray(var.astype('uint8'))
def vis_faces(log_hooks):
display_count = len(log_hooks)
n_outputs = len(log_hooks[0]['output_face']) if type(
log_hooks[0]['output_face']) == list else 1
fig = plt.figure(figsize=(6 + (n_outputs * 2), 4 * display_count))
gs = fig.add_gridspec(display_count, (2 + n_outputs))
for i in range(display_count):
hooks_dict = log_hooks[i]
fig.add_subplot(gs[i, 0])
vis_faces_iterative(hooks_dict, fig, gs, i)
plt.tight_layout()
return fig
def vis_faces_iterative(hooks_dict, fig, gs, i):
plt.imshow(hooks_dict['input_face'])
plt.title('Input\nOut Sim={:.2f}'.format(float(hooks_dict['diff_input'])))
fig.add_subplot(gs[i, 1])
plt.imshow(hooks_dict['target_face'])
plt.title('Target\nIn={:.2f}, Out={:.2f}'.format(
float(hooks_dict['diff_views']), float(hooks_dict['diff_target'])))
for idx, output_idx in enumerate(range(len(hooks_dict['output_face']) - 1, -1, -1)):
output_image, similarity = hooks_dict['output_face'][output_idx]
fig.add_subplot(gs[i, 2 + idx])
plt.imshow(output_image)
plt.title('Output {}\n Target Sim={:.2f}'.format(
output_idx, float(similarity)))
| 1,486 | 35.268293 | 88 | py |
IID_representation_learning | IID_representation_learning-master/restyle/utils/data_utils.py | """
Code adopted from pix2pixHD:
https://github.com/NVIDIA/pix2pixHD/blob/master/data/image_folder.py
"""
import os
import torch
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images
def linspace(start, stop, num):
"""
    Creates a tensor of shape [num, *start.shape] whose values are evenly spaced from start to stop, inclusive.
    Replicates the multi-dimensional behaviour of numpy.linspace in PyTorch.
"""
# create a tensor of 'num' steps from 0 to 1
steps = torch.arange(num, dtype=torch.float32).to(start) / (num - 1)
    # reshape the 'steps' tensor to [-1, *([1]*start.ndim)] to allow for broadcasting
# - using 'steps.reshape([-1, *([1]*start.ndim)])' would be nice here but torchscript
# "cannot statically infer the expected size of a list in this contex", hence the code below
for i in range(start.ndim):
steps = steps.unsqueeze(-1)
# the output starts at 'start' and increments until 'stop' in each dimension
out = start[None] + steps*(stop - start)[None]
return out
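# Example of the broadcasting behaviour (illustrative, not part of the original file):
#   linspace(torch.zeros(2), torch.tensor([1., 2.]), num=3)
# returns a tensor of shape [3, 2]:
#   [[0.0, 0.0], [0.5, 1.0], [1.0, 2.0]]
# i.e. an independent, evenly spaced ramp from `start` to `stop` along the new first dimension.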
| 1,541 | 31.808511 | 110 | py |
IID_representation_learning | IID_representation_learning-master/restyle/utils/inference_utils.py | import torch
def get_average_image(net, opts):
avg_image = net(net.latent_avg.unsqueeze(0),
input_code=True,
randomize_noise=False,
return_latents=False,
average_code=True)[0]
avg_image = avg_image.to('cuda').float().detach()
if opts.dataset_type == "cars_encode":
avg_image = avg_image[:, 32:224, :]
return avg_image
def run_on_batch(inputs, net, opts, avg_image):
y_hat, latent = None, None
results_batch = {idx: [] for idx in range(inputs.shape[0])}
results_latent = {idx: [] for idx in range(inputs.shape[0])}
for iter in range(opts.n_iters_per_batch):
if iter == 0:
avg_image_for_batch = avg_image.unsqueeze(0).repeat(inputs.shape[0], 1, 1, 1)
x_input = torch.cat([inputs, avg_image_for_batch], dim=1)
else:
x_input = torch.cat([inputs, y_hat], dim=1)
y_hat, latent = net.forward(x_input,
latent=latent,
randomize_noise=False,
return_latents=True,
resize=opts.resize_outputs)
if opts.dataset_type == "cars_encode":
if opts.resize_outputs:
y_hat = y_hat[:, :, 32:224, :]
else:
y_hat = y_hat[:, :, 64:448, :]
# store intermediate outputs
for idx in range(inputs.shape[0]):
results_batch[idx].append(y_hat[idx])
results_latent[idx].append(latent[idx].cpu().numpy())
# resize input to 256 before feeding into next iteration
if opts.dataset_type == "cars_encode":
y_hat = torch.nn.AdaptiveAvgPool2d((192, 256))(y_hat)
else:
y_hat = net.face_pool(y_hat)
return results_batch, results_latent
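# Typical call pattern (mirroring the inference scripts in this repository; `net`, `opts`
# and `inputs` are assumed to be a loaded ReStyle model, its options, and a batch on GPU):
#   avg_image = get_average_image(net, opts)
#   with torch.no_grad():
#       result_batch, result_latents = run_on_batch(inputs, net, opts, avg_image)
#   # result_batch[i] holds one reconstruction per refinement iteration for sample i,
#   # and result_latents[i] the matching latent codes as numpy arrays.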
| 1,879 | 35.862745 | 89 | py |
IID_representation_learning | IID_representation_learning-master/restyle/utils/model_utils.py | # specify the encoder types for pSp and e4e - this is mainly used for the inference scripts
ENCODER_TYPES = {
'pSp': ['GradualStyleEncoder', 'ResNetGradualStyleEncoder', 'BackboneEncoder', 'ResNetBackboneEncoder'],
'e4e': ['ProgressiveBackboneEncoder', 'ResNetProgressiveBackboneEncoder']
}
RESNET_MAPPING = {
'layer1.0': 'body.0',
'layer1.1': 'body.1',
'layer1.2': 'body.2',
'layer2.0': 'body.3',
'layer2.1': 'body.4',
'layer2.2': 'body.5',
'layer2.3': 'body.6',
'layer3.0': 'body.7',
'layer3.1': 'body.8',
'layer3.2': 'body.9',
'layer3.3': 'body.10',
'layer3.4': 'body.11',
'layer3.5': 'body.12',
'layer4.0': 'body.13',
'layer4.1': 'body.14',
'layer4.2': 'body.15',
}
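# Illustrative helper showing how RESNET_MAPPING could be consumed when loading torchvision
# ResNet weights into an encoder whose blocks are named 'body.N'. This function is an
# assumption added for clarity and is not part of the original file.
def rename_resnet_keys(state_dict):
    renamed = {}
    for key, value in state_dict.items():
        new_key = key
        for torchvision_prefix, encoder_prefix in RESNET_MAPPING.items():
            if key.startswith(torchvision_prefix):
                new_key = encoder_prefix + key[len(torchvision_prefix):]
                break
        renamed[new_key] = value
    return renamed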
| 743 | 28.76 | 108 | py |
IID_representation_learning | IID_representation_learning-master/restyle/utils/train_utils.py |
def aggregate_loss_dict(agg_loss_dict):
mean_vals = {}
for output in agg_loss_dict:
for key in output:
mean_vals[key] = mean_vals.setdefault(key, []) + [output[key]]
for key in mean_vals:
if len(mean_vals[key]) > 0:
mean_vals[key] = sum(mean_vals[key]) / len(mean_vals[key])
else:
print('{} has no value'.format(key))
mean_vals[key] = 0
return mean_vals
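# Example (illustrative): aggregate_loss_dict([{'loss': 1.0}, {'loss': 3.0}]) returns
# {'loss': 2.0}; every key is averaged over the per-batch loss dictionaries.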
| 377 | 26 | 65 | py |
mmdetection | mmdetection-master/.owners.yml | assign:
strategy:
# random
daily-shift-based
scedule: "*/1 * * * *"
assignees:
- Czm369
- hhaAndroid
- zwhus
- RangiLyu
- BIGWangYuDong
- ZwwWayne
- ZwwWayne
| 200 | 13.357143 | 24 | yml |
mmdetection | mmdetection-master/.pre-commit-config.yaml | repos:
- repo: https://github.com/PyCQA/flake8
rev: 5.0.4
hooks:
- id: flake8
- repo: https://github.com/PyCQA/isort
rev: 5.11.5
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-yapf
rev: v0.32.0
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: trailing-whitespace
- id: check-yaml
- id: end-of-file-fixer
- id: requirements-txt-fixer
- id: double-quote-string-fixer
- id: check-merge-conflict
- id: fix-encoding-pragma
args: ["--remove"]
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/codespell-project/codespell
rev: v2.2.1
hooks:
- id: codespell
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.9
hooks:
- id: mdformat
args: ["--number"]
additional_dependencies:
- mdformat-openmmlab
- mdformat_frontmatter
- linkify-it-py
- repo: https://github.com/myint/docformatter
rev: v1.3.1
hooks:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
- repo: https://github.com/open-mmlab/pre-commit-hooks
rev: v0.2.0 # Use the ref you want to point at
hooks:
- id: check-algo-readme
- id: check-copyright
args: ["mmdet"] # replace the dir_to_check with your expected directory to check
| 1,452 | 27.490196 | 89 | yaml |
mmdetection | mmdetection-master/.readthedocs.yml | version: 2
formats: all
python:
version: 3.8
install:
- requirements: requirements/docs.txt
- requirements: requirements/readthedocs.txt
| 151 | 14.2 | 48 | yml |
mmdetection | mmdetection-master/README.md | <div align="center">
<img src="resources/mmdet-logo.png" width="600"/>
<div> </div>
<div align="center">
<b><font size="5">OpenMMLab website</font></b>
<sup>
<a href="https://openmmlab.com">
<i><font size="4">HOT</font></i>
</a>
</sup>
<b><font size="5">OpenMMLab platform</font></b>
<sup>
<a href="https://platform.openmmlab.com">
<i><font size="4">TRY IT OUT</font></i>
</a>
</sup>
</div>
<div> </div>
[](https://pypi.org/project/mmdet)
[](https://mmdetection.readthedocs.io/en/latest/)
[](https://github.com/open-mmlab/mmdetection/actions)
[](https://codecov.io/gh/open-mmlab/mmdetection)
[](https://github.com/open-mmlab/mmdetection/blob/master/LICENSE)
[](https://github.com/open-mmlab/mmdetection/issues)
[](https://github.com/open-mmlab/mmdetection/issues)
[📘Documentation](https://mmdetection.readthedocs.io/en/stable/) |
[🛠️Installation](https://mmdetection.readthedocs.io/en/stable/get_started.html) |
[👀Model Zoo](https://mmdetection.readthedocs.io/en/stable/model_zoo.html) |
[🆕Update News](https://mmdetection.readthedocs.io/en/stable/changelog.html) |
[🚀Ongoing Projects](https://github.com/open-mmlab/mmdetection/projects) |
[🤔Reporting Issues](https://github.com/open-mmlab/mmdetection/issues/new/choose)
</div>
<div align="center">
English | [简体中文](README_zh-CN.md)
</div>
<div align="center">
<a href="https://openmmlab.medium.com/" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218352562-cdded397-b0f3-4ca1-b8dd-a60df8dca75b.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://discord.gg/raweFPmdzG" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218347213-c080267f-cbb6-443e-8532-8e1ed9a58ea9.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://twitter.com/OpenMMLab" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218346637-d30c8a0f-3eba-4699-8131-512fb06d46db.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://www.youtube.com/openmmlab" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218346691-ceb2116a-465a-40af-8424-9f30d2348ca9.png" width="3%" alt="" /></a>
</div>
## Introduction
MMDetection is an open source object detection toolbox based on PyTorch. It is
a part of the [OpenMMLab](https://openmmlab.com/) project.
The master branch works with **PyTorch 1.5+**.
<img src="https://user-images.githubusercontent.com/12907710/137271636-56ba1cd2-b110-4812-8221-b4c120320aa9.png"/>
<details open>
<summary>Major features</summary>
- **Modular Design**
We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules.
- **Support of multiple frameworks out of box**
The toolbox directly supports popular and contemporary detection frameworks, *e.g.* Faster RCNN, Mask RCNN, RetinaNet, etc.
- **High efficiency**
All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet).
- **State of the art**
The toolbox stems from the codebase developed by the *MMDet* team, who won [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward.
</details>
Apart from MMDetection, we also released a library [mmcv](https://github.com/open-mmlab/mmcv) for computer vision research, which is heavily depended on by this toolbox.
## What's New
### 💎 Stable version
**2.28.2** was released on 27/2/2023:
- Fixed some known documentation, configuration and linking error issues
Please refer to [changelog.md](docs/en/changelog.md) for details and release history.
For compatibility changes between different versions of MMDetection, please refer to [compatibility.md](docs/en/compatibility.md).
### 🌟 Preview of 3.x version
#### Highlight
We are excited to announce our latest work on real-time object recognition tasks, **RTMDet**, a family of fully convolutional single-stage detectors. RTMDet not only achieves the best parameter-accuracy trade-off on object detection from tiny to extra-large model sizes but also obtains new state-of-the-art performance on instance segmentation and rotated object detection tasks. Details can be found in the [technical report](https://arxiv.org/abs/2212.07784). Pre-trained models are [here](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/rtmdet).
[](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real)
[](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real)
[](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real)
| Task | Dataset | AP | FPS(TRT FP16 BS1 3090) |
| ------------------------ | ------- | ------------------------------------ | ---------------------- |
| Object Detection | COCO | 52.8 | 322 |
| Instance Segmentation | COCO | 44.6 | 188 |
| Rotated Object Detection | DOTA | 78.9(single-scale)/81.3(multi-scale) | 121 |
<div align=center>
<img src="https://user-images.githubusercontent.com/12907710/208044554-1e8de6b5-48d8-44e4-a7b5-75076c7ebb71.png"/>
</div>
A brand new version of **MMDetection v3.0.0rc6** was released on 27/2/2023:
- Support [Boxinst](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/boxinst), [Objects365 Dataset](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/objects365), and [Separated and Occluded COCO metric](https://github.com/open-mmlab/mmdetection/tree/3.x/docs/en/user_guides/useful_tools.md#coco-separated--occluded-mask-metric)
- Support [ConvNeXt-V2](https://github.com/open-mmlab/mmdetection/tree/3.x/projects/ConvNeXt-V2), [DiffusionDet](https://github.com/open-mmlab/mmdetection/tree/3.x/projects/DiffusionDet), and inference of [EfficientDet](https://github.com/open-mmlab/mmdetection/tree/3.x/projects/EfficientDet) and [Detic](https://github.com/open-mmlab/mmdetection/tree/3.x/projects/Detic) in `Projects`
- Refactor [DETR](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/detr) series and support [Conditional-DETR](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/conditional_detr), [DAB-DETR](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/dab_detr), and [DINO](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/dino)
- Support DetInferencer, Test Time Augmentation, and auto import modules from registry
- Support RTMDet-Ins ONNXRuntime and TensorRT [deployment](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/rtmdet/README.md#deployment-tutorial)
- Support [calculating FLOPs of detectors](https://github.com/open-mmlab/mmdetection/tree/3.x/docs/en/user_guides/useful_tools.md#Model-Complexity)
Find more new features in [3.x branch](https://github.com/open-mmlab/mmdetection/tree/3.x). Issues and PRs are welcome!
## Installation
Please refer to [Installation](docs/en/get_started.md/#Installation) for installation instructions.
## Getting Started
Please see [get_started.md](docs/en/get_started.md) for the basic usage of MMDetection. We provide [colab tutorial](demo/MMDet_Tutorial.ipynb) and [instance segmentation colab tutorial](demo/MMDet_InstanceSeg_Tutorial.ipynb), and other tutorials for:
- [with existing dataset](docs/en/1_exist_data_model.md)
- [with new dataset](docs/en/2_new_data_model.md)
- [with existing dataset_new_model](docs/en/3_exist_data_new_model.md)
- [learn about configs](docs/en/tutorials/config.md)
- [customize_datasets](docs/en/tutorials/customize_dataset.md)
- [customize data pipelines](docs/en/tutorials/data_pipeline.md)
- [customize_models](docs/en/tutorials/customize_models.md)
- [customize runtime settings](docs/en/tutorials/customize_runtime.md)
- [customize_losses](docs/en/tutorials/customize_losses.md)
- [finetuning models](docs/en/tutorials/finetune.md)
- [export a model to ONNX](docs/en/tutorials/pytorch2onnx.md)
- [export ONNX to TRT](docs/en/tutorials/onnx2tensorrt.md)
- [weight initialization](docs/en/tutorials/init_cfg.md)
- [how to xxx](docs/en/tutorials/how_to.md)
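Complementing the tutorials above, the snippet below is a minimal inference sketch using the high-level `mmdet.apis` interface. The config, checkpoint and image paths are illustrative; any config/checkpoint pair from the model zoo can be substituted.
```python
from mmdet.apis import init_detector, inference_detector
# Illustrative paths: download the corresponding checkpoint from the model zoo first.
config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')
model.show_result('demo/demo.jpg', result, out_file='result.jpg')
```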
## Overview of Benchmark and Model Zoo
Results and models are available in the [model zoo](docs/en/model_zoo.md).
<div align="center">
<b>Architectures</b>
</div>
<table align="center">
<tbody>
<tr align="center" valign="bottom">
<td>
<b>Object Detection</b>
</td>
<td>
<b>Instance Segmentation</b>
</td>
<td>
<b>Panoptic Segmentation</b>
</td>
<td>
<b>Other</b>
</td>
</tr>
<tr valign="top">
<td>
<ul>
<li><a href="configs/fast_rcnn">Fast R-CNN (ICCV'2015)</a></li>
<li><a href="configs/faster_rcnn">Faster R-CNN (NeurIPS'2015)</a></li>
<li><a href="configs/rpn">RPN (NeurIPS'2015)</a></li>
<li><a href="configs/ssd">SSD (ECCV'2016)</a></li>
<li><a href="configs/retinanet">RetinaNet (ICCV'2017)</a></li>
<li><a href="configs/cascade_rcnn">Cascade R-CNN (CVPR'2018)</a></li>
<li><a href="configs/yolo">YOLOv3 (ArXiv'2018)</a></li>
<li><a href="configs/cornernet">CornerNet (ECCV'2018)</a></li>
<li><a href="configs/grid_rcnn">Grid R-CNN (CVPR'2019)</a></li>
<li><a href="configs/guided_anchoring">Guided Anchoring (CVPR'2019)</a></li>
<li><a href="configs/fsaf">FSAF (CVPR'2019)</a></li>
<li><a href="configs/centernet">CenterNet (ArXiv'2019)</a></li>
<li><a href="configs/libra_rcnn">Libra R-CNN (CVPR'2019)</a></li>
<li><a href="configs/tridentnet">TridentNet (ICCV'2019)</a></li>
<li><a href="configs/fcos">FCOS (ICCV'2019)</a></li>
<li><a href="configs/reppoints">RepPoints (ICCV'2019)</a></li>
<li><a href="configs/free_anchor">FreeAnchor (NeurIPS'2019)</a></li>
<li><a href="configs/cascade_rpn">CascadeRPN (NeurIPS'2019)</a></li>
<li><a href="configs/foveabox">Foveabox (TIP'2020)</a></li>
<li><a href="configs/double_heads">Double-Head R-CNN (CVPR'2020)</a></li>
<li><a href="configs/atss">ATSS (CVPR'2020)</a></li>
<li><a href="configs/nas_fcos">NAS-FCOS (CVPR'2020)</a></li>
<li><a href="configs/centripetalnet">CentripetalNet (CVPR'2020)</a></li>
<li><a href="configs/autoassign">AutoAssign (ArXiv'2020)</a></li>
<li><a href="configs/sabl">Side-Aware Boundary Localization (ECCV'2020)</a></li>
<li><a href="configs/dynamic_rcnn">Dynamic R-CNN (ECCV'2020)</a></li>
<li><a href="configs/detr">DETR (ECCV'2020)</a></li>
<li><a href="configs/paa">PAA (ECCV'2020)</a></li>
<li><a href="configs/vfnet">VarifocalNet (CVPR'2021)</a></li>
<li><a href="configs/sparse_rcnn">Sparse R-CNN (CVPR'2021)</a></li>
<li><a href="configs/yolof">YOLOF (CVPR'2021)</a></li>
<li><a href="configs/yolox">YOLOX (ArXiv'2021)</a></li>
<li><a href="configs/deformable_detr">Deformable DETR (ICLR'2021)</a></li>
<li><a href="configs/tood">TOOD (ICCV'2021)</a></li>
<li><a href="configs/ddod">DDOD (ACM MM'2021)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/mask_rcnn">Mask R-CNN (ICCV'2017)</a></li>
<li><a href="configs/cascade_rcnn">Cascade Mask R-CNN (CVPR'2018)</a></li>
<li><a href="configs/ms_rcnn">Mask Scoring R-CNN (CVPR'2019)</a></li>
<li><a href="configs/htc">Hybrid Task Cascade (CVPR'2019)</a></li>
<li><a href="configs/yolact">YOLACT (ICCV'2019)</a></li>
<li><a href="configs/instaboost">InstaBoost (ICCV'2019)</a></li>
<li><a href="configs/solo">SOLO (ECCV'2020)</a></li>
<li><a href="configs/point_rend">PointRend (CVPR'2020)</a></li>
<li><a href="configs/detectors">DetectoRS (CVPR'2021)</a></li>
<li><a href="configs/solov2">SOLOv2 (NeurIPS'2020)</a></li>
<li><a href="configs/scnet">SCNet (AAAI'2021)</a></li>
<li><a href="configs/queryinst">QueryInst (ICCV'2021)</a></li>
<li><a href="configs/mask2former">Mask2Former (CVPR'2022)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/panoptic_fpn">Panoptic FPN (CVPR'2019)</a></li>
<li><a href="configs/maskformer">MaskFormer (NeurIPS'2021)</a></li>
<li><a href="configs/mask2former">Mask2Former (CVPR'2022)</a></li>
</ul>
</td>
<td>
</ul>
<li><b>Contrastive Learning</b></li>
<ul>
<ul>
<li><a href="configs/selfsup_pretrain">SwAV (NeurIPS'2020)</a></li>
<li><a href="configs/selfsup_pretrain">MoCo (CVPR'2020)</a></li>
<li><a href="configs/selfsup_pretrain">MoCov2 (ArXiv'2020)</a></li>
</ul>
</ul>
</ul>
<li><b>Distillation</b></li>
<ul>
<ul>
<li><a href="configs/ld">Localization Distillation (CVPR'2022)</a></li>
<li><a href="configs/lad">Label Assignment Distillation (WACV'2022)</a></li>
</ul>
</ul>
</ul>
<li><b>Receptive Field Search</b></li>
<ul>
<ul>
<li><a href="configs/rfnext">RF-Next (TPAMI'2022)</a></li>
</ul>
</ul>
</ul>
</td>
</tr>
</td>
</tr>
</tbody>
</table>
<div align="center">
<b>Components</b>
</div>
<table align="center">
<tbody>
<tr align="center" valign="bottom">
<td>
<b>Backbones</b>
</td>
<td>
<b>Necks</b>
</td>
<td>
<b>Loss</b>
</td>
<td>
<b>Common</b>
</td>
</tr>
<tr valign="top">
<td>
<ul>
<li>VGG (ICLR'2015)</li>
<li>ResNet (CVPR'2016)</li>
<li>ResNeXt (CVPR'2017)</li>
<li>MobileNetV2 (CVPR'2018)</li>
<li><a href="configs/hrnet">HRNet (CVPR'2019)</a></li>
<li><a href="configs/empirical_attention">Generalized Attention (ICCV'2019)</a></li>
<li><a href="configs/gcnet">GCNet (ICCVW'2019)</a></li>
<li><a href="configs/res2net">Res2Net (TPAMI'2020)</a></li>
<li><a href="configs/regnet">RegNet (CVPR'2020)</a></li>
<li><a href="configs/resnest">ResNeSt (CVPRW'2022)</a></li>
<li><a href="configs/pvt">PVT (ICCV'2021)</a></li>
<li><a href="configs/swin">Swin (ICCV'2021)</a></li>
<li><a href="configs/pvt">PVTv2 (CVMJ'2022)</a></li>
<li><a href="configs/resnet_strikes_back">ResNet strikes back (NeurIPSW'2021)</a></li>
<li><a href="configs/efficientnet">EfficientNet (ICML'2019)</a></li>
<li><a href="configs/convnext">ConvNeXt (CVPR'2022)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/pafpn">PAFPN (CVPR'2018)</a></li>
<li><a href="configs/nas_fpn">NAS-FPN (CVPR'2019)</a></li>
<li><a href="configs/carafe">CARAFE (ICCV'2019)</a></li>
<li><a href="configs/fpg">FPG (ArXiv'2020)</a></li>
<li><a href="configs/groie">GRoIE (ICPR'2020)</a></li>
<li><a href="configs/dyhead">DyHead (CVPR'2021)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/ghm">GHM (AAAI'2019)</a></li>
<li><a href="configs/gfl">Generalized Focal Loss (NeurIPS'2020)</a></li>
        <li><a href="configs/seesaw_loss">Seesaw Loss (CVPR'2021)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py">OHEM (CVPR'2016)</a></li>
<li><a href="configs/gn">Group Normalization (ECCV'2018)</a></li>
<li><a href="configs/dcn">DCN (ICCV'2017)</a></li>
<li><a href="configs/dcnv2">DCNv2 (CVPR'2019)</a></li>
<li><a href="configs/gn+ws">Weight Standardization (ArXiv'2019)</a></li>
<li><a href="configs/pisa">Prime Sample Attention (CVPR'2020)</a></li>
<li><a href="configs/strong_baselines">Strong Baselines (CVPR'2021)</a></li>
        <li><a href="configs/resnet_strikes_back">ResNet strikes back (NeurIPSW'2021)</a></li>
<li><a href="configs/rfnext">RF-Next (TPAMI'2022)</a></li>
</ul>
</td>
</tr>
</td>
</tr>
</tbody>
</table>
Some other methods are also supported in [projects using MMDetection](./docs/en/projects.md).
## FAQ
Please refer to [FAQ](docs/en/faq.md) for frequently asked questions.
## Contributing
We appreciate all contributions to improve MMDetection. Ongoing projects can be found in our [GitHub Projects](https://github.com/open-mmlab/mmdetection/projects). We welcome community users to participate in these projects. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guidelines.
## Acknowledgement
MMDetection is an open source project that is contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback.
We hope that the toolbox and benchmark can serve the growing research community by providing a flexible toolkit for reimplementing existing methods and developing new detectors.
## Citation
If you use this toolbox or benchmark in your research, please cite this project.
```
@article{mmdetection,
title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
journal= {arXiv preprint arXiv:1906.07155},
year={2019}
}
```
## License
This project is released under the [Apache 2.0 license](LICENSE).
## Projects in OpenMMLab
- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models.
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
- [MMEval](https://github.com/open-mmlab/mmeval): A unified evaluation library for multiple machine learning libraries.
- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark.
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
| 22,446 | 53.615572 | 560 | md |
mmdetection | mmdetection-master/README_zh-CN.md | <div align="center">
<img src="resources/mmdet-logo.png" width="600"/>
<div> </div>
<div align="center">
<b><font size="5">OpenMMLab 官网</font></b>
<sup>
<a href="https://openmmlab.com">
<i><font size="4">HOT</font></i>
</a>
</sup>
<b><font size="5">OpenMMLab 开放平台</font></b>
<sup>
<a href="https://platform.openmmlab.com">
<i><font size="4">TRY IT OUT</font></i>
</a>
</sup>
</div>
<div> </div>
[](https://pypi.org/project/mmdet)
[](https://mmdetection.readthedocs.io/en/latest/)
[](https://github.com/open-mmlab/mmdetection/actions)
[](https://codecov.io/gh/open-mmlab/mmdetection)
[](https://github.com/open-mmlab/mmdetection/blob/master/LICENSE)
[](https://github.com/open-mmlab/mmdetection/issues)
[](https://github.com/open-mmlab/mmdetection/issues)
[📘使用文档](https://mmdetection.readthedocs.io/zh_CN/stable/) |
[🛠️安装教程](https://mmdetection.readthedocs.io/zh_CN/stable/get_started.html) |
[👀模型库](https://mmdetection.readthedocs.io/zh_CN/stable/model_zoo.html) |
[🆕更新日志](https://mmdetection.readthedocs.io/en/stable/changelog.html) |
[🚀进行中的项目](https://github.com/open-mmlab/mmdetection/projects) |
[🤔报告问题](https://github.com/open-mmlab/mmdetection/issues/new/choose)
</div>
<div align="center">
[English](README.md) | 简体中文
</div>
## 简介
MMDetection 是一个基于 PyTorch 的目标检测开源工具箱。它是 [OpenMMLab](https://openmmlab.com/) 项目的一部分。
主分支代码目前支持 PyTorch 1.5 以上的版本。
<img src="https://user-images.githubusercontent.com/12907710/137271636-56ba1cd2-b110-4812-8221-b4c120320aa9.png"/>
<details open>
<summary>主要特性</summary>
- **模块化设计**
MMDetection 将检测框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的检测模型
- **丰富的即插即用的算法和模型**
MMDetection 支持了众多主流的和最新的检测算法,例如 Faster R-CNN,Mask R-CNN,RetinaNet 等。
- **速度快**
基本的框和 mask 操作都实现了 GPU 版本,训练速度比其他代码库更快或者相当,包括 [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) 和 [SimpleDet](https://github.com/TuSimple/simpledet)。
- **性能高**
MMDetection 这个算法库源自于 COCO 2018 目标检测竞赛的冠军团队 *MMDet* 团队开发的代码,我们在之后持续进行了改进和提升。
</details>
除了 MMDetection 之外,我们还开源了计算机视觉基础库 [MMCV](https://github.com/open-mmlab/mmcv),MMCV 是 MMDetection 的主要依赖。
## 最新进展
### 💎 稳定版本
最新的 **2.28.2** 版本已经在 2023.2.27 发布:
- 修复了一些已知的文档、配置和链接错误问题
如果想了解更多版本更新细节和历史信息,请阅读[更新日志](docs/en/changelog.md)。
如果想了解 MMDetection 不同版本之间的兼容性, 请参考[兼容性说明文档](docs/zh_cn/compatibility.md)。
### 🌟 3.x 预览版本
#### 亮点
我们很高兴向大家介绍我们在实时目标识别任务方面的最新成果 RTMDet,包含了一系列的全卷积单阶段检测模型。 RTMDet 不仅在从 tiny 到 extra-large 尺寸的目标检测模型上实现了最佳的参数量和精度的平衡,而且在实时实例分割和旋转目标检测任务上取得了最先进的成果。 更多细节请参阅[技术报告](https://arxiv.org/abs/2212.07784)。 预训练模型可以在[这里](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/rtmdet)找到。
[](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real)
[](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real)
[](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real)
| Task | Dataset | AP | FPS(TRT FP16 BS1 3090) |
| ------------------------ | ------- | ------------------------------------ | ---------------------- |
| Object Detection | COCO | 52.8 | 322 |
| Instance Segmentation | COCO | 44.6 | 188 |
| Rotated Object Detection | DOTA | 78.9(single-scale)/81.3(multi-scale) | 121 |
<div align=center>
<img src="https://user-images.githubusercontent.com/12907710/208044554-1e8de6b5-48d8-44e4-a7b5-75076c7ebb71.png"/>
</div>
全新的 **v3.0.0rc6** 版本已经在 2023.2.27 发布:
- 支持了 [Boxinst](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/boxinst), [Objects365 Dataset](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/objects365) 和 [Separated and Occluded COCO metric](https://github.com/open-mmlab/mmdetection/tree/3.x/docs/zh_cn/user_guides/useful_tools.md#coco-分离和遮挡实例分割性能评估)
- 在 `Projects` 中支持了 [ConvNeXt-V2](https://github.com/open-mmlab/mmdetection/tree/3.x/projects/ConvNeXt-V2), [DiffusionDet](https://github.com/open-mmlab/mmdetection/tree/3.x/projects/DiffusionDet) 和 [EfficientDet](https://github.com/open-mmlab/mmdetection/tree/3.x/projects/EfficientDet), [Detic](https://github.com/open-mmlab/mmdetection/tree/3.x/projects/Detic) 的推理
- 重构了 [DETR](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/detr) 系列并支持了 [Conditional-DETR](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/conditional_detr), [DAB-DETR](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/dab_detr) 和 [DINO](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/dino)
- 支持了 DetInferencer, Test Time Augmentation 以及从注册表 (registry) 自动导入模块
- 支持了 RTMDet-Ins 的 ONNXRuntime 和 TensorRT [部署](https://github.com/open-mmlab/mmdetection/tree/3.x/configs/rtmdet/README.md#deployment-tutorial)
- 支持了检测器[计算 FLOPS](https://github.com/open-mmlab/mmdetection/tree/3.x/docs/zh_cn/user_guides/useful_tools.md#模型复杂度)
## 安装
请参考[安装指令](docs/zh_cn/get_started.md/#Installation)进行安装。
## 教程
请参考[快速入门文档](docs/zh_cn/get_started.md)学习 MMDetection 的基本使用。
我们提供了 [检测的 colab 教程](demo/MMDet_Tutorial.ipynb) 和 [实例分割的 colab 教程](demo/MMDet_InstanceSeg_Tutorial.ipynb),也为新手提供了完整的运行教程,其他教程如下
- [使用已有模型在标准数据集上进行推理](docs/zh_cn/1_exist_data_model.md)
- [在自定义数据集上进行训练](docs/zh_cn/2_new_data_model.md)
- [在标准数据集上训练自定义模型](docs/zh_cn/3_exist_data_new_model.md)
- [学习配置文件](docs/zh_cn/tutorials/config.md)
- [自定义数据集](docs/zh_cn/tutorials/customize_dataset.md)
- [自定义数据预处理流程](docs/zh_cn/tutorials/data_pipeline.md)
- [自定义模型](docs/zh_cn/tutorials/customize_models.md)
- [自定义训练配置](docs/zh_cn/tutorials/customize_runtime.md)
- [自定义损失函数](docs/zh_cn/tutorials/customize_losses.md)
- [模型微调](docs/zh_cn/tutorials/finetune.md)
- [Pytorch 到 ONNX 的模型转换](docs/zh_cn/tutorials/pytorch2onnx.md)
- [ONNX 到 TensorRT 的模型转换](docs/zh_cn/tutorials/onnx2tensorrt.md)
- [权重初始化](docs/zh_cn/tutorials/init_cfg.md)
- [how to xxx](docs/zh_cn/tutorials/how_to.md)
同时,我们还提供了 [MMDetection 中文解读文案汇总](docs/zh_cn/article.md)
## 基准测试和模型库
测试结果和模型可以在[模型库](docs/zh_cn/model_zoo.md)中找到。
<div align="center">
<b>算法架构</b>
</div>
<table align="center">
<tbody>
<tr align="center" valign="bottom">
<td>
<b>Object Detection</b>
</td>
<td>
<b>Instance Segmentation</b>
</td>
<td>
<b>Panoptic Segmentation</b>
</td>
<td>
<b>Other</b>
</td>
</tr>
<tr valign="top">
<td>
<ul>
<li><a href="configs/fast_rcnn">Fast R-CNN (ICCV'2015)</a></li>
<li><a href="configs/faster_rcnn">Faster R-CNN (NeurIPS'2015)</a></li>
<li><a href="configs/rpn">RPN (NeurIPS'2015)</a></li>
<li><a href="configs/ssd">SSD (ECCV'2016)</a></li>
<li><a href="configs/retinanet">RetinaNet (ICCV'2017)</a></li>
<li><a href="configs/cascade_rcnn">Cascade R-CNN (CVPR'2018)</a></li>
<li><a href="configs/yolo">YOLOv3 (ArXiv'2018)</a></li>
<li><a href="configs/cornernet">CornerNet (ECCV'2018)</a></li>
<li><a href="configs/grid_rcnn">Grid R-CNN (CVPR'2019)</a></li>
<li><a href="configs/guided_anchoring">Guided Anchoring (CVPR'2019)</a></li>
<li><a href="configs/fsaf">FSAF (CVPR'2019)</a></li>
<li><a href="configs/centernet">CenterNet (ArXiv'2019)</a></li>
<li><a href="configs/libra_rcnn">Libra R-CNN (CVPR'2019)</a></li>
<li><a href="configs/tridentnet">TridentNet (ICCV'2019)</a></li>
<li><a href="configs/fcos">FCOS (ICCV'2019)</a></li>
<li><a href="configs/reppoints">RepPoints (ICCV'2019)</a></li>
<li><a href="configs/free_anchor">FreeAnchor (NeurIPS'2019)</a></li>
<li><a href="configs/cascade_rpn">CascadeRPN (NeurIPS'2019)</a></li>
<li><a href="configs/foveabox">Foveabox (TIP'2020)</a></li>
<li><a href="configs/double_heads">Double-Head R-CNN (CVPR'2020)</a></li>
<li><a href="configs/atss">ATSS (CVPR'2020)</a></li>
<li><a href="configs/nas_fcos">NAS-FCOS (CVPR'2020)</a></li>
<li><a href="configs/centripetalnet">CentripetalNet (CVPR'2020)</a></li>
<li><a href="configs/autoassign">AutoAssign (ArXiv'2020)</a></li>
<li><a href="configs/sabl">Side-Aware Boundary Localization (ECCV'2020)</a></li>
<li><a href="configs/dynamic_rcnn">Dynamic R-CNN (ECCV'2020)</a></li>
<li><a href="configs/detr">DETR (ECCV'2020)</a></li>
<li><a href="configs/paa">PAA (ECCV'2020)</a></li>
<li><a href="configs/vfnet">VarifocalNet (CVPR'2021)</a></li>
<li><a href="configs/sparse_rcnn">Sparse R-CNN (CVPR'2021)</a></li>
<li><a href="configs/yolof">YOLOF (CVPR'2021)</a></li>
<li><a href="configs/yolox">YOLOX (ArXiv'2021)</a></li>
<li><a href="configs/deformable_detr">Deformable DETR (ICLR'2021)</a></li>
<li><a href="configs/tood">TOOD (ICCV'2021)</a></li>
<li><a href="configs/ddod">DDOD (ACM MM'2021)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/mask_rcnn">Mask R-CNN (ICCV'2017)</a></li>
<li><a href="configs/cascade_rcnn">Cascade Mask R-CNN (CVPR'2018)</a></li>
<li><a href="configs/ms_rcnn">Mask Scoring R-CNN (CVPR'2019)</a></li>
<li><a href="configs/htc">Hybrid Task Cascade (CVPR'2019)</a></li>
<li><a href="configs/yolact">YOLACT (ICCV'2019)</a></li>
<li><a href="configs/instaboost">InstaBoost (ICCV'2019)</a></li>
<li><a href="configs/solo">SOLO (ECCV'2020)</a></li>
<li><a href="configs/point_rend">PointRend (CVPR'2020)</a></li>
<li><a href="configs/detectors">DetectoRS (CVPR'2021)</a></li>
<li><a href="configs/solov2">SOLOv2 (NeurIPS'2020)</a></li>
<li><a href="configs/scnet">SCNet (AAAI'2021)</a></li>
<li><a href="configs/queryinst">QueryInst (ICCV'2021)</a></li>
<li><a href="configs/mask2former">Mask2Former (CVPR'2022)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/panoptic_fpn">Panoptic FPN (CVPR'2019)</a></li>
<li><a href="configs/maskformer">MaskFormer (NeurIPS'2021)</a></li>
<li><a href="configs/mask2former">Mask2Former (CVPR'2022)</a></li>
</ul>
</td>
<td>
</ul>
<li><b>Contrastive Learning</b></li>
<ul>
<ul>
<li><a href="configs/selfsup_pretrain">SwAV (NeurIPS'2020)</a></li>
<li><a href="configs/selfsup_pretrain">MoCo (CVPR'2020)</a></li>
<li><a href="configs/selfsup_pretrain">MoCov2 (ArXiv'2020)</a></li>
</ul>
</ul>
</ul>
<li><b>Distillation</b></li>
<ul>
<ul>
<li><a href="configs/ld">Localization Distillation (CVPR'2022)</a></li>
<li><a href="configs/lad">Label Assignment Distillation (WACV'2022)</a></li>
</ul>
</ul>
</ul>
<li><b>Receptive Field Search</b></li>
<ul>
<ul>
<li><a href="configs/rfnext">RF-Next (TPAMI'2022)</a></li>
</ul>
</ul>
</ul>
</td>
</tr>
</td>
</tr>
</tbody>
</table>
<div align="center">
<b>模块组件</b>
</div>
<table align="center">
<tbody>
<tr align="center" valign="bottom">
<td>
<b>Backbones</b>
</td>
<td>
<b>Necks</b>
</td>
<td>
<b>Loss</b>
</td>
<td>
<b>Common</b>
</td>
</tr>
<tr valign="top">
<td>
<ul>
<li>VGG (ICLR'2015)</li>
<li>ResNet (CVPR'2016)</li>
<li>ResNeXt (CVPR'2017)</li>
<li>MobileNetV2 (CVPR'2018)</li>
<li><a href="configs/hrnet">HRNet (CVPR'2019)</a></li>
<li><a href="configs/empirical_attention">Generalized Attention (ICCV'2019)</a></li>
<li><a href="configs/gcnet">GCNet (ICCVW'2019)</a></li>
<li><a href="configs/res2net">Res2Net (TPAMI'2020)</a></li>
<li><a href="configs/regnet">RegNet (CVPR'2020)</a></li>
<li><a href="configs/resnest">ResNeSt (CVPRW'2022)</a></li>
<li><a href="configs/pvt">PVT (ICCV'2021)</a></li>
<li><a href="configs/swin">Swin (ICCV'2021)</a></li>
<li><a href="configs/pvt">PVTv2 (CVMJ'2022)</a></li>
<li><a href="configs/resnet_strikes_back">ResNet strikes back (NeurIPSW'2021)</a></li>
<li><a href="configs/efficientnet">EfficientNet (ICML'2019)</a></li>
<li><a href="configs/convnext">ConvNeXt (CVPR'2022)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/pafpn">PAFPN (CVPR'2018)</a></li>
<li><a href="configs/nas_fpn">NAS-FPN (CVPR'2019)</a></li>
<li><a href="configs/carafe">CARAFE (ICCV'2019)</a></li>
<li><a href="configs/fpg">FPG (ArXiv'2020)</a></li>
<li><a href="configs/groie">GRoIE (ICPR'2020)</a></li>
<li><a href="configs/dyhead">DyHead (CVPR'2021)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/ghm">GHM (AAAI'2019)</a></li>
<li><a href="configs/gfl">Generalized Focal Loss (NeurIPS'2020)</a></li>
        <li><a href="configs/seesaw_loss">Seesaw Loss (CVPR'2021)</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py">OHEM (CVPR'2016)</a></li>
<li><a href="configs/gn">Group Normalization (ECCV'2018)</a></li>
<li><a href="configs/dcn">DCN (ICCV'2017)</a></li>
<li><a href="configs/dcnv2">DCNv2 (CVPR'2019)</a></li>
<li><a href="configs/gn+ws">Weight Standardization (ArXiv'2019)</a></li>
<li><a href="configs/pisa">Prime Sample Attention (CVPR'2020)</a></li>
<li><a href="configs/strong_baselines">Strong Baselines (CVPR'2021)</a></li>
        <li><a href="configs/resnet_strikes_back">ResNet strikes back (NeurIPSW'2021)</a></li>
<li><a href="configs/rfnext">RF-Next (TPAMI'2022)</a></li>
</ul>
</td>
</tr>
</td>
</tr>
</tbody>
</table>
我们在[基于 MMDetection 的项目](./docs/zh_cn/projects.md)中列举了一些其他的支持的算法。
## 常见问题
请参考 [FAQ](docs/zh_cn/faq.md) 了解其他用户的常见问题。
## 贡献指南
我们感谢所有的贡献者为改进和提升 MMDetection 所作出的努力。我们将正在进行中的项目添加进了[GitHub Projects](https://github.com/open-mmlab/mmdetection/projects)页面,非常欢迎社区用户能参与进这些项目中来。请参考[贡献指南](.github/CONTRIBUTING.md)来了解参与项目贡献的相关指引。
## 致谢
MMDetection 是一款由来自不同高校和企业的研发人员共同参与贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 我们希望这个工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现已有算法并开发自己的新模型,从而不断为开源社区提供贡献。
## 引用
如果你在研究中使用了本项目的代码或者性能基准,请参考如下 bibtex 引用 MMDetection。
```
@article{mmdetection,
title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
journal= {arXiv preprint arXiv:1906.07155},
year={2019}
}
```
## 开源许可证
该项目采用 [Apache 2.0 开源许可证](LICENSE)。
## OpenMMLab 的其他项目
- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab 深度学习模型训练基础库
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库
- [MMEval](https://github.com/open-mmlab/mmeval): 统一开放的跨框架算法评测库
- [MIM](https://github.com/open-mmlab/mim): MIM 是 OpenMMlab 项目、算法、模型的统一入口
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab 图像分类工具箱
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱
- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准
- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准
- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准
- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台
- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准
- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱
- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱
- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架
## 欢迎加入 OpenMMLab 社区
扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的官方交流 QQ 群
<div align="center">
<img src="resources/zhihu_qrcode.jpg" height="400" /> <img src="https://cdn.vansin.top/OpenMMLab/q3.png" height="400" />
</div>
我们会在 OpenMMLab 社区为大家
- 📢 分享 AI 框架的前沿核心技术
- 💻 解读 PyTorch 常用模块源码
- 📰 发布 OpenMMLab 的相关新闻
- 🚀 介绍 OpenMMLab 开发的前沿算法
- 🏃 获取更高效的问题答疑和意见反馈
- 🔥 提供与各行各业开发者充分交流的平台
干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬
| 18,742 | 44.055288 | 367 | md |
mmdetection | mmdetection-master/model-index.yml | Import:
- configs/atss/metafile.yml
- configs/autoassign/metafile.yml
- configs/carafe/metafile.yml
- configs/cascade_rcnn/metafile.yml
- configs/cascade_rpn/metafile.yml
- configs/centernet/metafile.yml
- configs/centripetalnet/metafile.yml
- configs/cornernet/metafile.yml
- configs/convnext/metafile.yml
- configs/dcn/metafile.yml
- configs/dcnv2/metafile.yml
- configs/deformable_detr/metafile.yml
- configs/detectors/metafile.yml
- configs/detr/metafile.yml
- configs/double_heads/metafile.yml
- configs/dyhead/metafile.yml
- configs/dynamic_rcnn/metafile.yml
- configs/efficientnet/metafile.yml
- configs/empirical_attention/metafile.yml
- configs/faster_rcnn/metafile.yml
- configs/fcos/metafile.yml
- configs/foveabox/metafile.yml
- configs/fpg/metafile.yml
- configs/free_anchor/metafile.yml
- configs/fsaf/metafile.yml
- configs/gcnet/metafile.yml
- configs/gfl/metafile.yml
- configs/ghm/metafile.yml
- configs/gn/metafile.yml
- configs/gn+ws/metafile.yml
- configs/grid_rcnn/metafile.yml
- configs/groie/metafile.yml
- configs/guided_anchoring/metafile.yml
- configs/hrnet/metafile.yml
- configs/htc/metafile.yml
- configs/instaboost/metafile.yml
- configs/lad/metafile.yml
- configs/ld/metafile.yml
- configs/libra_rcnn/metafile.yml
- configs/mask_rcnn/metafile.yml
- configs/ms_rcnn/metafile.yml
- configs/nas_fcos/metafile.yml
- configs/nas_fpn/metafile.yml
- configs/openimages/metafile.yml
- configs/paa/metafile.yml
- configs/pafpn/metafile.yml
- configs/panoptic_fpn/metafile.yml
- configs/pvt/metafile.yml
- configs/pisa/metafile.yml
- configs/point_rend/metafile.yml
- configs/queryinst/metafile.yml
- configs/regnet/metafile.yml
- configs/reppoints/metafile.yml
- configs/res2net/metafile.yml
- configs/resnest/metafile.yml
- configs/retinanet/metafile.yml
- configs/sabl/metafile.yml
- configs/scnet/metafile.yml
- configs/scratch/metafile.yml
- configs/seesaw_loss/metafile.yml
- configs/sparse_rcnn/metafile.yml
- configs/solo/metafile.yml
- configs/ssd/metafile.yml
- configs/swin/metafile.yml
- configs/tridentnet/metafile.yml
- configs/tood/metafile.yml
- configs/vfnet/metafile.yml
- configs/yolact/metafile.yml
- configs/yolo/metafile.yml
- configs/yolof/metafile.yml
- configs/yolox/metafile.yml
- configs/rfnext/metafile.yml
| 2,401 | 31.459459 | 44 | yml |
mmdetection | mmdetection-master/setup.py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
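# Helper for compiling optional C++/CUDA extensions. Note that `ext_modules`
# in the setup() call at the bottom of this file is an empty list, so no
# extension is actually built by default.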
def make_cuda_ext(name, module, sources, sources_cuda=[]):
define_macros = []
extra_compile_args = {'cxx': []}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
sources += sources_cuda
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
return extension(
name=f'{module}.{name}',
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strip
    specific versioning information.
Args:
fname (str): path to requirements file
        with_version (bool, default=True): if True, include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
def add_mim_extension():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
# parse installment mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
if platform.system() == 'Windows':
# set `copy` mode here since symlink fails on Windows.
mode = 'copy'
else:
mode = 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
mode = 'copy'
else:
return
filenames = ['tools', 'configs', 'demo', 'model-index.yml']
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmdet', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
os.symlink(src_relpath, tar_path)
elif mode == 'copy':
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
else:
raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
add_mim_extension()
setup(
name='mmdet',
version=get_version(),
description='OpenMMLab Detection Toolbox and Benchmark',
long_description=readme(),
long_description_content_type='text/markdown',
author='MMDetection Contributors',
author_email='[email protected]',
keywords='computer vision, object detection',
url='https://github.com/open-mmlab/mmdetection',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
license='Apache License 2.0',
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
'mim': parse_requirements('requirements/mminstall.txt'),
},
ext_modules=[],
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
| 7,887 | 34.692308 | 125 | py |
mmdetection | mmdetection-master/.circleci/config.yml | version: 2.1
# this allows you to use CircleCI's dynamic configuration feature
setup: true
# the path-filtering orb is required to continue a pipeline based on
# the path of an updated fileset
orbs:
path-filtering: circleci/[email protected]
workflows:
# the always-run workflow is always triggered, regardless of the pipeline parameters.
always-run:
jobs:
# the path-filtering/filter job determines which pipeline
# parameters to update.
- path-filtering/filter:
name: check-updated-files
# 3-column, whitespace-delimited mapping. One mapping per
# line:
# <regex path-to-test> <parameter-to-set> <value-of-pipeline-parameter>
mapping: |
mmdet/.* lint_only false
requirements/.* lint_only false
tests/.* lint_only false
tools/.* lint_only false
configs/.* lint_only false
.circleci/.* lint_only false
base-revision: master
# this is the path of the configuration we should trigger once
# path filtering and pipeline parameter value updates are
# complete. In this case, we are using the parent dynamic
# configuration itself.
config-path: .circleci/test.yml
| 1,275 | 35.457143 | 87 | yml |
mmdetection | mmdetection-master/.circleci/test.yml |
version: 2.1
# the default pipeline parameters, which will be updated according to
# the results of the path-filtering orb
parameters:
lint_only:
type: boolean
default: true
jobs:
lint:
docker:
- image: cimg/python:3.7.4
steps:
- checkout
- run:
name: Install pre-commit hook
command: |
pip install pre-commit
pre-commit install
- run:
name: Linting
command: pre-commit run --all-files
- run:
name: Check docstring coverage
command: |
pip install interrogate
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 50 mmdet
build_cpu:
parameters:
# The python version must match available image tags in
# https://circleci.com/developer/images/image/cimg/python
python:
type: string
torch:
type: string
torchvision:
type: string
docker:
- image: cimg/python:<< parameters.python >>
resource_class: large
steps:
- checkout
- run:
name: Get MMCV_TORCH as environment variables
command: |
. .circleci/scripts/get_mmcv_var.sh << parameters.torch >>
source $BASH_ENV
- run:
name: Install Libraries
command: |
sudo apt-get update
sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5 libgeos-dev cmake git
- run:
name: Configure Python & pip
command: |
python -m pip install --upgrade pip
python -m pip install wheel
- run:
name: Install PyTorch
command: |
python -V
python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
- run:
name: Install mmdet dependencies
command: |
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${MMCV_TORCH}/index.html
python -m pip install -r requirements.txt
python -m pip install albumentations --no-binary qudida,albumentations
python -m pip install git+https://github.com/cocodataset/panopticapi.git
- run:
name: Build and install
command: |
python -m pip install -e .
- run:
name: Run unittests
command: |
python -m coverage run --branch --source mmdet -m pytest tests/
python -m coverage xml
python -m coverage report -m
build_cuda:
parameters:
torch:
type: string
cuda:
type: enum
enum: ["10.1", "10.2", "11.1"]
cudnn:
type: integer
default: 7
machine:
image: ubuntu-2004-cuda-11.4:202110-01
docker_layer_caching: true
resource_class: gpu.nvidia.small
steps:
- checkout
- run:
name: Get MMCV_TORCH and MMCV_CUDA as environment variables
command: |
. .circleci/scripts/get_mmcv_var.sh << parameters.torch >> << parameters.cuda >>
source $BASH_ENV
- run:
name: Build Docker image
command: |
docker build .circleci/docker -t mmdet:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>
docker run --gpus all -t -d -v /home/circleci/project:/mmdet -w /mmdet --name mmdet mmdet:gpu
- run:
name: Install mmdet dependencies
command: |
docker exec mmdet pip install --upgrade pip
docker exec mmdet pip install wheel
docker exec mmdet pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/${MMCV_CUDA}/torch${MMCV_TORCH}/index.html
docker exec mmdet pip install -r requirements.txt
docker exec mmdet pip install typing-extensions -U
docker exec mmdet pip install albumentations --use-pep517 qudida albumentations
docker exec mmdet python -c 'import albumentations; print(albumentations.__version__)'
docker exec mmdet pip install git+https://github.com/cocodataset/panopticapi.git
- run:
name: Build and install
command: |
docker exec mmdet pip install -e .
- run:
name: Run unittests
command: |
docker exec mmdet python -m pytest tests/
workflows:
pr_stage_lint:
when: << pipeline.parameters.lint_only >>
jobs:
- lint:
name: lint
filters:
branches:
ignore:
- master
pr_stage_test:
when:
not:
<< pipeline.parameters.lint_only >>
jobs:
- lint:
name: lint
filters:
branches:
ignore:
- master
- build_cpu:
name: minimum_version_cpu
torch: 1.6.0
torchvision: 0.7.0
python: 3.7.7
requires:
- lint
- build_cpu:
name: maximum_version_cpu
torch: 1.9.0
torchvision: 0.10.0
python: 3.8.0
requires:
- minimum_version_cpu
- hold:
type: approval
requires:
- maximum_version_cpu
- build_cuda:
name: mainstream_version_gpu
torch: 1.8.1
# Use double quotation mark to explicitly specify its type
# as string instead of number
cuda: "10.2"
requires:
- hold
merge_stage_test:
when:
not:
<< pipeline.parameters.lint_only >>
jobs:
- build_cuda:
name: minimum_version_gpu
torch: 1.6.0
# Use double quotation mark to explicitly specify its type
# as string instead of number
cuda: "10.1"
filters:
branches:
only:
- master
| 6,158 | 31.415789 | 177 | yml |
mmdetection | mmdetection-master/.circleci/scripts/get_mmcv_var.sh | #!/bin/bash
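# Derive the torch/cuda version strings expected by MMCV's pre-built wheels and
# export them as environment variables for CircleCI.
# Usage (as invoked from .circleci/test.yml): . get_mmcv_var.sh <TORCH_VERSION> [<CUDA_VERSION>]
# e.g. . get_mmcv_var.sh 1.8.1 10.2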
TORCH=$1
CUDA=$2
# 10.2 -> cu102
MMCV_CUDA="cu`echo ${CUDA} | tr -d '.'`"
# MMCV only provides pre-compiled packages for torch 1.x.0
# which works for any subversions of torch 1.x.
# We force the torch version to be 1.x.0 to ease package searching
# and avoid unnecessary rebuild during MMCV's installation.
TORCH_VER_ARR=(${TORCH//./ })
TORCH_VER_ARR[2]=0
printf -v MMCV_TORCH "%s." "${TORCH_VER_ARR[@]}"
MMCV_TORCH=${MMCV_TORCH%?} # Remove the last dot
echo "export MMCV_CUDA=${MMCV_CUDA}" >> $BASH_ENV
echo "export MMCV_TORCH=${MMCV_TORCH}" >> $BASH_ENV
| 574 | 27.75 | 66 | sh |
mmdetection | mmdetection-master/.dev_scripts/batch_test_list.py | # Copyright (c) OpenMMLab. All rights reserved.
# yapf: disable
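# Each entry below pairs a released config with its checkpoint, the evaluation
# metric(s) to run, and the mAP/AR values the checkpoint is expected to
# reproduce; the dicts are presumably consumed by the batch-testing dev scripts.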
atss = dict(
config='configs/atss/atss_r50_fpn_1x_coco.py',
checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
autoassign = dict(
config='configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
carafe = dict(
config='configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.6),
)
cascade_rcnn = [
dict(
config='configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
eval='bbox',
metric=dict(bbox_mAP=40.3),
),
dict(
config='configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
),
]
cascade_rpn = dict(
config='configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
centripetalnet = dict(
config='configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py', # noqa
checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.7),
)
cornernet = dict(
config='configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py',
checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.2),
)
dcn = dict(
config='configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
eval='bbox',
metric=dict(bbox_mAP=41.3),
)
deformable_detr = dict(
config='configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.5),
)
detectors = dict(
config='configs/detectors/detectors_htc_r50_1x_coco.py',
checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
)
detr = dict(
config='configs/detr/detr_r50_8x2_150e_coco.py',
checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
eval='bbox',
metric=dict(bbox_mAP=40.1),
)
double_heads = dict(
config='configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
dynamic_rcnn = dict(
config='configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
eval='bbox',
metric=dict(bbox_mAP=38.9),
)
empirical_attention = dict(
config='configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py', # noqa
checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
faster_rcnn = dict(
config='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
fcos = dict(
config='configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py', # noqa
checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
foveabox = dict(
config='configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
free_anchor = dict(
config='configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
fsaf = dict(
config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
gcnet = dict(
config='configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py', # noqa
checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
)
gfl = dict(
config='configs/gfl/gfl_r50_fpn_1x_coco.py',
checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
eval='bbox',
metric=dict(bbox_mAP=40.2),
)
gn = dict(
config='configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
)
gn_ws = dict(
config='configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
eval='bbox',
metric=dict(bbox_mAP=39.7),
)
grid_rcnn = dict(
config='configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
groie = dict(
config='configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
guided_anchoring = [
dict(
config='configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py', # noqa
checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
eval='bbox',
metric=dict(bbox_mAP=36.9),
),
dict(
config='configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
checkpoint='ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.6),
),
]
hrnet = dict(
config='configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py',
checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
eval='bbox',
metric=dict(bbox_mAP=36.9),
)
htc = dict(
config='configs/htc/htc_r50_fpn_1x_coco.py',
checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
)
libra_rcnn = dict(
config='configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
mask_rcnn = dict(
config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
)
ms_rcnn = dict(
config='configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
)
nas_fcos = dict(
config='configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py', # noqa
checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
nas_fpn = dict(
config='configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
eval='bbox',
metric=dict(bbox_mAP=40.5),
)
paa = dict(
config='configs/paa/paa_r50_fpn_1x_coco.py',
checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
pafpn = dict(
config='configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
pisa = dict(
config='configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
eval='bbox',
metric=dict(bbox_mAP=38.4),
)
point_rend = dict(
config='configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
)
regnet = dict(
config='configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
)
reppoints = dict(
config='configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py',
checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
eval='bbox',
metric=dict(bbox_mAP=37.0),
)
res2net = dict(
config='configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
eval='bbox',
metric=dict(bbox_mAP=43.0),
)
resnest = dict(
config='configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py', # noqa
checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=42.0),
)
retinanet = dict(
config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
eval='bbox',
metric=dict(bbox_mAP=36.5),
)
rpn = dict(
config='configs/rpn/rpn_r50_fpn_1x_coco.py',
checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
eval='proposal_fast',
metric=dict(AR_1000=58.2),
)
sabl = [
dict(
config='configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
eval='bbox',
metric=dict(bbox_mAP=37.7),
),
dict(
config='configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
eval='bbox',
metric=dict(bbox_mAP=39.9),
),
]
scnet = dict(
config='configs/scnet/scnet_r50_fpn_1x_coco.py',
checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
eval='bbox',
metric=dict(bbox_mAP=43.5),
)
sparse_rcnn = dict(
config='configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
ssd = [
dict(
config='configs/ssd/ssd300_coco.py',
checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',
eval='bbox',
metric=dict(bbox_mAP=25.5),
),
dict(
config='configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py',
checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',# noqa
eval='bbox',
metric=dict(bbox_mAP=21.3),
),
]
tridentnet = dict(
config='configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
eval='bbox',
metric=dict(bbox_mAP=37.6),
)
vfnet = dict(
config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
eval='bbox',
metric=dict(bbox_mAP=41.6),
)
yolact = dict(
config='configs/yolact/yolact_r50_1x8_coco.py',
checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
)
yolo = dict(
config='configs/yolo/yolov3_d53_320_273e_coco.py',
checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
eval='bbox',
metric=dict(bbox_mAP=27.9),
)
yolof = dict(
config='configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
centernet = dict(
config='configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=29.5),
)
yolox = dict(
config='configs/yolox/yolox_tiny_8x8_300e_coco.py',
checkpoint='yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=31.5),
)
# yapf: enable
| 12,707 | 34.3 | 117 | py |
mmdetection | mmdetection-master/.dev_scripts/benchmark_filter.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(description='Filter configs to train')
parser.add_argument(
'--basic-arch',
action='store_true',
help='to train models in basic arch')
parser.add_argument(
'--datasets', action='store_true', help='to train models in dataset')
parser.add_argument(
'--data-pipeline',
action='store_true',
help='to train models related to data pipeline, e.g. augmentations')
parser.add_argument(
'--nn-module',
action='store_true',
help='to train models related to neural network modules')
parser.add_argument(
'--model-options',
nargs='+',
help='custom options to special model benchmark')
parser.add_argument(
'--out',
type=str,
default='batch_train_list.txt',
help='output path of gathered metrics to be stored')
args = parser.parse_args()
return args
basic_arch_root = [
'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]
datasets_root = [
'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]
data_pipeline_root = ['albu_example', 'instaboost']
nn_module_root = [
'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
benchmark_pool = [
'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
'configs/atss/atss_r50_fpn_1x_coco.py',
'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
'configs/centripetalnet/'
'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
'configs/cornernet/'
'cornernet_hourglass104_mstest_8x6_210e_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
'configs/detectors/detectors_htc_r50_1x_coco.py',
'configs/detr/detr_r50_8x2_150e_coco.py',
'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa
'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py',
'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
'configs/gfl/gfl_r50_fpn_1x_coco.py',
'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
'configs/htc/htc_r50_fpn_1x_coco.py',
'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
'configs/paa/paa_r50_fpn_1x_coco.py',
'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
'configs/resnest/'
'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
'configs/rpn/rpn_r50_fpn_1x_coco.py',
'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
'configs/ssd/ssd300_coco.py',
'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
'configs/yolact/yolact_r50_1x8_coco.py',
'configs/yolo/yolov3_d53_320_273e_coco.py',
'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
'configs/scnet/scnet_r50_fpn_1x_coco.py',
'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
args = parse_args()
benchmark_type = []
if args.basic_arch:
benchmark_type += basic_arch_root
if args.datasets:
benchmark_type += datasets_root
if args.data_pipeline:
benchmark_type += data_pipeline_root
if args.nn_module:
benchmark_type += nn_module_root
special_model = args.model_options
if special_model is not None:
benchmark_type += special_model
config_dpath = 'configs/'
benchmark_configs = []
for cfg_root in benchmark_type:
cfg_dir = osp.join(config_dpath, cfg_root)
configs = os.scandir(cfg_dir)
for cfg in configs:
config_path = osp.join(cfg_dir, cfg.name)
if (config_path in benchmark_pool
and config_path not in benchmark_configs):
benchmark_configs.append(config_path)
print(f'Totally found {len(benchmark_configs)} configs to benchmark')
with open(args.out, 'w') as f:
for config in benchmark_configs:
f.write(config + '\n')
if __name__ == '__main__':
main()
| 7,106 | 41.303571 | 92 | py |
mmdetection | mmdetection-master/.dev_scripts/benchmark_inference_fps.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from terminaltables import GithubFlavoredMarkdownTable
from tools.analysis_tools.benchmark import repeat_measure_inference_speed
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet benchmark a model of FPS')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument(
'--round-num',
type=int,
default=1,
help='round a number to a given precision in decimal digits')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
help='number of repeat times of measurement for averaging the results')
parser.add_argument(
'--out', type=str, help='output path of gathered fps to be stored')
parser.add_argument(
'--max-iter', type=int, default=2000, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
'the inference speed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def results2markdown(result_dict):
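    """Print the gathered FPS results as a GitHub-flavored markdown table."""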
table_data = []
is_multiple_results = False
for cfg_name, value in result_dict.items():
name = cfg_name.replace('configs/', '')
fps = value['fps']
ms_times_pre_image = value['ms_times_pre_image']
if isinstance(fps, list):
is_multiple_results = True
mean_fps = value['mean_fps']
mean_times_pre_image = value['mean_times_pre_image']
fps_str = ','.join([str(s) for s in fps])
ms_times_pre_image_str = ','.join(
[str(s) for s in ms_times_pre_image])
table_data.append([
name, fps_str, mean_fps, ms_times_pre_image_str,
mean_times_pre_image
])
else:
table_data.append([name, fps, ms_times_pre_image])
if is_multiple_results:
table_data.insert(0, [
            'model', 'fps', 'mean_fps', 'times_per_image(ms)',
            'mean_times_per_image(ms)'
])
else:
        table_data.insert(0, ['model', 'fps', 'times_per_image(ms)'])
table = GithubFlavoredMarkdownTable(table_data)
print(table.table, flush=True)
if __name__ == '__main__':
args = parse_args()
assert args.round_num >= 0
assert args.repeat_num >= 1
config = Config.fromfile(args.config)
if args.launcher == 'none':
raise NotImplementedError('Only supports distributed mode')
else:
init_dist(args.launcher)
result_dict = {}
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
cfg_path = model_info['config'].strip()
cfg = Config.fromfile(cfg_path)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
fps = repeat_measure_inference_speed(cfg, checkpoint,
args.max_iter,
args.log_interval,
args.fuse_conv_bn,
args.repeat_num)
if args.repeat_num > 1:
fps_list = [round(fps_, args.round_num) for fps_ in fps]
times_pre_image_list = [
round(1000 / fps_, args.round_num) for fps_ in fps
]
mean_fps = round(
sum(fps_list) / len(fps_list), args.round_num)
mean_times_pre_image = round(
sum(times_pre_image_list) / len(times_pre_image_list),
args.round_num)
print(
f'{cfg_path} '
f'Overall fps: {fps_list}[{mean_fps}] img / s, '
f'times per image: '
f'{times_pre_image_list}[{mean_times_pre_image}] '
f'ms / img',
flush=True)
result_dict[cfg_path] = dict(
fps=fps_list,
mean_fps=mean_fps,
ms_times_pre_image=times_pre_image_list,
mean_times_pre_image=mean_times_pre_image)
else:
print(
f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
f'times per image: {1000 / fps:.{args.round_num}f} '
f'ms / img',
flush=True)
result_dict[cfg_path] = dict(
fps=round(fps, args.round_num),
ms_times_pre_image=round(1000 / fps, args.round_num))
except Exception as e:
print(f'{cfg_path} error: {repr(e)}')
if args.repeat_num > 1:
result_dict[cfg_path] = dict(
fps=[0],
mean_fps=0,
ms_times_pre_image=[0],
mean_times_pre_image=0)
else:
result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)
if args.out:
mmcv.mkdir_or_exist(args.out)
mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
results2markdown(result_dict)
| 6,764 | 38.561404 | 79 | py |
mmdetection | mmdetection-master/.dev_scripts/benchmark_test_image.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
from mmcv import Config
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.utils import get_root_logger
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
def inference_model(config_name, checkpoint, args, logger=None):
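    """Build a detector and run inference on a single demo image."""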
cfg = Config.fromfile(config_name)
if args.aug:
if 'flip' in cfg.data.test.pipeline[1]:
cfg.data.test.pipeline[1].flip = True
else:
if logger is not None:
logger.error(f'{config_name}: unable to start aug test')
else:
print(f'{config_name}: unable to start aug test', flush=True)
model = init_detector(cfg, checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show:
show_result_pyplot(
model,
args.img,
result,
score_thr=args.score_thr,
wait_time=args.wait_time)
return result
# Run a sample inference for each model to check that the inference code works
def main(args):
config = Config.fromfile(args.config)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args)
return
else:
raise RuntimeError('model name input error.')
# test all model
logger = get_root_logger(
log_file='benchmark_test_image.log', log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args, logger)
except Exception as e:
                logger.error(f'{config_name} : {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
| 3,674 | 34.679612 | 77 | py |
mmdetection | mmdetection-master/.dev_scripts/check_links.py | # Modified from:
# https://github.com/allenai/allennlp/blob/main/scripts/check_links.py
import argparse
import logging
import os
import pathlib
import re
import sys
from multiprocessing.dummy import Pool
from typing import NamedTuple, Optional, Tuple
import requests
from mmcv.utils import get_logger
def parse_args():
parser = argparse.ArgumentParser(
description='Goes through all the inline-links '
'in markdown files and reports the breakages')
parser.add_argument(
'--num-threads',
type=int,
default=100,
help='Number of processes to confirm the link')
parser.add_argument('--https-proxy', type=str, help='https proxy')
parser.add_argument(
'--out',
type=str,
default='link_reports.txt',
help='output path of reports')
args = parser.parse_args()
return args
OK_STATUS_CODES = (
200,
401, # the resource exists but may require some sort of login.
403, # ^ same
405, # HEAD method not allowed.
# the resource exists, but our default 'Accept-' header may not
# match what the server can provide.
406,
)
class MatchTuple(NamedTuple):
source: str
name: str
link: str
def check_link(
match_tuple: MatchTuple,
http_session: requests.Session,
        logger: Optional[logging.Logger] = None
) -> Tuple[MatchTuple, bool, Optional[str]]:
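    """Check one link and return (match_tuple, is_reachable, reason)."""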
reason: Optional[str] = None
if match_tuple.link.startswith('http'):
result_ok, reason = check_url(match_tuple, http_session)
else:
result_ok = check_path(match_tuple)
if logger is None:
print(f" {'✓' if result_ok else '✗'} {match_tuple.link}")
else:
logger.info(f" {'✓' if result_ok else '✗'} {match_tuple.link}")
return match_tuple, result_ok, reason
def check_url(match_tuple: MatchTuple,
http_session: requests.Session) -> Tuple[bool, str]:
"""Check if a URL is reachable."""
try:
result = http_session.head(
match_tuple.link, timeout=5, allow_redirects=True)
return (
result.ok or result.status_code in OK_STATUS_CODES,
f'status code = {result.status_code}',
)
except (requests.ConnectionError, requests.Timeout):
return False, 'connection error'
def check_path(match_tuple: MatchTuple) -> bool:
"""Check if a file in this repository exists."""
relative_path = match_tuple.link.split('#')[0]
full_path = os.path.join(
os.path.dirname(str(match_tuple.source)), relative_path)
return os.path.exists(full_path)
def main():
args = parse_args()
# setup logger
logger = get_logger(name='mmdet', log_file=args.out)
# setup https_proxy
if args.https_proxy:
os.environ['https_proxy'] = args.https_proxy
# setup http_session
http_session = requests.Session()
for resource_prefix in ('http://', 'https://'):
http_session.mount(
resource_prefix,
requests.adapters.HTTPAdapter(
max_retries=5,
pool_connections=20,
pool_maxsize=args.num_threads),
)
logger.info('Finding all markdown files in the current directory...')
project_root = (pathlib.Path(__file__).parent / '..').resolve()
markdown_files = project_root.glob('**/*.md')
all_matches = set()
url_regex = re.compile(r'\[([^!][^\]]+)\]\(([^)(]+)\)')
for markdown_file in markdown_files:
with open(markdown_file) as handle:
for line in handle.readlines():
matches = url_regex.findall(line)
for name, link in matches:
if 'localhost' not in link:
all_matches.add(
MatchTuple(
source=str(markdown_file),
name=name,
link=link))
    logger.info(f'  {len(all_matches)} links found in the markdown files')
logger.info('Checking to make sure we can retrieve each link...')
with Pool(processes=args.num_threads) as pool:
results = pool.starmap(check_link, [(match, http_session, logger)
for match in list(all_matches)])
# collect unreachable results
unreachable_results = [(match_tuple, reason)
for match_tuple, success, reason in results
if not success]
if unreachable_results:
logger.info('================================================')
logger.info(f'Unreachable links ({len(unreachable_results)}):')
for match_tuple, reason in unreachable_results:
logger.info(' > Source: ' + match_tuple.source)
logger.info(' Name: ' + match_tuple.name)
logger.info(' Link: ' + match_tuple.link)
if reason is not None:
logger.info(' Reason: ' + reason)
sys.exit(1)
logger.info('No Unreachable link found.')
if __name__ == '__main__':
main()
| 5,049 | 30.962025 | 76 | py |
mmdetection | mmdetection-master/.dev_scripts/convert_test_benchmark_script.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--work-dir',
default='tools/batch_test',
help='the dir to save metric')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def process_model_info(model_info, work_dir):
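    """Extract the fields needed to build one slurm test command."""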
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = osp.join(work_dir, fname)
checkpoint = model_info['checkpoint'].strip()
if not isinstance(model_info['eval'], list):
evals = [model_info['eval']]
else:
evals = model_info['eval']
eval = ' '.join(evals)
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint,
eval=eval)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
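    """Append the echo and slurm_test.sh lines for one model."""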
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
eval = model_test_dict['eval']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
command_info += f'--eval {eval} '
command_info += f'--cfg-option dist_params.port={port} '
command_info += ' &'
commands.append(command_info)
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
f'Expected out file path suffix is .sh, but get .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save or run the '
         'script) with the argument "--out" or "--run"')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
work_dir = args.work_dir
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
| 3,604 | 29.041667 | 79 | py |
mmdetection | mmdetection-master/.dev_scripts/convert_train_benchmark_script.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model json to script')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--partition',
type=str,
default='openmmlab',
help='slurm partition name')
parser.add_argument(
'--max-keep-ckpts',
type=int,
default=1,
help='The maximum checkpoints to keep')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def main():
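    """Convert the filtered config list into slurm_train.sh commands."""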
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
f'Expected out file path suffix is .sh, but get .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save or run the '
         'script) with the argument "--out" or "--run"')
partition = args.partition # cluster name
root_name = './tools'
train_script_name = osp.join(root_name, 'slurm_train.sh')
# stdout is no output
stdout_cfg = '>/dev/null'
max_keep_ckpts = args.max_keep_ckpts
commands = []
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, cfg in enumerate(model_cfgs):
cfg = cfg.strip()
if len(cfg) == 0:
continue
# print cfg name
echo_info = f'echo \'{cfg}\' &'
commands.append(echo_info)
commands.append('\n')
fname, _ = osp.splitext(osp.basename(cfg))
out_fname = osp.join(root_name, 'work_dir', fname)
# default setting
if cfg.find('16x') >= 0:
command_info = f'GPUS=16 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
elif cfg.find('gn-head_4x4_1x_coco.py') >= 0 or \
cfg.find('gn-head_4x4_2x_coco.py') >= 0:
command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
else:
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
command_info += f'{partition} '
command_info += f'{fname} '
command_info += f'{cfg} '
command_info += f'{out_fname} '
if max_keep_ckpts:
command_info += f'--cfg-options ' \
f'checkpoint_config.max_keep_ckpts=' \
f'{max_keep_ckpts}' + ' '
command_info += f'{stdout_cfg} &'
commands.append(command_info)
if i < len(model_cfgs):
commands.append('\n')
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
| 3,307 | 32.08 | 74 | py |
mmdetection | mmdetection-master/.dev_scripts/gather_models.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict
import mmcv
import torch
import yaml
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds)
def process_checkpoint(in_file, out_file):
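    """Strip optimizer/EMA states and tag the checkpoint with its sha256."""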
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# remove ema state_dict
for key in list(checkpoint['state_dict']):
if key.startswith('ema_'):
checkpoint['state_dict'].pop(key)
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # str.rstrip strips a set of characters, not a suffix, so remove the
    # '.pth' extension explicitly before appending the sha256 tag
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-len('.pth')]
    else:
        out_file_name = out_file
    final_file = out_file_name + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', out_file, final_file])
return final_file
def is_by_epoch(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
return cfg.runner.type == 'EpochBasedRunner'
def get_final_epoch_or_iter(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
if cfg.runner.type == 'EpochBasedRunner':
return cfg.runner.max_epochs
else:
return cfg.runner.max_iters
def get_best_epoch_or_iter(exp_dir):
best_epoch_iter_full_path = list(
sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1]
best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1]
best_epoch_or_iter = best_epoch_or_iter_model_path.\
split('_')[-1].split('.')[0]
return best_epoch_or_iter_model_path, int(best_epoch_or_iter)
def get_real_epoch_or_iter(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
if cfg.runner.type == 'EpochBasedRunner':
epoch = cfg.runner.max_epochs
if cfg.data.train.type == 'RepeatDataset':
epoch *= cfg.data.train.times
return epoch
else:
return cfg.runner.max_iters
def get_final_results(log_json_path,
epoch_or_iter,
results_lut,
by_epoch=True):
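    """Parse a json log and return memory usage plus the requested metrics."""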
result_dict = dict()
last_val_line = None
last_train_line = None
last_val_line_idx = -1
last_train_line_idx = -1
with open(log_json_path, 'r') as f:
for i, line in enumerate(f.readlines()):
log_line = json.loads(line)
if 'mode' not in log_line.keys():
continue
if by_epoch:
if (log_line['mode'] == 'train'
and log_line['epoch'] == epoch_or_iter):
result_dict['memory'] = log_line['memory']
if (log_line['mode'] == 'val'
and log_line['epoch'] == epoch_or_iter):
result_dict.update({
key: log_line[key]
for key in results_lut if key in log_line
})
return result_dict
else:
if log_line['mode'] == 'train':
last_train_line_idx = i
last_train_line = log_line
if log_line and log_line['mode'] == 'val':
last_val_line_idx = i
last_val_line = log_line
# bug: max_iters = 768, last_train_line['iter'] = 750
assert last_val_line_idx == last_train_line_idx + 1, \
'Log file is incomplete'
result_dict['memory'] = last_train_line['memory']
result_dict.update({
key: last_val_line[key]
for key in results_lut if key in last_val_line
})
return result_dict
def get_dataset_name(config):
# If there are more dataset, add here.
name_map = dict(
CityscapesDataset='Cityscapes',
CocoDataset='COCO',
CocoPanopticDataset='COCO',
DeepFashionDataset='Deep Fashion',
LVISV05Dataset='LVIS v0.5',
LVISV1Dataset='LVIS v1',
VOCDataset='Pascal VOC',
WIDERFaceDataset='WIDER Face',
OpenImagesDataset='OpenImagesDataset',
OpenImagesChallengeDataset='OpenImagesChallengeDataset',
Objects365V1Dataset='Objects365 v1',
Objects365V2Dataset='Objects365 v2')
cfg = mmcv.Config.fromfile('./configs/' + config)
return name_map[cfg.dataset_type]
def convert_model_info_to_pwc(model_infos):
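    """Convert gathered model infos to metafile dicts per config folder."""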
pwc_files = {}
for model in model_infos:
cfg_folder_name = osp.split(model['config'])[-2]
pwc_model_info = OrderedDict()
pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
pwc_model_info['In Collection'] = 'Please fill in Collection name'
pwc_model_info['Config'] = osp.join('configs', model['config'])
# get metadata
memory = round(model['results']['memory'] / 1024, 1)
meta_data = OrderedDict()
meta_data['Training Memory (GB)'] = memory
if 'epochs' in model:
meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])
else:
meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])
pwc_model_info['Metadata'] = meta_data
# get dataset name
dataset_name = get_dataset_name(model['config'])
# get results
results = []
# if there are more metrics, add here.
if 'bbox_mAP' in model['results']:
metric = round(model['results']['bbox_mAP'] * 100, 1)
results.append(
OrderedDict(
Task='Object Detection',
Dataset=dataset_name,
Metrics={'box AP': metric}))
if 'segm_mAP' in model['results']:
metric = round(model['results']['segm_mAP'] * 100, 1)
results.append(
OrderedDict(
Task='Instance Segmentation',
Dataset=dataset_name,
Metrics={'mask AP': metric}))
if 'PQ' in model['results']:
metric = round(model['results']['PQ'], 1)
results.append(
OrderedDict(
Task='Panoptic Segmentation',
Dataset=dataset_name,
Metrics={'PQ': metric}))
pwc_model_info['Results'] = results
link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'
        link_string += '{}/{}'.format(model['config'][:-len('.py')],
osp.split(model['model_path'])[-1])
pwc_model_info['Weights'] = link_string
if cfg_folder_name in pwc_files:
pwc_files[cfg_folder_name].append(pwc_model_info)
else:
pwc_files[cfg_folder_name] = [pwc_model_info]
return pwc_files
def parse_args():
parser = argparse.ArgumentParser(description='Gather benchmarked models')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'out', type=str, help='output path of gathered models to be stored')
parser.add_argument(
'--best',
action='store_true',
help='whether to gather the best model.')
args = parser.parse_args()
return args
def main():
args = parse_args()
models_root = args.root
models_out = args.out
mmcv.mkdir_or_exist(models_out)
# find all models in the root directory to be gathered
raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))
# filter configs that is not trained in the experiments dir
used_configs = []
for raw_config in raw_configs:
if osp.exists(osp.join(models_root, raw_config)):
used_configs.append(raw_config)
print(f'Find {len(used_configs)} models to be gathered')
# find final_ckpt and log file for trained each config
# and parse the best performance
model_infos = []
for used_config in used_configs:
exp_dir = osp.join(models_root, used_config)
by_epoch = is_by_epoch(used_config)
# check whether the exps is finished
if args.best is True:
final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)
else:
final_epoch_or_iter = get_final_epoch_or_iter(used_config)
final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
final_epoch_or_iter)
model_path = osp.join(exp_dir, final_model)
# skip if the model is still training
if not osp.exists(model_path):
continue
# get the latest logs
log_json_path = list(
sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
cfg = mmcv.Config.fromfile('./configs/' + used_config)
results_lut = cfg.evaluation.metric
if not isinstance(results_lut, list):
results_lut = [results_lut]
# case when using VOC, the evaluation key is only 'mAP'
# when using Panoptic Dataset, the evaluation key is 'PQ'.
for i, key in enumerate(results_lut):
if 'mAP' not in key and 'PQ' not in key:
results_lut[i] = key + '_mAP'
model_performance = get_final_results(log_json_path,
final_epoch_or_iter, results_lut,
by_epoch)
if model_performance is None:
continue
model_time = osp.split(log_txt_path)[-1].split('.')[0]
model_info = dict(
config=used_config,
results=model_performance,
model_time=model_time,
final_model=final_model,
log_json_path=osp.split(log_json_path)[-1])
model_info['epochs' if by_epoch else 'iterations'] =\
final_epoch_or_iter
model_infos.append(model_info)
# publish model for each checkpoint
publish_model_infos = []
for model in model_infos:
        model_publish_dir = osp.join(models_out,
                                     model['config'][:-len('.py')])
mmcv.mkdir_or_exist(model_publish_dir)
model_name = osp.split(model['config'])[-1].split('.')[0]
model_name += '_' + model['model_time']
publish_model_path = osp.join(model_publish_dir, model_name)
trained_model_path = osp.join(models_root, model['config'],
model['final_model'])
# convert model
final_model_path = process_checkpoint(trained_model_path,
publish_model_path)
# copy log
shutil.copy(
osp.join(models_root, model['config'], model['log_json_path']),
osp.join(model_publish_dir, f'{model_name}.log.json'))
shutil.copy(
osp.join(models_root, model['config'],
model['log_json_path'].rstrip('.json')),
osp.join(model_publish_dir, f'{model_name}.log'))
# copy config to guarantee reproducibility
config_path = model['config']
config_path = osp.join(
'configs',
config_path) if 'configs' not in config_path else config_path
target_config_path = osp.split(config_path)[-1]
shutil.copy(config_path, osp.join(model_publish_dir,
target_config_path))
model['model_path'] = final_model_path
publish_model_infos.append(model)
models = dict(models=publish_model_infos)
print(f'Totally gathered {len(publish_model_infos)} models')
mmcv.dump(models, osp.join(models_out, 'model_info.json'))
pwc_files = convert_model_info_to_pwc(publish_model_infos)
for name in pwc_files:
with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
if __name__ == '__main__':
main()
| 12,487 | 35.408163 | 79 | py |
mmdetection | mmdetection-master/.dev_scripts/gather_test_benchmark_metric.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
'--not-show', action='store_true', help='not show metrics')
parser.add_argument(
'--show-all', action='store_true', help='show all model metrics')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
result_dict = {}
cfg = Config.fromfile(args.config)
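    # Compare the metrics recorded in the config against the metrics found in
    # the latest eval log of each model and collect any differences.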
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
metric_json_dir = osp.join(root_path, fname)
if osp.exists(metric_json_dir):
json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
if len(json_list) > 0:
log_json_path = list(sorted(json_list))[-1]
metric = mmcv.load(log_json_path)
if config in metric.get('config', {}):
new_metrics = dict()
for record_metric_key in record_metrics:
record_metric_key_bk = record_metric_key
old_metric = record_metrics[record_metric_key]
if record_metric_key == 'AR_1000':
record_metric_key = 'AR@1000'
if record_metric_key not in metric['metric']:
raise KeyError(
'record_metric_key not exist, please '
'check your config')
new_metric = round(
metric['metric'][record_metric_key] * 100, 1)
new_metrics[record_metric_key_bk] = new_metric
if args.show_all:
result_dict[config] = dict(
before=record_metrics, after=new_metrics)
else:
for record_metric_key in record_metrics:
old_metric = record_metrics[record_metric_key]
new_metric = new_metrics[record_metric_key]
if old_metric != new_metric:
result_dict[config] = dict(
before=record_metrics,
after=new_metrics)
break
else:
print(f'{config} not included in: {log_json_path}')
else:
print(f'{config} not exist file: {metric_json_dir}')
else:
print(f'{config} not exist dir: {metric_json_dir}')
if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'batch_test_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
| 3,916 | 39.381443 | 79 | py |
mmdetection | mmdetection-master/.dev_scripts/gather_train_benchmark_metric.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import mmcv
from gather_models import get_final_results
try:
import xlrd
except ImportError:
xlrd = None
try:
import xlutils
from xlutils.copy import copy
except ImportError:
xlutils = None
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
'--not-show', action='store_true', help='not show metrics')
parser.add_argument(
'--excel', type=str, help='input path of excel to be recorded')
parser.add_argument(
'--ncol', type=int, help='Number of column to be modified or appended')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if args.excel:
assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
'at the same time'
if xlrd is None:
raise RuntimeError(
                'xlrd is not installed. '
                'Please use "pip install xlrd==1.2.0" to install')
if xlutils is None:
raise RuntimeError(
                'xlutils is not installed. '
                'Please use "pip install xlutils==2.0.0" to install')
readbook = xlrd.open_workbook(args.excel)
sheet = readbook.sheet_by_name('Sheet1')
sheet_info = {}
total_nrows = sheet.nrows
for i in range(3, sheet.nrows):
sheet_info[sheet.row_values(i)[0]] = i
xlrw = copy(readbook)
table = xlrw.get_sheet(0)
root_path = args.root
metrics_out = args.out
result_dict = {}
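    # For each config listed in the txt file, parse the latest training log
    # and collect the final evaluation metrics (optionally written to excel).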
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, config in enumerate(model_cfgs):
config = config.strip()
if len(config) == 0:
continue
config_name = osp.split(config)[-1]
config_name = osp.splitext(config_name)[0]
result_path = osp.join(root_path, config_name)
if osp.exists(result_path):
# 1 read config
cfg = mmcv.Config.fromfile(config)
total_epochs = cfg.runner.max_epochs
final_results = cfg.evaluation.metric
if not isinstance(final_results, list):
final_results = [final_results]
final_results_out = []
for key in final_results:
if 'proposal_fast' in key:
final_results_out.append('AR@1000') # RPN
                    elif 'mAP' not in key:
                        final_results_out.append(key + '_mAP')
                    else:
                        final_results_out.append(key)
# 2 determine whether total_epochs ckpt exists
ckpt_path = f'epoch_{total_epochs}.pth'
if osp.exists(osp.join(result_path, ckpt_path)):
log_json_path = list(
sorted(glob.glob(osp.join(result_path,
'*.log.json'))))[-1]
# 3 read metric
model_performance = get_final_results(
log_json_path, total_epochs, final_results_out)
if model_performance is None:
print(f'log file error: {log_json_path}')
continue
for performance in model_performance:
if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
metric = round(
model_performance[performance] * 100, 1)
model_performance[performance] = metric
result_dict[config] = model_performance
# update and append excel content
if args.excel:
if 'AR@1000' in model_performance:
metrics = f'{model_performance["AR@1000"]}' \
f'(AR@1000)'
elif 'segm_mAP' in model_performance:
metrics = f'{model_performance["bbox_mAP"]}/' \
f'{model_performance["segm_mAP"]}'
else:
metrics = f'{model_performance["bbox_mAP"]}'
row_num = sheet_info.get(config, None)
if row_num:
table.write(row_num, args.ncol, metrics)
else:
table.write(total_nrows, 0, config)
table.write(total_nrows, args.ncol, metrics)
total_nrows += 1
else:
print(f'{config} not exist: {ckpt_path}')
else:
print(f'not exist: {config}')
# 4 save or print results
if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'model_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
if args.excel:
        filename, suffix = osp.splitext(args.excel)
        xlrw.save(f'{filename}_o{suffix}')
        print(f'>>> Output {filename}_o{suffix}')
| 5,843 | 37.701987 | 79 | py |
mmdetection | mmdetection-master/.dev_scripts/linter.sh | yapf -r -i mmdet/ configs/ tests/ tools/
isort -rc mmdet/ configs/ tests/ tools/
flake8 .
| 90 | 21.75 | 40 | sh |
mmdetection | mmdetection-master/.dev_scripts/test_benchmark.sh | PARTITION=$1
CHECKPOINT_DIR=$2
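# $1: slurm partition name, $2: root directory of the downloaded checkpoints.
# Each echo/slurm_test.sh pair below evaluates one released model.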
echo 'configs/atss/atss_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py $CHECKPOINT_DIR/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth --work-dir tools/batch_test/atss_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29666 &
echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py $CHECKPOINT_DIR/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth --work-dir tools/batch_test/autoassign_r50_fpn_8x2_1x_coco --eval bbox --cfg-option dist_params.port=29667 &
echo 'configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_carafe_1x_coco configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_carafe_1x_coco --eval bbox --cfg-option dist_params.port=29668 &
echo 'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cascade_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth --work-dir tools/batch_test/cascade_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29669 &
echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth --work-dir tools/batch_test/cascade_mask_rcnn_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29670 &
echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION crpn_faster_rcnn_r50_caffe_fpn_1x_coco configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth --work-dir tools/batch_test/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29671 &
echo 'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION centripetalnet_hourglass104_mstest_16x6_210e_coco configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py $CHECKPOINT_DIR/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth --work-dir tools/batch_test/centripetalnet_hourglass104_mstest_16x6_210e_coco --eval bbox --cfg-option dist_params.port=29672 &
echo 'configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cornernet_hourglass104_mstest_8x6_210e_coco configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py $CHECKPOINT_DIR/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth --work-dir tools/batch_test/cornernet_hourglass104_mstest_8x6_210e_coco --eval bbox --cfg-option dist_params.port=29673 &
echo 'configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco --eval bbox --cfg-option dist_params.port=29674 &
echo 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deformable_detr_r50_16x2_50e_coco configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py $CHECKPOINT_DIR/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth --work-dir tools/batch_test/deformable_detr_r50_16x2_50e_coco --eval bbox --cfg-option dist_params.port=29675 &
echo 'configs/detectors/detectors_htc_r50_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION detectors_htc_r50_1x_coco configs/detectors/detectors_htc_r50_1x_coco.py $CHECKPOINT_DIR/detectors_htc_r50_1x_coco-329b1453.pth --work-dir tools/batch_test/detectors_htc_r50_1x_coco --eval bbox segm --cfg-option dist_params.port=29676 &
echo 'configs/detr/detr_r50_8x2_150e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION detr_r50_8x2_150e_coco configs/detr/detr_r50_8x2_150e_coco.py $CHECKPOINT_DIR/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth --work-dir tools/batch_test/detr_r50_8x2_150e_coco --eval bbox --cfg-option dist_params.port=29677 &
echo 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION dh_faster_rcnn_r50_fpn_1x_coco configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth --work-dir tools/batch_test/dh_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29678 &
echo 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION dynamic_rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dynamic_rcnn_r50_fpn_1x-62a3f276.pth --work-dir tools/batch_test/dynamic_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29679 &
echo 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_attention_1111_1x_coco configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_attention_1111_1x_coco --eval bbox --cfg-option dist_params.port=29680 &
echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29681 &
echo 'configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py $CHECKPOINT_DIR/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth --work-dir tools/batch_test/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco --eval bbox --cfg-option dist_params.port=29682 &
echo 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fovea_align_r50_fpn_gn-head_4x4_2x_coco configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py $CHECKPOINT_DIR/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth --work-dir tools/batch_test/fovea_align_r50_fpn_gn-head_4x4_2x_coco --eval bbox --cfg-option dist_params.port=29683 &
echo 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_free_anchor_r50_fpn_1x_coco configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth --work-dir tools/batch_test/retinanet_free_anchor_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29684 &
echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py $CHECKPOINT_DIR/fsaf_r50_fpn_1x_coco-94ccc51f.pth --work-dir tools/batch_test/fsaf_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29685 &
echo 'configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco --eval bbox segm --cfg-option dist_params.port=29686 &
echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py $CHECKPOINT_DIR/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth --work-dir tools/batch_test/gfl_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29687 &
echo 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_gn-all_2x_coco --eval bbox segm --cfg-option dist_params.port=29688 &
echo 'configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_gn_ws-all_1x_coco configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_gn_ws-all_1x_coco --eval bbox --cfg-option dist_params.port=29689 &
echo 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION grid_rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py $CHECKPOINT_DIR/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth --work-dir tools/batch_test/grid_rcnn_r50_fpn_gn-head_2x_coco --eval bbox --cfg-option dist_params.port=29690 &
echo 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_groie_1x_coco configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_groie_1x_coco --eval bbox --cfg-option dist_params.port=29691 &
echo 'configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ga_retinanet_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth --work-dir tools/batch_test/ga_retinanet_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29692 &
echo 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ga_faster_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth --work-dir tools/batch_test/ga_faster_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29693 &
echo 'configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_hrnetv2p_w18_1x_coco configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth --work-dir tools/batch_test/faster_rcnn_hrnetv2p_w18_1x_coco --eval bbox --cfg-option dist_params.port=29694 &
echo 'configs/htc/htc_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py $CHECKPOINT_DIR/htc_r50_fpn_1x_coco_20200317-7332cf16.pth --work-dir tools/batch_test/htc_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29695 &
echo 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION libra_faster_rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth --work-dir tools/batch_test/libra_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29696 &
echo 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_1x_coco configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29697 &
echo 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ms_rcnn_r50_caffe_fpn_1x_coco configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth --work-dir tools/batch_test/ms_rcnn_r50_caffe_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29698 &
echo 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py $CHECKPOINT_DIR/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth --work-dir tools/batch_test/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --eval bbox --cfg-option dist_params.port=29699 &
echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_r50_nasfpn_crop640_50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py $CHECKPOINT_DIR/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth --work-dir tools/batch_test/retinanet_r50_nasfpn_crop640_50e_coco --eval bbox --cfg-option dist_params.port=29700 &
echo 'configs/paa/paa_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py $CHECKPOINT_DIR/paa_r50_fpn_1x_coco_20200821-936edec3.pth --work-dir tools/batch_test/paa_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29701 &
echo 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_pafpn_1x_coco configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth --work-dir tools/batch_test/faster_rcnn_r50_pafpn_1x_coco --eval bbox --cfg-option dist_params.port=29702 &
echo 'configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pisa_faster_rcnn_r50_fpn_1x_coco configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth --work-dir tools/batch_test/pisa_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29703 &
echo 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION point_rend_r50_caffe_fpn_mstrain_1x_coco configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py $CHECKPOINT_DIR/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth --work-dir tools/batch_test/point_rend_r50_caffe_fpn_mstrain_1x_coco --eval bbox segm --cfg-option dist_params.port=29704 &
echo 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth --work-dir tools/batch_test/mask_rcnn_regnetx-3.2GF_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29705 &
echo 'configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION reppoints_moment_r50_fpn_1x_coco configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py $CHECKPOINT_DIR/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth --work-dir tools/batch_test/reppoints_moment_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29706 &
echo 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r2_101_fpn_2x_coco configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py $CHECKPOINT_DIR/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth --work-dir tools/batch_test/faster_rcnn_r2_101_fpn_2x_coco --eval bbox --cfg-option dist_params.port=29707 &
echo 'configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth --work-dir tools/batch_test/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco --eval bbox --cfg-option dist_params.port=29708 &
echo 'configs/retinanet/retinanet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_r50_fpn_1x_coco configs/retinanet/retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth --work-dir tools/batch_test/retinanet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29709 &
echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth --work-dir tools/batch_test/rpn_r50_fpn_1x_coco --eval proposal_fast --cfg-option dist_params.port=29710 &
echo 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sabl_retinanet_r50_fpn_1x_coco configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth --work-dir tools/batch_test/sabl_retinanet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29711 &
echo 'configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sabl_faster_rcnn_r50_fpn_1x_coco configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth --work-dir tools/batch_test/sabl_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29712 &
echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/scnet_r50_fpn_1x_coco-c3f09857.pth --work-dir tools/batch_test/scnet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29713 &
echo 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sparse_rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth --work-dir tools/batch_test/sparse_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29714 &
echo 'configs/ssd/ssd300_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ssd300_coco configs/ssd/ssd300_coco.py $CHECKPOINT_DIR/ssd300_coco_20210803_015428-d231a06e.pth --work-dir tools/batch_test/ssd300_coco --eval bbox --cfg-option dist_params.port=29715 &
echo 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION tridentnet_r50_caffe_1x_coco configs/tridentnet/tridentnet_r50_caffe_1x_coco.py $CHECKPOINT_DIR/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth --work-dir tools/batch_test/tridentnet_r50_caffe_1x_coco --eval bbox --cfg-option dist_params.port=29716 &
echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth --work-dir tools/batch_test/vfnet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29717 &
echo 'configs/yolact/yolact_r50_1x8_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolact_r50_1x8_coco configs/yolact/yolact_r50_1x8_coco.py $CHECKPOINT_DIR/yolact_r50_1x8_coco_20200908-f38d58df.pth --work-dir tools/batch_test/yolact_r50_1x8_coco --eval bbox segm --cfg-option dist_params.port=29718 &
echo 'configs/yolo/yolov3_d53_320_273e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolov3_d53_320_273e_coco configs/yolo/yolov3_d53_320_273e_coco.py $CHECKPOINT_DIR/yolov3_d53_320_273e_coco-421362b6.pth --work-dir tools/batch_test/yolov3_d53_320_273e_coco --eval bbox --cfg-option dist_params.port=29719 &
echo 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolof_r50_c5_8x8_1x_coco configs/yolof/yolof_r50_c5_8x8_1x_coco.py $CHECKPOINT_DIR/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth --work-dir tools/batch_test/yolof_r50_c5_8x8_1x_coco --eval bbox --cfg-option dist_params.port=29720 &
echo 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION centernet_resnet18_dcnv2_140e_coco configs/centernet/centernet_resnet18_dcnv2_140e_coco.py $CHECKPOINT_DIR/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth --work-dir tools/batch_test/centernet_resnet18_dcnv2_140e_coco --eval bbox --cfg-option dist_params.port=29721 &
echo 'configs/yolox/yolox_tiny_8x8_300e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolox_tiny_8x8_300e_coco configs/yolox/yolox_tiny_8x8_300e_coco.py $CHECKPOINT_DIR/yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth --work-dir tools/batch_test/yolox_tiny_8x8_300e_coco --eval bbox --cfg-option dist_params.port=29722 &
echo 'configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ssdlite_mobilenetv2_scratch_600e_coco configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py $CHECKPOINT_DIR/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth --work-dir tools/batch_test/ssdlite_mobilenetv2_scratch_600e_coco --eval bbox --cfg-option dist_params.port=29723 &
| 23,366 | 193.725 | 467 | sh |
mmdetection | mmdetection-master/.dev_scripts/test_init_backbone.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Check out backbone whether successfully load pretrained checkpoint."""
import copy
import os
from os.path import dirname, exists, join
import pytest
from mmcv import Config, ProgressBar
from mmcv.runner import _load_checkpoint
from mmdet.models import build_detector
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(__file__))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def _traversed_config_file():
"""We traversed all potential config files under the `config` file. If you
need to print details or debug code, you can use this function.
If the `backbone.init_cfg` is None (do not use `Pretrained` init way), you
need add the folder name in `ignores_folder` (if the config files in this
folder all set backbone.init_cfg is None) or add config name in
`ignores_file` (if the config file set backbone.init_cfg is None)
"""
config_path = _get_config_directory()
check_cfg_names = []
# `base`, `legacy_1.x` and `common` ignored by default.
ignores_folder = ['_base_', 'legacy_1.x', 'common']
    # 'ld' needs to load a teacher model; if you want to check 'ld',
    # please check the teacher_config path first.
ignores_folder += ['ld']
    # `selfsup_pretrain` needs a converted model; if you want to check this
    # model, convert the model first.
ignores_folder += ['selfsup_pretrain']
# the `init_cfg` in 'centripetalnet', 'cornernet', 'cityscapes',
# 'scratch' is None.
# the `init_cfg` in ssdlite(`ssdlite_mobilenetv2_scratch_600e_coco.py`)
# is None
    # Please confirm `backbone.init_cfg` is None first.
ignores_folder += ['centripetalnet', 'cornernet', 'cityscapes', 'scratch']
ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py']
for config_file_name in os.listdir(config_path):
if config_file_name not in ignores_folder:
config_file = join(config_path, config_file_name)
if os.path.isdir(config_file):
for config_sub_file in os.listdir(config_file):
if config_sub_file.endswith('py') and \
config_sub_file not in ignores_file:
name = join(config_file, config_sub_file)
check_cfg_names.append(name)
return check_cfg_names
def _check_backbone(config, print_cfg=True):
"""Check out backbone whether successfully load pretrained model, by using
`backbone.init_cfg`.
First, using `mmcv._load_checkpoint` to load the checkpoint without
loading models.
Then, using `build_detector` to build models, and using
`model.init_weights()` to initialize the parameters.
Finally, assert weights and bias of each layer loaded from pretrained
checkpoint are equal to the weights and bias of original checkpoint.
For the convenience of comparison, we sum up weights and bias of
each loaded layer separately.
Args:
config (str): Config file path.
print_cfg (bool): Whether print logger and return the result.
Returns:
results (str or None): If backbone successfully load pretrained
checkpoint, return None; else, return config file path.
"""
if print_cfg:
print('-' * 15 + 'loading ', config)
cfg = Config.fromfile(config)
init_cfg = None
try:
init_cfg = cfg.model.backbone.init_cfg
init_flag = True
except AttributeError:
init_flag = False
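    # Only configs whose backbone `init_cfg` is of type 'Pretrained' are
    # verified; any other config falls through to the `else` branch below and
    # its path is returned for manual inspection.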
if init_cfg is None or init_cfg.get('type') != 'Pretrained':
init_flag = False
if init_flag:
checkpoint = _load_checkpoint(init_cfg.checkpoint)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
checkpoint_layers = state_dict.keys()
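        # After `init_weights()`, every backbone parameter that also exists in
        # the checkpoint must match the checkpoint value exactly.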
for name, value in model.backbone.state_dict().items():
if name in checkpoint_layers:
assert value.equal(state_dict[name])
if print_cfg:
            print('-' * 10 + 'Successfully loaded checkpoint' + '-' * 10 +
'\n', )
return None
else:
if print_cfg:
print(config + '\n' + '-' * 10 +
                  'config file does not have init_cfg' + '-' * 10 + '\n')
return config
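# Illustrative usage of `_check_backbone` on a single config (assumed to be
# run from the mmdetection repo root so that the relative config path
# resolves):
#   _check_backbone('configs/retinanet/retinanet_r50_fpn_1x_coco.py')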
@pytest.mark.parametrize('config', _traversed_config_file())
def test_load_pretrained(config):
"""Check out backbone whether successfully load pretrained model by using
`backbone.init_cfg`.
Details please refer to `_check_backbone`
"""
_check_backbone(config, print_cfg=False)
def _test_load_pretrained():
"""We traversed all potential config files under the `config` file. If you
need to print details or debug code, you can use this function.
Returns:
check_cfg_names (list[str]): Config files that backbone initialized
from pretrained checkpoint might be problematic. Need to recheck
the config file. The output including the config files that the
backbone.init_cfg is None
"""
check_cfg_names = _traversed_config_file()
need_check_cfg = []
prog_bar = ProgressBar(len(check_cfg_names))
for config in check_cfg_names:
init_cfg_name = _check_backbone(config)
if init_cfg_name is not None:
need_check_cfg.append(init_cfg_name)
prog_bar.update()
print('These config files need to be checked again')
print(need_check_cfg)
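# A minimal entry point sketch for running the full backbone check from the
# command line (assuming the script is executed from the mmdetection repo
# root; pytest does not need this).
if __name__ == '__main__':
    _test_load_pretrained()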
| 6,625 | 35.406593 | 78 | py |
mmdetection | mmdetection-master/.dev_scripts/train_benchmark.sh | echo 'configs/atss/atss_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py ./tools/work_dir/atss_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py ./tools/work_dir/autoassign_r50_fpn_8x2_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/cascade_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab crpn_faster_rcnn_r50_caffe_fpn_1x_coco configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab centernet_resnet18_dcnv2_140e_coco configs/centernet/centernet_resnet18_dcnv2_140e_coco.py ./tools/work_dir/centernet_resnet18_dcnv2_140e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py' &
GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab centripetalnet_hourglass104_mstest_16x6_210e_coco configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py ./tools/work_dir/centripetalnet_hourglass104_mstest_16x6_210e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cornernet_hourglass104_mstest_8x6_210e_coco configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py ./tools/work_dir/cornernet_hourglass104_mstest_8x6_210e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/detectors/detectors_htc_r50_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab detectors_htc_r50_1x_coco configs/detectors/detectors_htc_r50_1x_coco.py ./tools/work_dir/detectors_htc_r50_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py' &
GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab deformable_detr_r50_16x2_50e_coco configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py ./tools/work_dir/deformable_detr_r50_16x2_50e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/detr/detr_r50_8x2_150e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab detr_r50_8x2_150e_coco configs/detr/detr_r50_8x2_150e_coco.py ./tools/work_dir/detr_r50_8x2_150e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab dh_faster_rcnn_r50_fpn_1x_coco configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/dh_faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab dynamic_rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/dynamic_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_dc5_mstrain_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_fpn_mstrain_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_ohem_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_ohem_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab fovea_align_r50_fpn_gn-head_4x4_2x_coco configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py ./tools/work_dir/fovea_align_r50_fpn_gn-head_4x4_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_fp16_1x_coco configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_fp16_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_fpn_fp16_1x_coco configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py ./tools/work_dir/retinanet_r50_fpn_fp16_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_free_anchor_r50_fpn_1x_coco configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py ./tools/work_dir/retinanet_free_anchor_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py ./tools/work_dir/fsaf_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py ./tools/work_dir/gfl_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_ghm_r50_fpn_1x_coco configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py ./tools/work_dir/retinanet_ghm_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab grid_rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py ./tools/work_dir/grid_rcnn_r50_fpn_gn-head_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ga_faster_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py ./tools/work_dir/ga_faster_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/htc/htc_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py ./tools/work_dir/htc_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ld_r18_gflv1_r101_fpn_coco_1x configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py ./tools/work_dir/ld_r18_gflv1_r101_fpn_coco_1x --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab libra_faster_rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/libra_faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py ./tools/work_dir/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ms_rcnn_r50_caffe_fpn_1x_coco configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/ms_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py ./tools/work_dir/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/paa/paa_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py ./tools/work_dir/paa_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab pisa_mask_rcnn_r50_fpn_1x_coco configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/pisa_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab point_rend_r50_caffe_fpn_mstrain_1x_coco configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py ./tools/work_dir/point_rend_r50_caffe_fpn_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab reppoints_moment_r50_fpn_gn-neck+head_1x_coco configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py ./tools/work_dir/reppoints_moment_r50_fpn_gn-neck+head_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_caffe_fpn_1x_coco configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py ./tools/work_dir/retinanet_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py ./tools/work_dir/rpn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab sabl_retinanet_r50_fpn_1x_coco configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py ./tools/work_dir/sabl_retinanet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ssd/ssd300_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ssd300_coco configs/ssd/ssd300_coco.py ./tools/work_dir/ssd300_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab tridentnet_r50_caffe_1x_coco configs/tridentnet/tridentnet_r50_caffe_1x_coco.py ./tools/work_dir/tridentnet_r50_caffe_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py ./tools/work_dir/vfnet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/yolact/yolact_r50_8x8_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolact_r50_8x8_coco configs/yolact/yolact_r50_8x8_coco.py ./tools/work_dir/yolact_r50_8x8_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/yolo/yolov3_d53_320_273e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolov3_d53_320_273e_coco configs/yolo/yolov3_d53_320_273e_coco.py ./tools/work_dir/yolov3_d53_320_273e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab sparse_rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/sparse_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py ./tools/work_dir/scnet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolof_r50_c5_8x8_1x_coco configs/yolof/yolof_r50_c5_8x8_1x_coco.py ./tools/work_dir/yolof_r50_c5_8x8_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_carafe_1x_coco configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_carafe_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_mdpool_1x_coco configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_mdpool_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_dpool_1x_coco configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_dpool_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_gn-all_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_gn_ws-all_2x_coco configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_gn_ws-all_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_hrnetv2p_w18_1x_coco configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py ./tools/work_dir/mask_rcnn_hrnetv2p_w18_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_pafpn_1x_coco configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_pafpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_nasfpn_crop640_50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py ./tools/work_dir/retinanet_r50_nasfpn_crop640_50e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py ./tools/work_dir/mask_rcnn_regnetx-3.2GF_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py ./tools/work_dir/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r2_101_fpn_2x_coco configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py ./tools/work_dir/faster_rcnn_r2_101_fpn_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_groie_1x_coco configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_groie_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_1x_cityscapes configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py ./tools/work_dir/mask_rcnn_r50_fpn_1x_cityscapes --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab panoptic_fpn_r50_fpn_1x_coco configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py ./tools/work_dir/panoptic_fpn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/yolox/yolox_tiny_8x8_300e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolox_tiny_8x8_300e_coco configs/yolox/yolox_tiny_8x8_300e_coco.py ./tools/work_dir/yolox_tiny_8x8_300e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ssdlite_mobilenetv2_scratch_600e_coco configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py ./tools/work_dir/ssdlite_mobilenetv2_scratch_600e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
| 22,182 | 163.318519 | 336 | sh |
mmdetection | mmdetection-master/.github/CODE_OF_CONDUCT.md | # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or
advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [email protected]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
[homepage]: https://www.contributor-covenant.org
| 3,355 | 42.584416 | 87 | md |
mmdetection | mmdetection-master/.github/CONTRIBUTING.md | We appreciate all contributions to improve MMDetection. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline.
| 213 | 106 | 212 | md |
mmdetection | mmdetection-master/.github/pull_request_template.md | Thanks for your contribution; we appreciate it a lot. The following instructions will make your pull request healthier and help it get feedback more easily. If you do not understand some items, don't worry: just open the pull request and seek help from the maintainers.
## Motivation
Please describe the motivation of this PR and the goal you want to achieve through this PR.
## Modification
Please briefly describe what modification is made in this PR.
## BC-breaking (Optional)
Does the modification introduce changes that break the backward-compatibility of the downstream repos?
If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.
## Use cases (Optional)
If this PR introduces a new feature, it is better to list some use cases here, and update the documentation.
## Checklist
1. Pre-commit or other linting tools are used to fix the potential lint issues.
2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure correctness.
3. If the modification has a potential influence on downstream projects, this PR should be tested with those downstream projects, like MMDet or MMCls.
4. The documentation has been modified accordingly, like docstrings or example tutorials.
| 1,310 | 49.423077 | 264 | md |
mmdetection | mmdetection-master/.github/ISSUE_TEMPLATE/1-bug-report.yml | name: "🐞 Bug report"
description: "Create a report to help us reproduce and fix the bug"
labels: "kind/bug,status/unconfirmed"
title: "[Bug] "
body:
- type: markdown
attributes:
value: |
If you have already identified the reason, we strongly appreciate you creating a new PR to fix it [here](https://github.com/open-mmlab/mmdetection/pulls)!
If this issue is about installing MMCV, please file an issue at [MMCV](https://github.com/open-mmlab/mmcv/issues/new/choose).
If you need our help, please fill in as much of the following form as you're able to.
**The less clear the description, the longer it will take to solve it.**
- type: checkboxes
attributes:
label: Prerequisite
description: Please check the following items before creating a new issue.
options:
- label: I have searched [Issues](https://github.com/open-mmlab/mmdetection/issues) and [Discussions](https://github.com/open-mmlab/mmdetection/discussions) but cannot get the expected help.
required: true
- label: I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
required: true
- label: The bug has not been fixed in the [latest version (master)](https://github.com/open-mmlab/mmdetection) or [latest version (3.x)](https://github.com/open-mmlab/mmdetection/tree/dev-3.x).
required: true
- type: dropdown
id: task
attributes:
label: Task
description: The problem arises when
options:
- I'm using the official example scripts/configs for the officially supported tasks/models/datasets.
- I have modified the scripts/configs, or I'm working on my own tasks/models/datasets.
validations:
required: true
- type: dropdown
id: branch
attributes:
label: Branch
description: The problem arises when I'm working on
options:
- master branch https://github.com/open-mmlab/mmdetection
- 3.x branch https://github.com/open-mmlab/mmdetection/tree/3.x
validations:
required: true
- type: textarea
attributes:
label: Environment
description: |
Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and copy-paste it here.
You may add additional information that may be helpful for locating the problem, such as
- How you installed PyTorch \[e.g., pip, conda, source\]
- Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
validations:
required: true
- type: textarea
attributes:
label: Reproduces the problem - code sample
description: |
Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
placeholder: |
```python
# Sample code to reproduce the problem
```
validations:
required: true
- type: textarea
attributes:
label: Reproduces the problem - command or script
description: |
What command or script did you run?
placeholder: |
```shell
The command or script you run.
```
validations:
required: true
- type: textarea
attributes:
label: Reproduces the problem - error message
description: |
Please provide the error message or logs you got, with the full traceback.
placeholder: |
```
The error message or logs you got, with the full traceback.
```
validations:
required: true
- type: textarea
attributes:
label: Additional information
description: Tell us anything else you think we should know.
placeholder: |
1. What's your expected result?
2. What dataset did you use?
3. What do you think might be the reason?
| 3,938 | 36.160377 | 200 | yml |
mmdetection | mmdetection-master/.github/ISSUE_TEMPLATE/2-feature-request.yml | name: 🚀 Feature request
description: Suggest an idea for this project
labels: "kind/enhancement,status/unconfirmed"
title: "[Feature] "
body:
- type: markdown
attributes:
value: |
We strongly appreciate you creating a PR to implement this feature [here](https://github.com/open-mmlab/mmdetection/pulls)!
If you need our help, please fill in as much of the following form as you're able to.
**The less clear the description, the longer it will take to solve it.**
- type: textarea
attributes:
label: What's the feature?
description: |
Tell us more about the feature and how this feature can help.
placeholder: |
E.g., It is inconvenient when \[....\].
This feature can \[....\].
validations:
required: true
- type: textarea
attributes:
label: Any other context?
description: |
Have you considered any alternative solutions or features? If so, what are they?
Also, feel free to add any other context or screenshots about the feature request here.
| 1,077 | 32.6875 | 131 | yml |
mmdetection | mmdetection-master/.github/ISSUE_TEMPLATE/3-new-model.yml | name: "\U0001F31F New model/dataset/scheduler addition"
description: Submit a proposal/request to implement a new model / dataset / scheduler
labels: "kind/feature,status/unconfirmed"
title: "[New Models] "
body:
- type: textarea
id: description-request
validations:
required: true
attributes:
label: Model/Dataset/Scheduler description
description: |
        Put any and all important information relevant to the model/dataset/scheduler
- type: checkboxes
attributes:
label: Open source status
description: |
Please provide the open-source status, which would be very helpful
options:
- label: "The model implementation is available"
- label: "The model weights are available."
- type: textarea
id: additional-info
attributes:
label: Provide useful links for the implementation
description: |
Please provide information regarding the implementation, the weights, and the authors.
Please mention the authors by @gh-username if you're aware of their usernames.
| 1,084 | 31.878788 | 94 | yml |
mmdetection | mmdetection-master/.github/ISSUE_TEMPLATE/4-documentation.yml | name: 📚 Documentation
description: Report an issue related to the documentation.
labels: "kind/doc,status/unconfirmed"
title: "[Docs] "
body:
- type: dropdown
id: branch
attributes:
label: Branch
description: This issue is related to the
options:
- master branch https://mmdetection.readthedocs.io/en/latest/
- 3.x branch https://mmdetection.readthedocs.io/en/3.x/
validations:
required: true
- type: textarea
attributes:
label: 📚 The doc issue
description: >
        A clear and concise description of the issue.
validations:
required: true
- type: textarea
attributes:
label: Suggest a potential alternative/fix
description: >
Tell us how we could improve the documentation in this regard.
- type: markdown
attributes:
value: >
Thanks for contributing 🎉!
| 834 | 22.857143 | 68 | yml |
mmdetection | mmdetection-master/.github/ISSUE_TEMPLATE/5-reimplementation.yml | name: "💥 Reimplementation Questions"
description: "Ask about questions during model reimplementation"
labels: "kind/enhancement,status/unconfirmed"
title: "[Reimplementation] "
body:
- type: markdown
attributes:
value: |
We strongly appreciate you creating a PR to implement this feature [here](https://github.com/open-mmlab/mmdetection/pulls)!
If you need our help, please fill in as much of the following form as you're able to.
**The less clear the description, the longer it will take to solve it.**
- type: checkboxes
attributes:
label: Prerequisite
description: Please check the following items before creating a new issue.
options:
- label: I have searched [Issues](https://github.com/open-mmlab/mmdetection/issues) and [Discussions](https://github.com/open-mmlab/mmdetection/discussions) but cannot get the expected help.
required: true
- label: I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
required: true
- label: The bug has not been fixed in the [latest version (master)](https://github.com/open-mmlab/mmdetection) or [latest version (3.x)](https://github.com/open-mmlab/mmdetection/tree/dev-3.x).
required: true
- type: textarea
attributes:
label: 💬 Describe the reimplementation questions
description: |
A clear and concise description of what the problem you meet and what have you done.
There are several common situations in the reimplementation issues as below
1. Reimplement a model in the model zoo using the provided configs
2. Reimplement a model in the model zoo on other dataset (e.g., custom datasets)
3. Reimplement a custom model but all the components are implemented in MMDetection
4. Reimplement a custom model with new modules implemented by yourself
There are several things to do for different cases as below.
- For case 1 & 3, please follow the steps in the following sections thus we could help to quick identify the issue.
- For case 2 & 4, please understand that we are not able to do much help here because we usually do not know the full code and the users should be responsible to the code they write.
- One suggestion for case 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtain in the issue, and follow the steps in the following sections and try as clear as possible so that we can better help you.
placeholder: |
A clear and concise description of what the bug is.
What config dir you run?
```none
A placeholder for the config.
```
```shell
The command or script you run.
```
```
The error message or logs you got, with the full traceback.
```
validations:
required: true
- type: textarea
attributes:
label: Environment
description: |
Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and paste it here.
        You may add additional information that may be helpful for locating the problem, such as
- How you installed PyTorch \[e.g., pip, conda, source\]
- Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
validations:
required: true
- type: textarea
attributes:
label: Expected results
description: If applicable, paste the related results here, e.g., what you expect and what you get.
placeholder: |
```none
A placeholder for results comparison
```
- type: textarea
attributes:
label: Additional information
description: Tell us anything else you think we should know.
placeholder: |
1. Did you make any modifications on the code or config? Did you understand what you have modified?
2. What dataset did you use?
3. What do you think might be the reason?
| 4,281 | 46.577778 | 441 | yml |
mmdetection | mmdetection-master/.github/ISSUE_TEMPLATE/config.yml | blank_issues_enabled: true
contact_links:
- name: 💬 Forum
url: https://github.com/open-mmlab/mmdetection/discussions
about: Ask general usage questions and discuss with other MMDetection community members
- name: 🌐 Explore OpenMMLab
url: https://openmmlab.com/
    about: Get to know more about OpenMMLab
| 319 | 31 | 91 | yml |
mmdetection | mmdetection-master/.github/workflows/build.yml | name: build
on:
push:
paths-ignore:
- ".dev_scripts/**"
- ".github/**.md"
- "demo/**"
- "docker/**"
- "tools/**"
- "README.md"
- "README_zh-CN.md"
pull_request:
paths-ignore:
- ".dev_scripts/**"
- ".github/**.md"
- "demo/**"
- "docker/**"
- "docs/**"
- "docs_zh-CN/**"
- "tools/**"
- "README.md"
- "README_zh-CN.md"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build_cpu:
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7]
torch: [1.5.1, 1.6.0, 1.7.0, 1.8.0, 1.9.0, 1.10.1]
include:
- torch: 1.5.1
torchvision: 0.6.1
mmcv: 1.5
- torch: 1.6.0
torchvision: 0.7.0
mmcv: 1.6
- torch: 1.7.0
torchvision: 0.8.1
mmcv: 1.7
- torch: 1.8.0
torchvision: 0.9.0
mmcv: 1.8
- torch: 1.9.0
torchvision: 0.10.0
mmcv: 1.9
- torch: 1.10.1
torchvision: 0.11.2
mmcv: "1.10"
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install PyTorch
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
- name: Install MMCV
run: |
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${{matrix.mmcv}}/index.html
python -c 'import mmcv; print(mmcv.__version__)'
- name: Install unittest dependencies
run: |
pip install -r requirements/tests.txt -r requirements/optional.txt
pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
pip install git+https://github.com/cocodataset/panopticapi.git
- name: Build and install
run: rm -rf .eggs && pip install -e .
- name: Run unittests and generate coverage report
run: |
coverage run --branch --source mmdet -m pytest tests/
coverage xml
coverage report -m
build_cuda101:
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel
strategy:
matrix:
python-version: [3.7]
torch: [1.5.1+cu101, 1.6.0+cu101, 1.7.0+cu101, 1.8.0+cu101]
include:
- torch: 1.5.1+cu101
torch_version: torch1.5.1
torchvision: 0.6.1+cu101
mmcv: 1.5
- torch: 1.6.0+cu101
torch_version: torch1.6.0
torchvision: 0.7.0+cu101
mmcv: 1.6
- torch: 1.7.0+cu101
torch_version: torch1.7.0
torchvision: 0.8.1+cu101
mmcv: 1.7
- torch: 1.8.0+cu101
torch_version: torch1.8.0
torchvision: 0.9.0+cu101
mmcv: 1.8
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Fetch GPG keys
run: |
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
- name: Install system dependencies
run: |
apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install dependencies for compiling onnx when python=3.9
run: python -m pip install "protobuf <= 3.20.1" && apt-get install libprotobuf-dev protobuf-compiler
if: ${{matrix.python-version == '3.9'}}
- name: Install mmdet dependencies
run: |
python -V
export CFLAGS=`python -c 'import sysconfig;print("-I"+sysconfig.get_paths()["include"])'`
export CXXFLAGS="${CFLAGS}"
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch${{matrix.mmcv}}/index.html
python -m pip install pycocotools
python -m pip install -r requirements/tests.txt -r requirements/optional.txt
python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
python -m pip install git+https://github.com/cocodataset/panopticapi.git
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
run: |
rm -rf .eggs
python setup.py check -m -s
TORCH_CUDA_ARCH_LIST=7.0 pip install .
- name: Run unittests and generate coverage report
run: |
coverage run --branch --source mmdet -m pytest tests/
coverage xml
coverage report -m
- name: Upload coverage to Codecov
uses: codecov/[email protected]
with:
file: ./coverage.xml
flags: unittests
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
build_cuda102:
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel
strategy:
matrix:
python-version: [3.7, 3.8, 3.9]
torch: [1.9.0+cu102, 1.10.1+cu102]
include:
- torch: 1.9.0+cu102
torch_version: torch1.9.0
torchvision: 0.10.0+cu102
mmcv: 1.9
- torch: 1.10.1+cu102
torch_version: torch1.10.1
torchvision: 0.11.2+cu102
mmcv: "1.10"
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Fetch GPG keys
run: |
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
# Add ppa source repo for python3.9.
- name: Add python3.9 source
run: |
apt-get update && apt-get install -y software-properties-common
add-apt-repository -y ppa:deadsnakes/ppa
if: ${{matrix.python-version == '3.9'}}
# Install python-dev for some packages which require libpython3.Xm.
# Github's setup-python cannot install python3.9-dev, so we have to use apt install.
# Set DEBIAN_FRONTEND=noninteractive to avoid some interactions.
- name: Install python-dev
run: apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends python${{matrix.python-version}}-dev
- name: Install system dependencies
run: |
apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install dependencies for compiling onnx when python=3.9
run: python -m pip install "protobuf <= 3.20.1" && apt-get update && apt-get -y install libprotobuf-dev protobuf-compiler cmake
if: ${{matrix.python-version == '3.9'}}
- name: Install mmdet dependencies
run: |
python -V
export CFLAGS=`python -c 'import sysconfig;print("-I"+sysconfig.get_paths()["include"])'`
export CXXFLAGS="${CFLAGS}"
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/torch${{matrix.mmcv}}/index.html
python -m pip install pycocotools
python -m pip install -r requirements/tests.txt -r requirements/optional.txt
python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
python -m pip install git+https://github.com/cocodataset/panopticapi.git
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
run: |
rm -rf .eggs
python setup.py check -m -s
TORCH_CUDA_ARCH_LIST=7.0 pip install .
- name: Run unittests and generate coverage report
run: |
coverage run --branch --source mmdet -m pytest tests/
coverage xml
coverage report -m
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
with:
files: ./coverage.xml
flags: unittests
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
build_windows:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [windows-2022]
python: [3.8]
platform: [cpu, cu111]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Upgrade pip
run: python -m pip install pip --upgrade --user
- name: Install PyTorch
# As a complement to Linux CI, we test on PyTorch LTS version
run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
- name: Install MMCV
run: pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full
- name: Install unittest dependencies
run: |
python -V
python -m pip install pycocotools
python -m pip install -r requirements/tests.txt -r requirements/optional.txt
python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
python -m pip install git+https://github.com/cocodataset/panopticapi.git
python -c 'import mmcv; print(mmcv.__version__)'
- name: Show pip list
run: pip list
- name: Build and install
run: pip install -e .
- name: Run unittests
run: coverage run --branch --source mmdet -m pytest tests
- name: Generate coverage report
run: |
coverage xml
coverage report -m
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
with:
file: ./coverage.xml
flags: unittests
env_vars: OS,PYTHON
name: codecov-umbrella
fail_ci_if_error: false
| 11,161 | 37.891986 | 166 | yml |
mmdetection | mmdetection-master/.github/workflows/build_pat.yml | name: build_pat
on: push
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build_parrots:
runs-on: ubuntu-latest
container:
image: ghcr.io/zhouzaida/parrots-mmcv:1.3.4
credentials:
username: zhouzaida
password: ${{ secrets.CR_PAT }}
steps:
- uses: actions/checkout@v2
- name: Install mmdet dependencies
run: |
git clone https://github.com/open-mmlab/mmcv.git && cd mmcv
MMCV_WITH_OPS=1 python setup.py install
cd .. && rm -rf mmcv
python -c 'import mmcv; print(mmcv.__version__)'
pip install -r requirements.txt
- name: Build and install
run: rm -rf .eggs && pip install -e .
| 783 | 23.5 | 69 | yml |
mmdetection | mmdetection-master/.github/workflows/deploy.yml | name: deploy
on: push
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build-n-publish:
runs-on: ubuntu-latest
if: startsWith(github.event.ref, 'refs/tags')
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Install torch
run: pip install torch
- name: Install wheel
run: pip install wheel
- name: Build MMDetection
run: python setup.py sdist bdist_wheel
- name: Publish distribution to PyPI
run: |
pip install twine
twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
| 765 | 22.9375 | 74 | yml |
mmdetection | mmdetection-master/.github/workflows/lint.yml | name: lint
on: [push, pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Install pre-commit hook
run: |
pip install pre-commit
pre-commit install
- name: Linting
run: pre-commit run --all-files
- name: Check docstring coverage
run: |
pip install interrogate
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 80 mmdet
| 761 | 23.580645 | 135 | yml |
mmdetection | mmdetection-master/.github/workflows/stale.yml | name: 'Close stale issues and PRs'
on:
schedule:
  # check issues and pull requests once a day
- cron: '25 11 * * *'
permissions:
contents: read
jobs:
invalid-stale-close:
permissions:
issues: write
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
with:
stale-issue-message: 'This issue is marked as stale because it has been marked as invalid or awaiting response for 7 days without any further response. It will be closed in 5 days if the stale label is not removed or if there is no further response.'
        stale-pr-message: 'This PR is marked as stale because there has been no activity in the past 45 days. It will be closed in 10 days if the stale label is not removed or if there are no further updates.'
close-issue-message: 'This issue is closed because it has been stale for 5 days. Please open a new issue if you have similar issues or you have any new updates now.'
close-pr-message: 'This PR is closed because it has been stale for 10 days. Please reopen this PR if you have any updates and want to keep contributing the code.'
        # only issues/PRs with either the invalid or the awaiting response label are checked
any-of-labels: 'invalid, awaiting response'
days-before-issue-stale: 7
days-before-pr-stale: 45
days-before-issue-close: 5
days-before-pr-close: 10
        # automatically remove the stale label when the issues or the pull requests are updated or commented on
remove-stale-when-updated: true
| 1,598 | 48.96875 | 260 | yml |
mmdetection | mmdetection-master/.github/workflows/test_mim.yml | name: test-mim
on:
push:
paths:
- 'model-index.yml'
- 'configs/**'
pull_request:
paths:
- 'model-index.yml'
- 'configs/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build_cpu:
runs-on: ubuntu-18.04
strategy:
matrix:
python-version: [3.7]
torch: [1.8.0]
include:
- torch: 1.8.0
torch_version: torch1.8
torchvision: 0.9.0
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade pip
run: pip install pip --upgrade
- name: Install Pillow
run: pip install Pillow==6.2.2
if: ${{matrix.torchvision == '0.4.2'}}
- name: Install PyTorch
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
- name: Install openmim
run: pip install openmim
- name: Build and install
run: rm -rf .eggs && mim install -e .
- name: test commands of mim
run: mim search mmdet
| 1,293 | 24.372549 | 148 | yml |
mmdetection | mmdetection-master/configs/_base_/default_runtime.py | checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
# Default setting for scaling LR automatically
# - `enable` means whether to enable automatic LR scaling by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
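# For example, a downstream config trained with a different total batch size
# could override this as (sketch):
#   auto_scale_lr = dict(enable=True, base_batch_size=32)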
| 791 | 27.285714 | 67 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/cityscapes_detection.py | # dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
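# ImageNet mean/std in RGB order, matching the ImageNet-pretrained backbones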
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=2,
train=dict(
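        # Repeat the relatively small Cityscapes train split 8 times per epoch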
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_train.json',
img_prefix=data_root + 'leftImg8bit/train/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
img_prefix=data_root + 'leftImg8bit/val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_test.json',
img_prefix=data_root + 'leftImg8bit/test/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| 1,937 | 33 | 79 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/cityscapes_instance.py | # dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_train.json',
img_prefix=data_root + 'leftImg8bit/train/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
img_prefix=data_root + 'leftImg8bit/val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_test.json',
img_prefix=data_root + 'leftImg8bit/test/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
| 1,963 | 33.45614 | 79 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/coco_detection.py | # dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| 1,711 | 33.24 | 77 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/coco_instance.py | # dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
| 1,737 | 33.76 | 77 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/coco_instance_semantic.py | # dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'stuffthingmaps/train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
| 1,922 | 33.963636 | 79 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/coco_panoptic.py | # dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'annotations/panoptic_train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline))
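# Evaluate with the COCO panoptic quality (PQ) metric after every epoch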
evaluation = dict(interval=1, metric=['PQ'])
| 2,079 | 33.666667 | 79 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/deepfashion.py | # dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(750, 1101), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(750, 1101),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
img_prefix=data_root + 'Img/',
pipeline=train_pipeline,
data_root=data_root),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
img_prefix=data_root + 'Img/',
pipeline=test_pipeline,
data_root=data_root),
test=dict(
type=dataset_type,
ann_file=data_root +
'annotations/DeepFashion_segmentation_gallery.json',
img_prefix=data_root + 'Img/',
pipeline=test_pipeline,
data_root=data_root))
evaluation = dict(interval=5, metric=['bbox', 'segm'])
| 1,888 | 33.981481 | 79 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/lvis_v0.5_instance.py | # dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
_delete_=True,
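        # ClassBalancedDataset applies repeat-factor sampling: images containing
        # categories rarer than oversample_thr are repeated more often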
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v0.5_train.json',
img_prefix=data_root + 'train2017/')),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/'),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/'))
evaluation = dict(metric=['bbox', 'segm'])
| 786 | 30.48 | 68 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/lvis_v1_instance.py | # dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
_delete_=True,
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_train.json',
img_prefix=data_root)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root))
evaluation = dict(metric=['bbox', 'segm'])
| 736 | 28.48 | 66 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/objects365v1_detection.py | # dataset settings
dataset_type = 'Objects365V1Dataset'
data_root = 'data/Objects365/Obj365_v1/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/objects365_train.json',
img_prefix=data_root + 'train/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/objects365_val.json',
img_prefix=data_root + 'val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/objects365_val.json',
img_prefix=data_root + 'val/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| 1,714 | 33.3 | 77 | py |
mmdetection | mmdetection-master/configs/_base_/datasets/objects365v2_detection.py | # dataset settings
dataset_type = 'Objects365V2Dataset'
data_root = 'data/Objects365/Obj365_v2/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/zhiyuan_objv2_train.json',
img_prefix=data_root + 'train/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/zhiyuan_objv2_val.json',
img_prefix=data_root + 'val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/zhiyuan_objv2_val.json',
img_prefix=data_root + 'val/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| 1,723 | 33.48 | 77 | py |