repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
RAML | RAML-master/incremental/network/utils.py | from re import M
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from collections import OrderedDict
import json
class DeepLabHeadV3Plus_Metric(nn.Module):
def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36], finetune=False):
super(DeepLabHeadV3Plus_Metric, self).__init__()
self.project = nn.Sequential(
nn.Conv2d(low_level_channels, 48, 1, bias=False),
nn.BatchNorm2d(48),
nn.ReLU(inplace=True),
)
self.aspp = ASPP(in_channels, aspp_dilate)
self.num_meta_channel = 4
#self.num_meta_channel = 2
#self.num_meta_channel = 6
# self.conv1=nn.Conv2d(304, 256, 1, padding=0, stride=1)
self.conv1=nn.Conv2d(304, 256, 3, padding=1, bias=False)
self.bn=nn.BatchNorm2d(256)
self.relu=nn.ReLU(inplace=True)
#self.conv2=nn.Sequential(nn.Conv2d(256, 16, 1), nn.BatchNorm2d(16), nn.Sigmoid())
self.conv2=nn.Conv2d(256, num_classes, 1)
# ablation study: the output channels should be num_classes + self.num_meta_channel, which equals 20 in the 16+3 mode
self.conv3=nn.Conv2d(num_classes, 20, 1)
self.finetune = finetune
if (self.finetune):
print("only train conv3 in classifier")
self._init_weight()
def forward(self, feature):
if (self.finetune):
with torch.no_grad():
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
x = torch.cat([low_level_feature, output_feature], dim=1)
x = self.conv1(x)
x = self.bn(x)
feature = self.relu(x)
output1 = self.conv2(feature)
else:
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
x = torch.cat([low_level_feature, output_feature], dim=1)
x = self.conv1(x)
x = self.bn(x)
feature = self.relu(x)
output1 = self.conv2(feature)
output2 = self.conv3(output1)
return output1, torch.sigmoid(output2), feature
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
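# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Shows the input dict and output shapes DeepLabHeadV3Plus_Metric expects, assuming a
# ResNet-101 backbone at output stride 16 (in_channels=2048, low_level_channels=256)
# and the 16-class / 20-channel setup configured above. All sizes here are assumptions.
def _example_metric_head_shapes():
    head = DeepLabHeadV3Plus_Metric(in_channels=2048, low_level_channels=256, num_classes=16)
    feature = {
        'low_level': torch.randn(2, 256, 128, 256),  # stride-4 feature map
        'out': torch.randn(2, 2048, 32, 64),         # stride-16 feature map
    }
    output1, output2, feat = head(feature)
    # output1: (2, 16, 128, 256)  class logits from conv2
    # output2: (2, 20, 128, 256)  sigmoid meta-channel maps from conv3
    # feat:    (2, 256, 128, 256) shared feature before the classifiers
    return output1.shape, output2.shape, feat.shape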
# class DropChannel(nn.Module):
# def __init__(self, n):
# super().__init__()
# self.n = n
# def forward(self, x):
# # x: (B, C, H, W)
# B, C, _, _ = x.shape
# m = torch.ones(B, C, 1, 1).float().to(x.device)
# if self.training:
# for i in np.random.choice(range(C), self.n, replace=False):
# m[:, i] = 0
# x = x * m
# return x, m
# class DeepLabHeadV3Plus_Metric(nn.Module):
# def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHeadV3Plus_Metric, self).__init__()
# self.project = nn.Sequential(
# nn.Conv2d(low_level_channels, 48, 1, bias=False),
# nn.BatchNorm2d(48),
# nn.ReLU(inplace=True),
# )
# self.aspp = ASPP(in_channels, aspp_dilate)
# # self.conv1=nn.Conv2d(304, 256, 1, padding=0, stride=1)
# self.conv1=nn.Conv2d(304, 256, 3, padding=1, bias=False)
# self.bn=nn.BatchNorm2d(256)
# self.relu=nn.ReLU(inplace=True)
# self.conv2=nn.Conv2d(256, 16, 1)
# self.conv3=nn.Conv2d(16, 16, 1)
# self.drop3=DropChannel(4)
# self.conv4=nn.Conv2d(16+256, 4, 1)
# self._init_weight()
# def forward(self, feature):
# low_level_feature = self.project(feature['low_level'])
# output_feature = self.aspp(feature['out'])
# output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
# align_corners=False)
# x = torch.cat([low_level_feature, output_feature], dim=1)
# x = self.conv1(x)
# x = self.bn(x)
# feature = self.relu(x)
# output1 = self.conv2(feature)
# output2 = torch.sigmoid(self.conv3(output1))
# output2, mask = self.drop3(output2)
# output2 = torch.cat([output2, torch.sigmoid(self.conv4(torch.cat([feature, output2], dim=1)))], dim=1)
# return output1, output2, feature, mask
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
class DeepLabHeadV3Plus(nn.Module):
def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
super(DeepLabHeadV3Plus, self).__init__()
self.project = nn.Sequential(
nn.Conv2d(low_level_channels, 48, 1, bias=False),
nn.BatchNorm2d(48),
nn.ReLU(inplace=True),
)
self.aspp = ASPP(in_channels, aspp_dilate)
self.classifier = nn.Sequential(
nn.Conv2d(304, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, 1)
)
self._init_weight()
def forward(self, feature):
low_level_feature = self.project(feature['low_level'])
output_feature = self.aspp(feature['out'])
output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
align_corners=False)
return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class _SimpleSegmentationModel(nn.Module):
def __init__(self, backbone, classifier):
super(_SimpleSegmentationModel, self).__init__()
self.backbone = backbone
self.classifier = classifier
def forward(self, x):
input_shape = x.shape[-2:]
features = self.backbone(x)
x = self.classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
return x
class _SimpleSegmentationModel_Metric(nn.Module):
def __init__(self, backbone, classifier, finetune=False):
super(_SimpleSegmentationModel_Metric, self).__init__()
self.backbone = backbone
self.classifier = classifier
self.finetune = finetune
if (finetune):
print('freeze resnet backbone')
def forward(self, x):
input_shape = x.shape[-2:]
if (self.finetune):
with torch.no_grad():
features = self.backbone(x)
else:
features = self.backbone(x)
outputs1, outputs2, features = self.classifier(features)
#print(outputs1.shape, outputs2.shape)
outputs1 = F.interpolate(outputs1, size=input_shape, mode='bilinear', align_corners=False) # (B, 16, H, W)
outputs2 = F.interpolate(outputs2, size=input_shape, mode='bilinear', align_corners=False) # (B, 20, H, W)
outputs3 = (outputs2.unsqueeze(dim=1) * x.unsqueeze(dim=2)).sum(dim=2) # (B, 3, H, W)
'''
need to consider
'''
#features = F.interpolate(features,size=input_shape, mode='bilinear', align_corners=False)
return outputs1, outputs2, features, outputs3
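# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Spells out the broadcast behind outputs3 above: outputs2 (B, 20, H, W) and the input
# x (B, 3, H, W) are expanded to (B, 3, 20, H, W) and the product is summed over the
# 20 sigmoid maps. The small sizes and names below are hypothetical.
def _example_outputs3_broadcast():
    x = torch.randn(2, 3, 4, 4)             # input image (B, 3, H, W)
    outputs2 = torch.rand(2, 20, 4, 4)       # sigmoid maps (B, 20, H, W)
    prod = outputs2.unsqueeze(dim=1) * x.unsqueeze(dim=2)  # (B, 3, 20, H, W)
    outputs3 = prod.sum(dim=2)               # (B, 3, H, W)
    return outputs3.shape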
class _SimpleSegmentationModel_embedding(nn.Module):
def __init__(self, backbone, classifier):
super(_SimpleSegmentationModel_embedding, self).__init__()
self.backbone = backbone
self.classifier = classifier
self.centers = torch.zeros(17, 17)
# idx = 0
# for i in range(19):
# if i <= 12 or i >=16:
# self.centers[idx] = torch.tensor(np.mean(np.array(prototype[idx]), axis=0))
# idx += 1
magnitude = 3
for i in range(17):
self.centers[i][i] = magnitude
# cnt = 0
# for i in range(17):
# if i <= 12:
# self.centers[cnt][cnt] = magnitude
# cnt += 1
# elif i > 13:
# self.centers[cnt+1][cnt] = magnitude
# cnt += 1
# self.centers[13] = torch.ones(1,16) * 3
# print(self.centers)
def forward(self, x):
input_shape = x.shape[-2:]
features = self.backbone(x)
x = self.classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
output_size = x.size()
# print(output)
# print(np.unique(output.cpu().numpy()[0][0]))
features = x.permute(0, 2, 3, 1).contiguous() # batch * h * w * num_class
features_out = features
shape = features.size()
features = features.view(shape[0], shape[1] * shape[2], shape[3]) # batch * hw * num_class
num_classes = output_size[1]
features_shape = features.size()
features = features.unsqueeze(2).expand(features_shape[0], features_shape[1], num_classes,
features_shape[2]) # batch * hw * num_class * num_class
# print(features.size())
# print(self.centers.size())
self.centers = torch.zeros(shape[3], shape[3])
m = 3
for i in range(shape[3]):
self.centers[i][i] = m
# print(self.centers.shape)
dists = features - self.centers.cuda() # batch * hw * num_classes * c
# print(dists.size())
dist2mean = -torch.sum(dists ** 2, 3) # batch * hw * num_classes
# print(dist2mean.size())
# m = nn.Softmax(dim=2)
# prob = m(dist2mean) # batch * hw * num_classes
# print(prob)
x = dist2mean.permute(0, 2, 1).contiguous().view(output_size[0], num_classes, output_size[2],
output_size[3])
return x, self.centers.cuda(), features_out
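# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Minimal numeric version of the prototype logic above: the centers form a scaled
# identity matrix (magnitude on the diagonal), and the logit for class j at each pixel
# is the negative squared Euclidean distance between its embedding and center j.
# CPU-only and with a small class count for readability; not the training configuration.
def _example_center_distance_logits():
    num_classes = 4
    centers = torch.eye(num_classes) * 3                # (C, C), as built in the loop above
    emb = torch.randn(1, 5, num_classes)                # (batch, h*w, C) pixel embeddings
    dists = emb.unsqueeze(2) - centers                  # (batch, h*w, C, C)
    logits = -torch.sum(dists ** 2, dim=3)              # (batch, h*w, C)
    return logits.shape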
class _SimpleSegmentationModel_embedding_self_distillation(nn.Module):
def __init__(self, backbone):
super(_SimpleSegmentationModel_embedding_self_distillation, self).__init__()
self.backbone = backbone
self.classifier_list = ['classifier']
self.cls_novel = 1
for i in range(self.cls_novel):
self.classifier_list.append('classifier_' + str(i+1))
inplanes = 2048
low_level_planes = 256
aspp_dilate = [6, 12, 18]
num_classes = 16
self.classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
for i in range(self.cls_novel):
self.__setattr__(self.classifier_list[i+1], DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes + i + 1, aspp_dilate))
self.centers = torch.zeros(17, 17)
def forward(self, x):
# for m in self.__getattr__(self.classifier_list[-1]).modules():
# if isinstance(m, nn.BatchNorm2d):
# m.train()
input_shape = x.shape[-2:]
features = self.backbone(x)
logits = []
centers = []
features_out = []
logits_0, centers_0, features_out_0 = self.forward_single(self.classifier, features, input_shape)
logits.append(logits_0)
centers.append(centers_0)
features_out.append(features_out_0)
for i in range(self.cls_novel):
classifier_temp = self.__getattr__(self.classifier_list[i+1])
logits_tmp, centers_tmp, features_out_tmp = self.forward_single(classifier_temp, features, input_shape)
logits.append(logits_tmp)
centers.append(centers_tmp)
features_out.append(features_out_tmp)
return logits, centers, features_out
def forward_single(self, classifier, features, input_shape):
x = classifier(features)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
output_size = x.size()
# print(output)
# print(np.unique(output.cpu().numpy()[0][0]))
features = x.permute(0, 2, 3, 1).contiguous() # batch * h * w * num_class
features_out = features
shape = features.size()
features = features.view(shape[0], shape[1] * shape[2], shape[3]) # batch * hw * num_class
num_classes = output_size[1]
features_shape = features.size()
features = features.unsqueeze(2).expand(features_shape[0], features_shape[1], num_classes,
features_shape[2]) # batch * hw * num_class * num_class
# print(features.size())
# print(self.centers.size())
self.centers = torch.zeros(shape[3], shape[3])
m = 3
for i in range(shape[3]):
self.centers[i][i] = m
# print(self.centers)
dists = features - self.centers.cuda() # batch * hw * num_classes * c
# print(dists.size())
dist2mean = -torch.sum(dists ** 2, 3) # batch * hw * num_classes
# print(dist2mean.size())
# m = nn.Softmax(dim=2)
# prob = m(dist2mean) # batch * hw * num_classes
# print(prob)
x = dist2mean.permute(0, 2, 1).contiguous().view(output_size[0], num_classes, output_size[2],
output_size[3])
return x, self.centers.cuda(), features_out
class IntermediateLayerGetter(nn.ModuleDict):
"""
Module wrapper that returns intermediate layers from a model
It has a strong assumption that the modules have been registered
into the model in the same order as they are used.
This means that one should **not** reuse the same nn.Module
twice in the forward if you want this to work.
Additionally, it is only able to query submodules that are directly
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
Arguments:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
Examples::
>>> m = torchvision.models.resnet18(pretrained=True)
>>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
>>> {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = new_m(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
"""
def __init__(self, model, return_layers):
if not set(return_layers).issubset([name for name, _ in model.named_children()]):
raise ValueError("return_layers are not present in model")
orig_return_layers = return_layers
return_layers = {k: v for k, v in return_layers.items()}
layers = OrderedDict()
for name, module in model.named_children():
layers[name] = module
if name in return_layers:
del return_layers[name]
if not return_layers:
break
super(IntermediateLayerGetter, self).__init__(layers)
self.return_layers = orig_return_layers
def forward(self, x):
out = OrderedDict()
for name, module in self.named_children():
x = module(x)
if name in self.return_layers:
out_name = self.return_layers[name]
out[out_name] = x
return out
class DeepLabHead(nn.Module):
def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
super(DeepLabHead, self).__init__()
self.classifier = nn.Sequential(
ASPP(in_channels, aspp_dilate),
nn.Conv2d(256, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, 1)
)
self._init_weight()
def forward(self, feature):
return self.classifier(feature['out'])
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class AtrousSeparableConvolution(nn.Module):
""" Atrous Separable Convolution
"""
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, bias=True):
super(AtrousSeparableConvolution, self).__init__()
self.body = nn.Sequential(
# Separable Conv
nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, bias=bias, groups=in_channels),
# PointWise Conv
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
)
self._init_weight()
def forward(self, x):
return self.body(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class ASPPConv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation):
modules = [
nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
super(ASPPConv, self).__init__(*modules)
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels):
super(ASPPPooling, self).__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
def forward(self, x):
size = x.shape[-2:]
x = super(ASPPPooling, self).forward(x)
return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
def __init__(self, in_channels, atrous_rates):
super(ASPP, self).__init__()
out_channels = 256
modules = []
modules.append(nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)))
rate1, rate2, rate3 = tuple(atrous_rates)
modules.append(ASPPConv(in_channels, out_channels, rate1))
modules.append(ASPPConv(in_channels, out_channels, rate2))
modules.append(ASPPConv(in_channels, out_channels, rate3))
modules.append(ASPPPooling(in_channels, out_channels))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Dropout(0.1), )
def forward(self, x):
res = []
for conv in self.convs:
res.append(conv(x))
res = torch.cat(res, dim=1)
return self.project(res)
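# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# ASPP concatenates a 1x1 branch, three dilated 3x3 branches and a global-pooling branch
# (5 * 256 channels) and projects back to 256 channels at the input resolution. The
# 2048-channel, stride-16 input below is only an example.
def _example_aspp_shapes():
    aspp = ASPP(in_channels=2048, atrous_rates=[12, 24, 36])
    y = aspp(torch.randn(1, 2048, 32, 64))
    # y: (1, 256, 32, 64)
    return y.shape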
def convert_to_separable_conv(module):
new_module = module
if isinstance(module, nn.Conv2d) and module.kernel_size[0] > 1:
new_module = AtrousSeparableConvolution(module.in_channels,
module.out_channels,
module.kernel_size,
module.stride,
module.padding,
module.dilation,
module.bias is not None)
for name, child in module.named_children():
new_module.add_module(name, convert_to_separable_conv(child))
return new_module | 21,245 | 39.701149 | 136 | py |
RAML | RAML-master/incremental/network/backbone/resnet.py | import torch
import torch.nn as nn
#from torchvision.models.utils import load_state_dict_from_url
from torch.hub import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
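# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# How a dilated backbone for DeepLab-style segmentation can be built from this module:
# replacing the last two strides with dilation lowers the output stride from 32 to 8.
# pretrained=False here only avoids the download; the input size is an assumption.
def _example_dilated_backbone():
    backbone = resnet101(pretrained=False, replace_stride_with_dilation=[False, True, True])
    x = torch.randn(1, 3, 224, 224)
    x = backbone.maxpool(backbone.relu(backbone.bn1(backbone.conv1(x))))
    x = backbone.layer4(backbone.layer3(backbone.layer2(backbone.layer1(x))))
    # x: (1, 2048, 28, 28) -> 224 / 8, i.e. output stride 8 instead of 32
    return x.shape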
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except that the number of bottleneck channels
is twice as large in every block. The number of channels in the outer 1x1
convolutions stays the same, e.g. the last block in ResNet-50 has 2048-512-2048
channels, whereas in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except that the number of bottleneck channels
is twice as large in every block. The number of channels in the outer 1x1
convolutions stays the same, e.g. the last block in ResNet-50 has 2048-512-2048
channels, whereas in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| 13,621 | 38.598837 | 107 | py |
RAML | RAML-master/incremental/network/backbone/mobilenetv2.py | from torch import nn
#from torchvision.models.utils import load_state_dict_from_url
from torch.hub import load_state_dict_from_url
import torch.nn.functional as F
__all__ = ['MobileNetV2', 'mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
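# --- Illustrative worked example (added for clarity; not part of the original file) ---
# Concrete values for _make_divisible with divisor=8, as used by the width multiplier:
def _example_make_divisible():
    assert _make_divisible(32 * 0.75, 8) == 24   # rounds to the nearest multiple of 8
    assert _make_divisible(32 * 0.1, 8) == 8     # clamped up to min_value (= divisor)
    assert _make_divisible(67, 8) == 64          # 64 >= 0.9 * 67, so no extra bump needed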
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, dilation=1, groups=1):
#padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, 0, dilation=dilation, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
def fixed_padding(kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
return (pad_beg, pad_end, pad_beg, pad_end)
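# --- Illustrative worked example (added for clarity; not part of the original file) ---
# fixed_padding returns (left, right, top, bottom) padding for the effective (dilated)
# kernel size, mirroring TensorFlow-style "SAME" padding:
def _example_fixed_padding():
    assert fixed_padding(3, 1) == (1, 1, 1, 1)   # effective kernel 3 -> total pad 2
    assert fixed_padding(3, 2) == (2, 2, 2, 2)   # effective kernel 5 -> total pad 4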
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, dilation, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, dilation=dilation, groups=hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
self.input_padding = fixed_padding( 3, dilation )
def forward(self, x):
x_pad = F.pad(x, self.input_padding)
if self.use_res_connect:
return x + self.conv(x_pad)
else:
return self.conv(x_pad)
class MobileNetV2(nn.Module):
def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
self.output_stride = output_stride
current_stride = 1
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [ConvBNReLU(3, input_channel, stride=2)]
current_stride *= 2
dilation=1
previous_dilation = 1
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
previous_dilation = dilation
if current_stride == output_stride:
stride = 1
dilation *= s
else:
stride = s
current_stride *= s
output_channel = int(c * width_mult)
for i in range(n):
if i==0:
features.append(block(input_channel, output_channel, stride, previous_dilation, expand_ratio=t))
else:
features.append(block(input_channel, output_channel, 1, dilation, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
# make it nn.Sequential
self.features = nn.Sequential(*features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, num_classes),
)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.features(x)
x = x.mean([2, 3])
x = self.classifier(x)
return x
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
"""
Constructs a MobileNetV2 architecture from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MobileNetV2(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
progress=progress)
model.load_state_dict(state_dict)
return model
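# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Minimal use of the factory above. pretrained=False avoids the weight download and
# output_stride=8 matches the dilated setting used for segmentation backbones; as a
# standalone classifier the network still ends in a 1000-way ImageNet head.
def _example_mobilenet_v2():
    import torch  # local import: this module itself only imports torch.nn / F
    model = mobilenet_v2(pretrained=False, output_stride=8)
    logits = model(torch.randn(1, 3, 224, 224))
    # logits: (1, 1000)
    return logits.shape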
| 6,970 | 35.883598 | 123 | py |
RAML | RAML-master/incremental/network/.ipynb_checkpoints/_deeplab-checkpoint.py | import torch
from torch import nn
from torch.nn import functional as F
from .utils import _SimpleSegmentationModel, _SimpleSegmentationModel_embedding, _SimpleSegmentationModel_embedding_self_distillation,_SimpleSegmentationModel_Metric
__all__ = ["DeepLabV3"]
class DeepLabV3(_SimpleSegmentationModel):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class DeepLabV3_metric(_SimpleSegmentationModel_Metric):
pass
class DeepLabV3_embedding(_SimpleSegmentationModel_embedding):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class DeepLabV3_embedding_self_distillation(_SimpleSegmentationModel_embedding_self_distillation):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
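# --- Illustrative assembly sketch (added for clarity; not part of the original file) ---
# How these wrappers are typically put together: an IntermediateLayerGetter backbone that
# returns {'low_level', 'out'} plus a decoder head from network.utils. The import paths,
# channel sizes and class count below are assumptions based on the surrounding code.
def _example_build_deeplabv3():
    from .backbone import resnet
    from .utils import IntermediateLayerGetter, DeepLabHeadV3Plus
    backbone = resnet.resnet101(pretrained=False,
                                replace_stride_with_dilation=[False, True, True])
    backbone = IntermediateLayerGetter(backbone,
                                       return_layers={'layer1': 'low_level', 'layer4': 'out'})
    classifier = DeepLabHeadV3Plus(in_channels=2048, low_level_channels=256,
                                   num_classes=19, aspp_dilate=[12, 24, 36])
    return DeepLabV3(backbone, classifier)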
# class DeepLabHeadV3Plus(nn.Module):
# def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHeadV3Plus, self).__init__()
# self.project = nn.Sequential(
# nn.Conv2d(low_level_channels, 48, 1, bias=False),
# nn.BatchNorm2d(48),
# nn.ReLU(inplace=True),
# )
#
# self.aspp = ASPP(in_channels, aspp_dilate)
#
# self.classifier = nn.Sequential(
# nn.Conv2d(304, 256, 3, padding=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU(inplace=True),
# nn.Conv2d(256, num_classes, 1)
# )
# self._init_weight()
#
# def forward(self, feature):
# low_level_feature = self.project(feature['low_level'])
# output_feature = self.aspp(feature['out'])
# output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
# align_corners=False)
# return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# class DeepLabHead(nn.Module):
# def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHead, self).__init__()
#
# self.classifier = nn.Sequential(
# ASPP(in_channels, aspp_dilate),
# nn.Conv2d(256, 256, 3, padding=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU(inplace=True),
# nn.Conv2d(256, num_classes, 1)
# )
# self._init_weight()
#
# def forward(self, feature):
# return self.classifier( feature['out'] )
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
#
# class AtrousSeparableConvolution(nn.Module):
# """ Atrous Separable Convolution
# """
# def __init__(self, in_channels, out_channels, kernel_size,
# stride=1, padding=0, dilation=1, bias=True):
# super(AtrousSeparableConvolution, self).__init__()
# self.body = nn.Sequential(
# # Separable Conv
# nn.Conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels ),
# # PointWise Conv
# nn.Conv2d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
# )
#
# self._init_weight()
#
# def forward(self, x):
# return self.body(x)
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
#
# class ASPPConv(nn.Sequential):
# def __init__(self, in_channels, out_channels, dilation):
# modules = [
# nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True)
# ]
# super(ASPPConv, self).__init__(*modules)
#
# class ASPPPooling(nn.Sequential):
# def __init__(self, in_channels, out_channels):
# super(ASPPPooling, self).__init__(
# nn.AdaptiveAvgPool2d(1),
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True))
#
# def forward(self, x):
# size = x.shape[-2:]
# x = super(ASPPPooling, self).forward(x)
# return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
#
# class ASPP(nn.Module):
# def __init__(self, in_channels, atrous_rates):
# super(ASPP, self).__init__()
# out_channels = 256
# modules = []
# modules.append(nn.Sequential(
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True)))
#
# rate1, rate2, rate3 = tuple(atrous_rates)
# modules.append(ASPPConv(in_channels, out_channels, rate1))
# modules.append(ASPPConv(in_channels, out_channels, rate2))
# modules.append(ASPPConv(in_channels, out_channels, rate3))
# modules.append(ASPPPooling(in_channels, out_channels))
#
# self.convs = nn.ModuleList(modules)
#
# self.project = nn.Sequential(
# nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True),
# nn.Dropout(0.1),)
#
# def forward(self, x):
# res = []
# for conv in self.convs:
# res.append(conv(x))
# res = torch.cat(res, dim=1)
# return self.project(res)
#
#
#
# def convert_to_separable_conv(module):
# new_module = module
# if isinstance(module, nn.Conv2d) and module.kernel_size[0]>1:
# new_module = AtrousSeparableConvolution(module.in_channels,
# module.out_channels,
# module.kernel_size,
# module.stride,
# module.padding,
# module.dilation,
# module.bias)
# for name, child in module.named_children():
# new_module.add_module(name, convert_to_separable_conv(child))
# return new_module | 8,740 | 39.281106 | 165 | py |
RAML | RAML-master/incremental/.ipynb_checkpoints/main-checkpoint.py | from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, cityscapes
from utils import ext_transforms as et
from metrics import StreamSegMetrics
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from torch import Tensor
from typing import Tuple
def get_argparser():
parser = argparse.ArgumentParser()
# Datset Options
parser.add_argument("--data_root", type=str, default='../data/cityscapes',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='cityscapes',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=256,
help="num classes (default: None)")
parser.add_argument("--metric_dim", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--finetune", action='store_true', default=False)
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=30000,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.1,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=1000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: True)')
parser.add_argument("--batch_size", type=int, default=6,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=4,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=768)
parser.add_argument("--ckpt", default=None, type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0,1',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
parser.add_argument("--name", type=str, default='',help="download datasets")
parser.add_argument("--output_dir", type=str, default='output', help="output path")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
class BinaryDiceLoss(nn.Module):
def __init__(self, smooth=1, p=2, reduction='mean'):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target):
assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth
loss = 1 - num / den
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
else:
raise Exception('Unexpected reduction {}'.format(self.reduction))
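# Minimal usage sketch of BinaryDiceLoss (illustrative only; the shapes and
# values below are assumptions, not taken from the training pipeline). It shows
# the expected inputs: per-pixel probabilities and a binary mask of the same shape.
def _binary_dice_loss_example():
    criterion = BinaryDiceLoss()
    predict = torch.sigmoid(torch.randn(2, 1, 8, 8))   # probabilities in [0, 1]
    target = (torch.rand(2, 1, 8, 8) > 0.5).float()    # binary ground-truth mask
    return criterion(predict, target)                  # scalar: 1 - soft Dice score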
class MyDiceLoss(nn.Module):
def __init__(self, ignore_index=255):
super().__init__()
self.dice_criterion = BinaryDiceLoss()
self.ignore_index = ignore_index
def forward(self, logit, label_lst, class_lst):
loss = 0.0
for b in range(logit.shape[0]):
logit_b = logit[b][torch.where(class_lst[b] != self.ignore_index)]
label_lst_b = label_lst[b][torch.where(class_lst[b] != self.ignore_index)]
if logit_b.shape[0]:
loss += self.dice_criterion(logit_b, label_lst_b)
return loss / logit.shape[0]
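# Note on MyDiceLoss above: it applies BinaryDiceLoss per sample, restricted to
# the channels whose entry in class_lst is not the ignore_index (255), so
# padded/unused channels do not contribute to the loss.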
class CDiceLoss(nn.Module):
def __init__(self, known_class=16, ignore_index=255):
super().__init__()
self.dice_criterion = BinaryDiceLoss()
self.bce_criterion = nn.BCELoss()
self.ignore_index = ignore_index
self.class_num=known_class
print('finetune with '+str(known_class)+" classes")
def forward(self, logit, label_lst, class_lst):
loss1 = torch.FloatTensor([0.0]).to(logit.device)
for i in range(self.class_num):
loss1 += (self.dice_criterion(logit[:, i], label_lst[:, i]) + self.bce_criterion(logit[:, i], label_lst[:, i].float()))
loss1 /= self.class_num
loss2 = 0.0
for i in range(self.class_num, logit.shape[1]):
loss2 += -torch.log((torch.mean(logit[:, i]) * 50).clamp(0, 1))
loss2 /= (logit.shape[1] - self.class_num)
loss3 = 0.0
num3 = 0
for i in range(logit.shape[1]):
for j in range(logit.shape[1]):
if i == j: continue
dice_loss = self.dice_criterion(logit[:, i], logit[:, j])
loss3 += (1.0 - dice_loss)
num3 += 1
loss3 = loss3 / num3
loss = (loss1 + loss2 + loss3) * 0.1
return {
'loss': loss,
'loss1': loss1,
'loss2': loss2,
'loss3': loss3,
}
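# Minimal sketch of the tensor layout CDiceLoss expects (illustrative only; the
# batch size, spatial size and the 20-channel split into 16 known classes plus
# meta channels are assumptions made for this sketch, not pipeline values).
def _cdice_loss_example():
    criterion = CDiceLoss(known_class=16)
    logit = torch.sigmoid(torch.randn(2, 20, 32, 32))        # channel probabilities
    label_lst = (torch.rand(2, 20, 32, 32) > 0.5).long()     # per-channel binary masks
    class_lst = torch.arange(20).unsqueeze(0).repeat(2, 1)   # class id per channel
    return criterion(logit, label_lst, class_lst)            # dict: loss, loss1, loss2, loss3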
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def save_ckpt(batch_idx, model, optimizer, scheduler, path):
""" save current model
"""
torch.save({
"batch_idx": batch_idx,
"model_state": model.module.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
}, path)
print("Model saved as %s" % path)
def visualize(image, label, logit, label_lst, class_lst, save_path=None, denorm=None):
# logit: (256, H, W)
if not isinstance(image, np.ndarray):
image = image.detach().cpu().numpy()
label = label.detach().cpu().numpy()
logit = logit.detach().cpu().numpy()
label_lst = label_lst.detach().cpu().numpy()
class_lst = class_lst.detach().cpu().numpy()
if denorm:
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
_, axarr = plt.subplots(2, (1+logit.shape[0]), figsize=(5*(1+logit.shape[0]), 10))
axarr[0][0].imshow(image)
label[label == 255] = 0
axarr[1][0].imshow(label)
for i in range(logit.shape[0]):
if i < label_lst.shape[0]:
axarr[0][1+i].imshow(label_lst[i])
axarr[1][i+1].imshow((logit[i] >= 0.5).astype(np.uint8))
# _, axarr = plt.subplots(16, 32, figsize=(40, 20))
# for i in range(label.shape[0]):
# axarr[i//16][(i%16)*2].imshow(label[i])
# axarr[i//16][(i%16)*2].set_xticks([])
# axarr[i//16][(i%16)*2].set_yticks([])
# for i in range(logit.shape[0]):
# axarr[i//16][(i%16)*2+1].imshow((logit[i] >= 0.5).astype(np.uint8))
# axarr[i//16][(i%16)*2+1].set_xticks([])
# axarr[i//16][(i%16)*2+1].set_yticks([])
# label[label == 255] = 19
# C = logit.shape[0]
# logit = np.argmax(logit, axis=0)
# mask = np.zeros_like(logit)
# for c in range(C):
# t = class_lst[c]
# if t == 255: t = 19
# temp = (logit == c).astype(np.uint8)
# mask = np.ones_like(logit) * t * temp + mask * (1 - temp)
# _, axarr = plt.subplots(1, 3, figsize=(15, 5))
# axarr[0].imshow(image)
# axarr[1].imshow(label)
# axarr[2].imshow(mask)
if save_path:
plt.savefig(save_path)
else:
plt.show()
plt.close()
def val(opts, model, val_loader, device):
metrics = StreamSegMetrics(19)
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
model.eval()
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
for batch_idx, (images, labels, _, _, _) in tqdm(enumerate(val_loader)):
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
outputs, _, _, _ = model(images)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
#print(labels.shape, outputs.shape)
metrics.update(labels[0].detach().cpu().numpy(), outputs)
score = metrics.get_results()
print(str(opts.num_classes)+' classes')
print(metrics.to_str(score))
def train_stage1(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = utils.CrossEntropyLoss(ignore_index=255, size_average=True)
#l2_criterion = nn.MSELoss().to(device)
model.train()
epoch_records = {}
cur_itr = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
outputs, _, _, res_images = model(images)
#logits = torch.sigmoid(logits)
# loss = criterion(logits, labels_lst[:, :masks.shape[1]] * masks, class_lst)
#loss = criterion(logits, labels_lst, class_lst)
loss_seg = ce_criterion(outputs, labels, None)
#masks = ((labels.unsqueeze(dim=1)) != 255).float()
#loss_l2 = l2_criterion(res_images, images) * 0.01
#loss['loss'] += (loss_seg + loss_l2)
##loss['loss_l2'] = loss_l2
if ("seg" not in epoch_records): epoch_records["seg"]=[]
epoch_records["seg"].append(loss_seg.cpu().data.numpy())
#loss_ce = ce_criterion(outputs, labels, None)
#epoch_records['loss_ce'].append(loss_ce.item())
#loss = loss + loss_ce
optimizer.zero_grad()
loss_seg.backward()
optimizer.step()
if batch_idx % 10 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {}
if cur_itr % 1000 == 0:
val(opts, model, val_loader, device)
#for _, (images, labels, labels_true, labels_lst, class_lst) in enumerate(val_loader):
# if np.random.uniform(0, 1) < 0.9: continue
'''
for b in range(images.shape[0]):
visualize(images[b], labels_true[b], logits[b], labels_lst[b], class_lst[b], save_path=os.path.join(val_save_dir, f'{cur_itr}_{b}.png'), denorm=denorm)
# break
'''
model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
def train(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = utils.CrossEntropyLoss(ignore_index=255, size_average=True)
l2_criterion = nn.MSELoss().to(device)
model.train()
epoch_records = {}
cur_itr = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
outputs, logits, _, res_images = model(images)
#logits = torch.sigmoid(logits)
# loss = criterion(logits, labels_lst[:, :masks.shape[1]] * masks, class_lst)
loss = criterion(logits, labels_lst, class_lst)
loss_seg = ce_criterion(outputs, labels, None)
masks = ((labels.unsqueeze(dim=1)) != 255).float()
loss_l2 = l2_criterion(res_images, images) * 0.01
loss['loss'] += loss_l2
loss['loss'] += loss_seg
loss['loss_seg'] = loss_seg
loss['loss_l2'] = loss_l2
for key, value in loss.items():
if key not in epoch_records:
epoch_records[key] = []
epoch_records[key].append(value.item())
#loss_ce = ce_criterion(outputs, labels, None)
#epoch_records['loss_ce'].append(loss_ce.item())
#loss = loss + loss_ce
optimizer.zero_grad()
loss['loss'].backward()
optimizer.step()
if batch_idx % 10 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {}
if cur_itr % 500 == 0:
val(opts, model, val_loader, device)
#for _, (images, labels, labels_true, labels_lst, class_lst) in enumerate(val_loader):
# if np.random.uniform(0, 1) < 0.9: continue
for b in range(images.shape[0]):
visualize(images[b], labels_true[b], logits[b], labels_lst[b], class_lst[b], save_path=os.path.join(val_save_dir, f'{cur_itr}_{b}.png'), denorm=denorm)
# break
model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
# if batch_idx % 10 == 0:
# val(opts, model, val_loader, device)
# model.train()
import torch
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel._functions import Scatter
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
try:
return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
except Exception:
print('obj', obj.size())
print('dim', dim)
print('chunk_sizes', chunk_sizes)
quit()
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
class BalancedDataParallel(DataParallel):
def __init__(self, gpu0_bsz, *args, **kwargs):
self.gpu0_bsz = gpu0_bsz
super().__init__(*args, **kwargs)
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
if self.gpu0_bsz == 0:
device_ids = self.device_ids[1:]
else:
device_ids = self.device_ids
inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids)
if self.gpu0_bsz == 0:
replicas = replicas[1:]
outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
return self.gather(outputs, self.output_device)
def parallel_apply(self, replicas, device_ids, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, device_ids)
def scatter(self, inputs, kwargs, device_ids):
bsz = inputs[0].size(self.dim)
num_dev = len(self.device_ids)
gpu0_bsz = self.gpu0_bsz
bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
if gpu0_bsz < bsz_unit:
chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
delta = bsz - sum(chunk_sizes)
for i in range(delta):
chunk_sizes[i + 1] += 1
if gpu0_bsz == 0:
chunk_sizes = chunk_sizes[1:]
else:
return super().scatter(inputs, kwargs, device_ids)
return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
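# Hedged usage sketch for BalancedDataParallel, kept as a comment because it
# needs at least two visible GPUs. gpu0_bsz is the number of samples assigned to
# GPU 0; the rest of the batch is split across the remaining devices. main()
# below wraps the model the same way with gpu0_bsz=2.
#
#   model = BalancedDataParallel(2, model, dim=0, device_ids=[0, 1])
#   model.to('cuda:0')
#   outputs = model(images)   # with batch_size=6: 2 samples on GPU 0, 4 on GPU 1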
def main():
print(torch.version.cuda)
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
opts.num_classes = 256
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device(f'cuda:0' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=8)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=8)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
remain_class = 19 - len(train_dst.unknown_target)
print('class num : '+str(remain_class))
opts.num_classes=remain_class
model = model_map[opts.model](num_classes=remain_class, output_stride=opts.output_stride, metric_dim=opts.metric_dim, finetune=False)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# # Set up metrics
# metrics = StreamSegMetrics(opts.num_classes)
# Set up optimizer
if (opts.finetune):
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
else:
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
#criterion = MyDiceLoss(ignore_index=255).to(device)
criterion = CDiceLoss(remain_class).to(device)
utils.mkdir(opts.output_dir)
# Restore
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
model_state_dict = model.state_dict()
checkpoint_state_dict = checkpoint["model_state"]
for key in checkpoint_state_dict:
if model_state_dict[key].shape != checkpoint_state_dict[key].shape:
print(key)
continue
model_state_dict[key] = checkpoint_state_dict[key]
model.load_state_dict(model_state_dict)
#model.load_state_dict(checkpoint["model_state"])
#model = nn.DataParallel(model)
device_ids=list(map(int, opts.gpu_id.split(',')))
#torch.cuda.set_device(device_ids[0])
print(device_ids)
#model = nn.DataParallel(model, device_ids=list(map(int, opts.gpu_id.split(','))))
model = BalancedDataParallel(2, model, dim=0, device_ids=[0,1])
#model = BalancedDataParallel(2, model, dim=0, device_ids=list(map(int, opts.gpu_id.split(','))))
model.to(device)
if opts.continue_training:
optimizer.load_state_dict(checkpoint["optimizer_state"])
scheduler.load_state_dict(checkpoint["scheduler_state"])
print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
#model = nn.DataParallel(model)
model = BalancedDataParallel(2, model, dim=0, device_ids=[0,1])
model.to(device)
if (opts.finetune):
train(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print)
else:
train_stage1(opts, model, train_loader, val_loader, None, optimizer, scheduler, device, printer=print)
if __name__ == '__main__':
main()
| 28,621 | 42.170437 | 171 | py |
RAML | RAML-master/incremental/.ipynb_checkpoints/main_metric-checkpoint.py | from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, cityscapes, Cityscapes_Novel
from utils import ext_transforms as et
from metrics import StreamSegMetrics
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from torch import Tensor
from typing import Tuple
from sklearn.metrics import f1_score
import cv2
def convert_label_to_similarity(normed_feature: Tensor, label: Tensor) -> Tuple[Tensor, Tensor]:
similarity_matrix = normed_feature @ normed_feature.transpose(1, 0)
label_matrix = label.unsqueeze(1) == label.unsqueeze(0)
positive_matrix = label_matrix.triu(diagonal=1)
negative_matrix = label_matrix.logical_not().triu(diagonal=1)
similarity_matrix = similarity_matrix.view(-1)
positive_matrix = positive_matrix.view(-1)
negative_matrix = negative_matrix.view(-1)
return similarity_matrix[positive_matrix], similarity_matrix[negative_matrix]
class CircleLoss(nn.Module):
def __init__(self, m: float, gamma: float) -> None:
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, sp: Tensor, sn: Tensor) -> Tensor:
ap = torch.clamp_min(- sp.detach() + 1 + self.m, min=0.)
an = torch.clamp_min(sn.detach() + self.m, min=0.)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - ap * (sp - delta_p) * self.gamma
logit_n = an * (sn - delta_n) * self.gamma
loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.logsumexp(logit_p, dim=0))
return loss
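# Minimal sketch (illustrative only; the embedding dimension, number of samples
# and labels below are assumptions) of how CircleLoss is driven: L2-normalized
# embeddings and integer labels go through convert_label_to_similarity() to give
# within-class / between-class similarity pairs, as train() below does with the
# MetricModel embeddings.
def _circle_loss_example():
    criterion = CircleLoss(m=0.25, gamma=8.0)            # same settings as main()
    features = F.normalize(torch.randn(8, 128), dim=1)   # (N, D) normalized embeddings
    labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])      # (N,) class ids
    sp, sn = convert_label_to_similarity(features, labels)
    return criterion(sp, sn)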
def get_argparser():
parser = argparse.ArgumentParser()
# Datset Options
parser.add_argument("--data_root", type=str, default='../data/cityscapes',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='cityscapes',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=256,
help="num classes (default: None)")
parser.add_argument("--metric_dim", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=10000,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.1,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=10000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: True)')
parser.add_argument("--batch_size", type=int, default=4,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=1,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=512)
parser.add_argument("--ckpt", default="output/final.pth", type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
parser.add_argument("--name", type=str, default='',help="download datasets")
parser.add_argument("--output_dir", type=str, default='output_metric', help="output path")
parser.add_argument("--novel_dir", type=str, default='./novel/', help="novel path")
parser.add_argument("--test_mode", type=str, default='16_3', choices=['16_1','16_3','12','14'],
help="test mode")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, path):
""" save current model
"""
torch.save({
"batch_idx": batch_idx,
"model_state": model.module.state_dict(),
"metric_model": metric_model.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
}, path)
print("Model saved as %s" % path)
def get_spilt_center(feature,target,metric_model,label,device):
_, H, W, C = feature.shape
    feature = feature.view(H,W,C) # (H, W, C)
    target = target.view(H,W) # (H, W)
#feature = feature[target==label] # (N, M)
now_sum = torch.zeros(C,).to(device)
mask = target == label
print(mask.shape)
now_center_embedding=[]
mask = mask.cpu().data.numpy()
mask = mask.astype(np.uint8)
num_object, connect = cv2.connectedComponents(mask)
#novel_sum=0
for k in range(num_object):
now_connect = (connect == k)[np.newaxis, ...].astype(np.uint8)
#now_mask = mask[now_connect]
now_mask = now_connect * mask
print(np.sum(now_mask))
if (np.sum(now_mask)<100): continue
print(now_mask.shape)
print(feature.shape)
now_feature=feature[now_mask==1]
print(now_feature.shape)
now_feature=now_feature.view(-1,C)
now_feature=torch.sum(now_feature,dim=0)/np.sum(now_mask)
#now_feature=torch.Tensor(now_feature).to(device)
now_embedding=metric_model.forward_feature(now_feature.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
now_center_embedding.append(now_embedding)
return now_center_embedding
def get_all_center(feature,target,metric_model,label):
_, H, W, C = feature.shape
feature = feature.view(-1,C) # (H*W, M)
target = target.flatten() # (H*W)
feature = feature[target==label] # (N, M)
feature = torch.sum(feature, dim=0)
novel_sum = torch.sum(target == label)
now_center = feature / novel_sum
now_center_embedding = metric_model.forward_feature(now_center.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
return now_center_embedding
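# Note on the two helpers above: both build prototype embeddings for one novel
# class `label` from the backbone feature map. get_spilt_center() averages
# features over each sufficiently large connected component of the class mask
# (used for classes listed in spilt_list), while get_all_center() averages over
# all pixels of the class; in both cases the 256-d mean is projected by
# metric_model.forward_feature() into the 128-d metric space used for cosine
# matching at test time.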
def generate_novel(novel_path_name, unknown_list, model, metric_model, device):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
center_embedding = {}
spilt_list=[]
with torch.no_grad():
for x in unknown_list: # [13, 14, 15]
print('generate novel: '+str(x))
center=[]
novel_dst = Cityscapes_Novel(novel_path=novel_path_name, novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
image = image.to(device)
target = target.to(device,dtype=torch.long)
_,_,feature,_ = model(image)
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
if (x in spilt_list):
now_center_embedding=get_spilt_center(feature,target,metric_model,x,device)
for now_center in now_center_embedding:
center.append(now_center)
else:
now_center_embedding=get_all_center(feature,target,metric_model,label=x)
center.append(now_center_embedding)
#center = center / novel_sum # (M,)
center=np.array(center)
print(center.shape)
'''
random select novel
np.random.seed(333333)
a = np.random.choice(100,1,False)
center=center[a]
print(center.shape)
'''
center=np.mean(center,axis=0)
center_embedding[x] = deepcopy(center)
return center_embedding
'''
def generate_novel(novel_path_name, unknown_list, model, metric_model, device):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
center_embedding = {}
with torch.no_grad():
for x in unknown_list: # [13, 14, 15]
print('generate novel: '+str(x))
center=None
novel_dst = Cityscapes_Novel(novel_path=novel_path_name, novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
image = image.to(device)
target = target.to(device,dtype=torch.long)
_,_,feature,_ = model(image)
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
feature = feature.view(-1, C) # (H*W, M)
target = target.flatten() # (H*W)
feature = feature[target==x] # (N, M)
feature = torch.sum(feature, dim=0)
if center is None:
center = torch.zeros(C,).to(device)
center += feature
novel_sum += torch.sum(target == x)
center = center / novel_sum # (M,)
center_embedding[x] = metric_model.forward_feature(center.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
return center_embedding
'''
def cosine_similarity(x,y):
num = x.dot(y.T)
denom = np.linalg.norm(x) * np.linalg.norm(y)
return num / denom
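# cosine_similarity() above operates on 1-D numpy vectors and returns a scalar
# in [-1, 1], e.g.
#   cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0]))  # ~0.707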
from copy import deepcopy
def align_embedding(opts, model, metric_model, train_loader, device, center_embedding, tag=None):
model.eval()
metric_model.eval()
remain_class = 19 - len(Cityscapes.unknown_target)
num = {key: 1 for key in center_embedding.keys()}
for batch_idx, (images, labels, labels_true, _, _) in tqdm(enumerate(train_loader)):
with torch.no_grad():
images = images.to(device, dtype=torch.float32)[0:1]
labels = labels.to(device, dtype=torch.long)[0:1]
labels_true = labels_true.to(device, dtype=torch.long)[0:1]
assert images.shape[0] == 1
outputs, logits, features, _ = model(images) # outputs: (1, 16, H, W), logits: (1, 20, H, W), features: (1, 256, H/4, W/4)
logits = F.interpolate(logits, size=features.shape[-2:], mode='bilinear', align_corners=False) # (1, 20, H/4, W/4)
features = features[0].detach().cpu().numpy() # (256, H/4, W/4)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
logits = logits[0].detach().cpu().numpy() # (20, H/4, W/4)
logits = logits[remain_class:] # (3, H/4, W/4)
logits, region, connect = concat_logits(logits,250,erode=True,tag=tag)
for k in region:
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos, tmp_emb = None, None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.9:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
tmp_emb = embedding
if tmp_key is not None:
center_embedding[tmp_key] += tmp_emb
num[tmp_key] += 1
# if batch_idx > 50: break
center_embedding = {key: value / num[key] for key, value in center_embedding.items()}
return center_embedding
def concat_logits(logits, thereshold=100, erode=True, tag=None):
if (isinstance(tag,list)):
mask = np.array(tag)
logits = np.transpose(logits)
logits = logits * mask
logits = np.transpose(logits)
logits = (logits >= 0.5).astype(np.uint8)
logits = np.sum(logits,axis=0)
logits[logits>=1]=1
mask = logits == 1
logits = logits.astype(np.uint8)
if (erode == True):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
logits = cv2.dilate(logits, kernel)
logits = cv2.erode(logits, kernel)
#print(logits.shape)
num_object, connect = cv2.connectedComponents(logits)
region_list = []
for k in range(1,num_object):
now_connect = (connect == k)[np.newaxis, ...].astype(np.uint8)
#now_sum = np.sum(now_connect)
#print(now_sum)
if (np.sum(now_connect) < thereshold):
mask[connect == k] = 0
continue
region_list.append(k)
logits = logits * mask
return logits, region_list, connect
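# Minimal sketch of concat_logits() on synthetic heatmaps (illustrative only;
# the (3, H/4, W/4) layout mirrors how val() calls it on the meta channels, but
# the sizes and values are assumptions). `tag` masks out channels flagged as
# unused, and components smaller than `thereshold` pixels are discarded.
def _concat_logits_example():
    logits = np.zeros((3, 64, 64), dtype=np.float32)
    logits[0, 10:40, 10:40] = 1.0     # large blob: survives the area threshold
    logits[1, 50:52, 50:52] = 1.0     # tiny blob: removed (area < thereshold)
    merged, region_list, connect = concat_logits(logits, thereshold=250, tag=[1, 1, 0])
    return merged.sum(), region_list  # merged binary map and ids of the kept components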
def check_novel_logit(opts,model,metric_model,class_no,meta_channel_num,device,beta=0.15):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
center_embedding = {}
spilt_list=[]
channel_tag=[0]*meta_channel_num
with torch.no_grad():
print('generate novel: '+str(class_no))
center=[]
novel_dst = Cityscapes_Novel(novel_path=opts.novel_dir, novel_no=class_no, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
#image, target = novel_transform(image,target)
image = image.to(device)
target = target.to(device,dtype=torch.long)
output,logit,feature,_ = model(image)
output = torch.argmax(output[0], dim=0).detach().cpu().numpy()
mask = target == class_no
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
#print(target.shape)
#print(mask.shape)
logit = logit[0, (-meta_channel_num):]
#print(logit.shape)
logit = logit * mask
mask = mask.data.cpu().numpy()
all_sum=np.sum(mask)
logit = logit.detach().cpu().numpy()
logit = (logit >= 0.5).astype(np.uint8)
for x in range(logit.shape[0]):
if (np.sum(logit[x])>all_sum*beta): channel_tag[x]=1
#print(logit.shape)
#for x in range(channel_num):
#print(image.shape)
#image= denorm(image.detach().cpu().numpy())[0] * 255
#print(image.shape)
image = (denorm(image.detach().cpu().numpy())[0] * 255).transpose(1, 2, 0).astype(np.uint8)
'''
plt.imshow(image)
plt.show()
plt.close()
_, axarr = plt.subplots(1, logit.shape[0], figsize=(5*logit.shape[0], 5))
for i in range(logit.shape[0]):
now_logit=cv2.resize(logit[i], output.shape[::-1], interpolation=cv2.INTER_NEAREST)
axarr[i].imshow(image)
axarr[i].imshow(now_logit, alpha=0.5)
plt.show()
plt.close()
'''
'''
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
if (x in spilt_list):
now_center_embedding=get_spilt_center(feature,target,metric_model,label=x)
for now_center in now_center_embedding:
center.append(now_center)
else:
now_center_embedding=get_all_center(feature,target,metric_model,label=x)
center.append(now_center_embedding)
'''
#center = center / novel_sum # (M,)
'''
center=np.array(center)
print(center.shape)
center=np.mean(center,axis=0)
center_embedding[x] = deepcopy(center)
'''
return channel_tag
def val(opts, model, metric_model, train_loader, val_loader, device,):
remain_class = 19 - len(Cityscapes.unknown_target)
metrics16 = StreamSegMetrics(19)
metrics19 = StreamSegMetrics(19, remain_class)
model.eval()
metric_model.eval()
if opts.save_val_results:
if not os.path.exists('results_1'):
os.mkdir('results_1')
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img_id = 0
# val_save_dir = os.path.join(opts.output_dir, 'val')
# os.makedirs(val_save_dir, exist_ok=True)
# denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
if (opts.test_mode == '16_1'):
center_embedding = generate_novel(opts.novel_dir, [13], model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
else:
center_embedding = generate_novel(opts.novel_dir, Cityscapes.unknown_target, model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
#using when 16+1 setting
#center_embedding = generate_novel(opts.novel_dir, [13], model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
#center_embedding = align_embedding(opts, model, metric_model, train_loader, device, center_embedding)
name=['sky','person','rider','car','truck','bus','train','motorcycle','bicycle']
meta_channel_num=20-remain_class
all_tag=[0]*meta_channel_num
if (opts.test_mode == '16_1'):
for x in [13]:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
else:
for x in Cityscapes.unknown_target:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
#using when 16+1 setting
'''
for x in [13]:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
'''
#all_tag = np.array(all_tag)
print(all_tag)
'''
if (opts.test_only):
center_embedding = align_embedding(opts ,model, metric_model, train_loader, device, center_embedding, all_tag)
'''
miou_all=[]
miou_unknown=[]
for _, (images, labels, labels_true, _, _) in tqdm(enumerate(val_loader)):
assert images.shape[0] == 1
with torch.no_grad():
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
outputs, logits, features, _ = model(images) # outputs: (1, 16, H, W), logits: (1, 20, H, W), features: (1, 256, H/4, W/4)
known_class = outputs.shape[1]
h,w=outputs.shape[2],outputs.shape[3]
#outputs = logits[:,0:known_class,:,:].clone()
logits = F.interpolate(logits, size=features.shape[-2:], mode='bilinear', align_corners=False) # (1, 20, H/4, W/4)
features = features[0].detach().cpu().numpy() # (256, H/4, W/4)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
metrics16.update(labels[0].detach().cpu().numpy(), outputs)
outputs19 = deepcopy(outputs)
# in 16 + 3 setting and 16 + 1 setting
if ('16' in opts.test_mode):
outputs19[outputs19 == 13] = 16
outputs19[outputs19 == 14] = 17
outputs19[outputs19 == 15] = 18
# in 12 + 7 setting 10->12 11,12->10,11
if ('12' in opts.test_mode):
outputs19[outputs19 == 11] = 12
outputs19[outputs19 == 10] = 11
#in 14 + 5 setting unknown_target = [10,13,14,15,16]
# 11 -> 10 12 -> 11 17 -> 12 18 -> 13
if ('14' in opts.test_mode):
outputs19[outputs19 == 13] = 18
outputs19[outputs19 == 12] = 17
outputs19[outputs19 == 11] = 12
outputs19[outputs19 == 10] = 11
logits = logits[0].detach().cpu().numpy() # (20, H/4, W/4)
logits = logits[known_class:] # (3, H/4, W/4)
# concat inference
logits, region, connect = concat_logits(logits, thereshold=250, tag=all_tag)
for k in region:
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.8:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
'''
# default inference
logits = (logits >= 0.5).astype(np.uint8) # (3, H/4, W/4)
for c in range(logits.shape[0]):
logit = logits[c] # (H/4, W/4)
#Hl, Wl = logit.shape
#logit = cv2.resize(logit, (Wl//4, Hl//4), interpolation=cv2.INTER_NEAREST)
num_object, connect = cv2.connectedComponents(logit)
#connect = cv2.resize(connect, (Wl, Hl), interpolation=cv2.INTER_NEAREST)
for k in range(1, num_object+1):
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
if np.sum(mask) < 100: continue
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.75:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
'''
#using in 16+3 setting
if ('16' in opts.test_mode):
for x in range(13,16):
labels_true[labels_true==x]+=103
outputs19[outputs19==x]+=103
labels_true[labels_true==(x+3)]-=3
outputs19[outputs19==(x+3)]-=3
for x in range(116,119):
labels_true[labels_true==x]-=100
outputs19[outputs19==x]-=100
if (opts.test_mode == '16_1'):
for x in range(17,19):
labels_true[labels_true==x] = 255
# using in 12 + 7 setting 10->12 11,12->10,11
if ('12' in opts.test_mode):
labels_true[labels_true==10] = 112
outputs19[outputs19==10] =112
labels_true[labels_true == 11] = 10
outputs19[outputs19==11] = 10
labels_true[labels_true == 12] = 11
outputs19[outputs19 == 12] = 11
labels_true[labels_true==112] -= 100
outputs19[outputs19==112] -= 100
'''
labels_true[labels_true==10] = 112
outputs19[outputs19==10] =112
labels_true[labels_true == 11] = 10
outputs19[outputs19==11] = 10
labels_true[labels_true == 12] = 11
outputs19[outputs19 == 12] = 11
labels_true[labels_true==112] -= 100
outputs19[outputs19==112] -= 100
'''
#in 14 + 5 setting unknown_target = [10,13,14,15,16]
# 11 -> 10 12 -> 11 17 -> 12 18 -> 13
# 10 -> 14 ,13 ->15
if ('14' in opts.test_mode):
labels_true[labels_true == 10] = 114
outputs19[outputs19 == 10] = 114
for x in range(13,17):
labels_true[labels_true == x] = 100+2+x
outputs19[outputs19 == x] = 100+2+x
for x in range(11,13):
labels_true[labels_true == x] = x-1
outputs19[outputs19 == x] = x-1
for x in range(17,19):
labels_true[labels_true == x] = x-5
outputs19[outputs19 == x] = x-5
for x in range(114,119):
labels_true[labels_true == x] -=100
outputs19[outputs19 == x] -=100
metrics19.update(labels_true[0].detach().cpu().numpy(), outputs19)
'''
for x in range(13,16):
labels_true[labels_true==x]+=103
outputs19[outputs19==x]+=103
labels_true[labels_true==(x+3)]-=3
outputs19[outputs19==(x+3)]-=3
for x in range(116,119):
labels_true[labels_true==x]-=100
outputs19[outputs19==x]-=100
'''
'''
now_all_IoU = metrics19.get_results()['Mean IoU']
now_unkown_IoU = metrics19.get_results()['Unknown IoU']
miou_all.append(now_all_IoU)
miou_unknown.append(now_unkown_IoU)
metrics19.reset()
'''
#print(labels_true.shape)
#print(outputs19.shape)
if opts.save_val_results:
assert images.shape[0] == 1
target = labels_true[0].detach().cpu().numpy()
image = images[0].detach().cpu().numpy()
pred = outputs19
#pred = pred.reshape(h,w)
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
target = train_loader.dataset.decode_target(target).astype(np.uint8)
pred = train_loader.dataset.decode_target(pred).astype(np.uint8)
#scores = (255 * scores).squeeze().astype(np.uint8)
Image.fromarray(image).save('results_1/%d_image.png' % img_id)
Image.fromarray(target).save('results_1/%d_target.png' % img_id)
Image.fromarray(pred).save('results_1/%d_pred.png' % img_id)
#Image.fromarray(scores).save('results/%d_scores.png' % img_id)
# np.save('results/%d_dis_sum.npy' % img_id, dis_sum_map
img_id += 1
score16 = metrics16.get_results()
score19 = metrics19.get_results()
now_IoU = score19['Unknown IoU']
print('16 classes')
print(metrics16.to_str(score16))
print()
print('19 classes')
print(metrics19.to_str(score19))
'''
for x in range(0,100):
print(x,miou_all[x],miou_unknown[x])
'''
return now_IoU
def train(opts, model, metric_model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = nn.CrossEntropyLoss().to(device)
model.eval()
metric_model.train()
epoch_records = {'f1': []}
cur_itr = 0
best_IoU = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
#val_save_dir = os.path.join(opts.output_dir, 'val')
#os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
_, _, features, _ = model(images)
labels_lst = F.interpolate(labels_lst.float(), size=features.shape[-2:], mode='nearest')
new_features, new_labels, logits = metric_model(features, labels_lst)
cir_loss = criterion(*convert_label_to_similarity(new_features, new_labels)) * 0.1
ce_loss = ce_criterion(logits, new_labels.long())
loss = {
'loss': cir_loss + ce_loss,
'cir_loss': cir_loss,
'ce_loss': ce_loss,
}
for key, value in loss.items():
if key not in epoch_records:
epoch_records[key] = []
epoch_records[key].append(value.item())
optimizer.zero_grad()
loss['loss'].backward()
optimizer.step()
f1 = f1_score(new_labels.detach().cpu().numpy(),
torch.argmax(logits, dim=1).detach().cpu().numpy(),
average='macro')
epoch_records['f1'].append(f1)
if batch_idx % 100 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {'f1': []}
if cur_itr and cur_itr % 1000 == 0:
now_IoU = val(opts, model, metric_model, train_loader, val_loader, device)
if (now_IoU > best_IoU):
best_IoU = now_IoU
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'best.pth'))
print('best IoU :'+str(best_IoU))
model.eval()
metric_model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
val(opts, model, metric_model, train_loader, val_loader, device)
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
from dropblock import DropBlock2D
class MetricModel(nn.Module):
def __init__(self, known_class):
super().__init__()
self.model = nn.Sequential(
nn.Linear(256, 128),
nn.ReLU(inplace=True),
nn.Linear(128, 128))
self.classifier = nn.Linear(128, known_class, bias=False)
self.known_class = known_class
self.dropblock = DropBlock2D(block_size=3, drop_prob=0.3)
def forward(self, feature, label_lst):
# feature: (B, 256, H, W)
# label_lst: (B, 17, H, W)
label_lst = label_lst[:, :self.known_class]
new_feature, new_label = [], []
for _ in range(self.known_class):
tmp_label_lst = self.dropblock(label_lst) # (B, 16, H, W)
for c in range(tmp_label_lst.shape[1]):
tmp_feature = (feature * tmp_label_lst[:, c:c+1, :, :]).view(feature.shape[0], feature.shape[1], -1) # (B, 256, H*W)
tmp_feature = tmp_feature.sum(dim=-1) # (B, 256)
tmp_num = tmp_label_lst[:, c:c+1, :, :].view(tmp_label_lst.shape[0], -1) # (B, H*W)
tmp_num = tmp_num.sum(dim=-1) # (B,)
keep_ind = tmp_num != 0
if keep_ind.shape[0]:
tmp_feature = tmp_feature[keep_ind]
tmp_num = tmp_num[keep_ind]
tmp_feature = tmp_feature / tmp_num.unsqueeze(dim=1) # (B, 256)
new_feature.append(tmp_feature)
new_label.append(torch.ones(tmp_feature.shape[0])*c)
new_feature = torch.cat(new_feature, dim=0) # (N, 256)
new_feature = self.model(new_feature) # (N, 128)
new_label = torch.cat(new_label, dim=0).to(feature.device) # (N,)
logit = self.classifier(new_feature) # (N, 16)
return F.normalize(new_feature), new_label.long(), logit
def forward_feature(self, feature):
# feature: (1, 256)
new_feature = self.model(feature) # (1, 128)
return F.normalize(new_feature)
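# Minimal sketch of the two entry points of MetricModel (illustrative only; all
# shapes below are assumptions): forward() is used during metric training on
# backbone features plus per-class masks, and forward_feature() is used at
# inference to embed one region-averaged 256-d feature into a 128-d prototype.
def _metric_model_example():
    metric_model = MetricModel(known_class=16)
    features = torch.randn(2, 256, 32, 32)                  # backbone features
    label_lst = (torch.rand(2, 17, 32, 32) > 0.5).float()   # per-class masks
    emb, lab, logit = metric_model(features, label_lst)     # (N, 128), (N,), (N, 16)
    prototype = metric_model.forward_feature(torch.randn(1, 256))[0]  # (128,) normalized
    return emb.shape, logit.shape, prototype.shape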
def main():
print(torch.version.cuda)
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=8)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=8)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
unknown_num = len(train_dst.unknown_target)
remain_class = opts.num_classes - unknown_num
opts.num_classes = remain_class
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride, metric_dim=opts.metric_dim)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# # Set up metrics
# metrics = StreamSegMetrics(opts.num_classes)
#criterion = MyDiceLoss(ignore_index=255).to(device)
criterion = CircleLoss(m=0.25, gamma=8.0).to(device)
utils.mkdir(opts.output_dir)
# Restore
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
res = model.load_state_dict(checkpoint["model_state"])
print(res)
model = nn.DataParallel(model)
model.to(device)
# if opts.continue_training:
# optimizer.load_state_dict(checkpoint["optimizer_state"])
# scheduler.load_state_dict(checkpoint["scheduler_state"])
# print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
model = nn.DataParallel(model)
model.to(device)
for _, param in model.named_parameters():
param.requires_grad = False
metric_model = MetricModel(remain_class).to(device)
optimizer = torch.optim.SGD(metric_model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
if (opts.test_only):
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
metric_model.load_state_dict(checkpoint["metric_model"])
val(opts, model, metric_model, train_loader, val_loader, device)
return
    #res = model.load_state_dict(checkpoint["model_state"])
    #print(res)  # commented out: `res` is only defined when a checkpoint was loaded above
    #model = nn.DataParallel(model)
    #model.to(device)
train(opts, model, metric_model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print)
if __name__ == '__main__':
main()
| 43,558 | 44.092133 | 152 | py |
RAML | RAML-master/incremental/utils/loss.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=255):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.ignore_index = ignore_index
self.size_average = size_average
def forward(self, inputs, targets):
ce_loss = F.cross_entropy(
inputs, targets, reduction='none', ignore_index=self.ignore_index)
pt = torch.exp(-ce_loss)
focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
if self.size_average:
return focal_loss.mean()
else:
return focal_loss.sum()
class CrossEntropyLoss(nn.Module):
def __init__(self, alpha=0, beta=0, gamma=0, size_average=True, ignore_index=255):
super(CrossEntropyLoss, self).__init__()
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.ignore_index = ignore_index
self.size_average = size_average
        self.criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index,
                                             reduction='mean' if self.size_average else 'sum')
        if torch.cuda.is_available():  # `self.cuda` is a Module method and is always truthy
            self.criterion = self.criterion.cuda()
def forward(self, logit, target, features_in):
n, c, h, w = logit.size()
CE_loss = self.criterion(logit, target.long())
        # NOTE: this early return keeps only the CE term; the VAR / Inter / Center
        # computations below are unreachable as written.
        return CE_loss / n
VAR_loss = Variable(torch.Tensor([0])).cuda()
Inter_loss = Variable(torch.Tensor([0])).cuda()
Center_loss = Variable(torch.Tensor([0])).cuda()
for i in range(n):
label = target[i]
label = label.flatten().cpu().numpy()
features = logit[i]
features = features.permute(1, 2, 0).contiguous()
shape = features.size()
features = features.view(shape[0]*shape[1], shape[2])
features_in_temp = features_in[i]
instances, counts = np.unique(label, False, False, True)
# print('counts', counts)
total_size = int(np.sum(counts))
for instance in instances:
if instance == self.ignore_index: # Ignore background
continue
locations = torch.LongTensor(np.where(label == instance)[0]).cuda()
vectors = torch.index_select(features, dim=0, index=locations)
features_temp = torch.index_select(features_in_temp, dim=0, index=locations)
centers_temp = torch.mean(features_temp, dim=0)
features_temp = features_temp - centers_temp
Center_loss += torch.sum(features_temp ** 2) / total_size
# print(size)
# print(-vectors[:,int(instance)])
# get instance mean and distances to mean of all points in an instance
VAR_loss += torch.sum((-vectors[:,int(instance)]))/total_size
Inter_loss += (torch.sum(vectors) - torch.sum((vectors[:,int(instance)]))) / total_size
# total_size += size
# VAR_loss += var_loss/total_size
loss = (CE_loss + self.alpha * VAR_loss + self.beta * Inter_loss +self.gamma * Center_loss) / n
# print(CE_loss/n, self.alpha * VAR_loss/n, self.beta * Inter_loss/n, self.gamma * Center_loss/n)
return loss
class CrossEntropyLoss_dis(nn.Module):
def __init__(self, alpha=0, beta=0, gamma=0, size_average=True, ignore_index=255):
super(CrossEntropyLoss_dis, self).__init__()
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.ignore_index = ignore_index
self.size_average = size_average
def forward(self, logit, target, features_1, features_2):
n, c, h, w = logit.size()
        criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index,
                                        reduction='mean' if self.size_average else 'sum')
        if torch.cuda.is_available():  # `self.cuda` is a Module method and is always truthy
            criterion = criterion.cuda()
CE_loss = criterion(logit, target.long())
        # NOTE: this early return keeps only the CE term; the distance (DIS)
        # computation below is unreachable as written.
        return CE_loss / n
DIS_loss = Variable(torch.Tensor([0])).cuda()
appendix_lay = torch.zeros(n,w,h,1).cuda()
features_1 = torch.cat((features_1, appendix_lay), dim=3)
# print('features_1.shape: ', features_1.shape)
# print('features_2.shape: ', features_2.shape)
for i in range(n):
features_origin = features_1[i][target[i] != 16]
features_new = features_2[i][target[i] != 16]
features_diff = features_new - features_origin
DIS_loss += torch.sum(features_diff ** 2) / (features_diff.shape[0])
loss = CE_loss / n + 0.01 * DIS_loss / n
# print(CE_loss, DIS_loss)
return loss
# class CenterLoss(nn.Module):
# """Center loss.
# Reference:
# Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
# Args:
# num_classes (int): number of classes.
# feat_dim (int): feature dimension.
# """
# def __init__(self, num_classes=10, feat_dim=256, use_gpu=True):
# super(CenterLoss, self).__init__()
# self.num_classes = num_classes
# self.feat_dim = feat_dim
# self.use_gpu = use_gpu
# if self.use_gpu:
# self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda()) # (C, M)
# else:
# self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
# def forward(self, x, labels):
# """
# Args:
# x: feature matrix with shape (batch_size, feat_dim, h, w).
# labels: ground truth labels with shape (batch_size, h, w).
# """
# batch_size = x.size(0)
# x = x.permute(0, 2, 3, 1) # (B, H, W, M)
# x = x.reshape((-1,self.feat_dim)) # (N, M)
# sample_size= x.size(0) # N
# labels = labels.flatten() # (N,)
# assert sample_size == labels.size(0)
# # (N, M) --> (N, 1) --> (N, C) | (C, M) --> (C, 1) --> (C, N) --> (N, C)
# # (N, C)
# distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(sample_size, self.num_classes) + \
# torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, sample_size).t()
# # distmat - 2 (x * center.T)
# distmat.addmm_(1, -2, x, self.centers.t())
# classes = torch.arange(self.num_classes).long()
# if self.use_gpu: classes = classes.cuda()
# labels = labels.unsqueeze(1).expand(sample_size, self.num_classes)
# mask = labels.eq(classes.expand(sample_size, self.num_classes))
# dist = distmat * mask.float()
# loss = dist.clamp(min=1e-12, max=1e+12).sum() / sample_size
# return loss / batch_size
class CenterLoss(nn.Module):
"""Center loss.
Reference:
Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
Args:
num_classes (int): number of classes.
feat_dim (int): feature dimension.
"""
def __init__(self, num_classes=10, feat_dim=256, use_gpu=True):
super(CenterLoss, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda()) # (C, M)
self.criterion = nn.CrossEntropyLoss().cuda()
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
self.criterion = nn.CrossEntropyLoss()
def _dis_criterion(self, x, labels):
# x: (B, M, H, W) | labels: (B, H, W)
_, _, H, W = x.shape
assert H == W
x = torch.nn.functional.interpolate(x, size=[H//2, W//2])
labels = torch.nn.functional.interpolate(labels.unsqueeze(dim=1).float(), size=[H//2, W//2], mode="nearest")
logit = [-torch.sum((x.unsqueeze(dim=1) - self.centers.clone()[c:c+1, :].detach().view(1, 1, self.centers.shape[1], 1, 1)) ** 2, dim=2) for c in range(self.num_classes)]
logit = torch.cat(logit, dim=1)
logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, self.num_classes)
label = labels.contiguous().view(-1)
#logit = -torch.sum((x.unsqueeze(dim=1) - self.centers.clone().detach().view(1, *self.centers.shape, 1, 1)) ** 2, dim=2)
loss = self.criterion(logit[label != 255], label[label != 255].long())
return loss
def forward(self, x, labels):
"""
Args:
x: feature matrix with shape (batch_size, feat_dim, h, w).
labels: ground truth labels with shape (batch_size, h, w).
"""
# feature = x.clone()
# feature_label = labels.clone()
batch_size = x.size(0)
x = x.permute(0, 2, 3, 1) # (B, H, W, M)
x = x.reshape((-1,self.feat_dim)) # (N, M)
sample_size= x.size(0) # N
labels = labels.flatten() # (N,)
assert sample_size == labels.size(0)
# (N, M) --> (N, 1) --> (N, C) | (C, M) --> (C, 1) --> (C, N) --> (N, C)
# (N, C)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(sample_size, self.num_classes) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, sample_size).t()
        # distmat -= 2 * (x @ centers.T); keyword form replaces the deprecated
        # positional alpha/beta signature of addmm_
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
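        # After the in-place addmm, distmat[i, j] = ||x_i||^2 + ||c_j||^2 - 2<x_i, c_j>
        #                                         = ||x_i - c_j||^2,
        # i.e. the squared Euclidean distance between every pixel embedding and
        # every class center; the mask below keeps only each pixel's own class.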
classes = torch.arange(self.num_classes).long()
if self.use_gpu: classes = classes.cuda()
labels = labels.unsqueeze(1).expand(sample_size, self.num_classes)
mask = labels.eq(classes.expand(sample_size, self.num_classes))
dist = distmat * mask.float()
loss = dist.clamp(min=1e-12, max=1e+12).sum() / sample_size
#norm_loss = torch.exp(-torch.norm(self.centers.unsqueeze(dim=0)-self.centers.unsqueeze(dim=1), p=2, dim=-1))
#dis_loss = self._dis_criterion(feature, feature_label)
return loss / batch_size #+ norm_loss / batch_size
if __name__ =='__main__':
center_loss=CenterLoss()
print(center_loss.centers.data.shape)
center=center_loss.centers.data
torch.save(center,'center.pth')
#torch.save('./center.pth',center_loss.state_dict()) | 10,333 | 39.84585 | 177 | py |
RAML | RAML-master/incremental/utils/utils.py | from torchvision.transforms.functional import normalize
import torch.nn as nn
import numpy as np
import os
def denormalize(tensor, mean, std):
mean = np.array(mean)
std = np.array(std)
_mean = -mean/std
_std = 1/std
return normalize(tensor, _mean, _std)
class Denormalize(object):
def __init__(self, mean, std):
mean = np.array(mean)
std = np.array(std)
self._mean = -mean/std
self._std = 1/std
def __call__(self, tensor):
if isinstance(tensor, np.ndarray):
return (tensor - self._mean.reshape(-1,1,1)) / self._std.reshape(-1,1,1)
return normalize(tensor, self._mean, self._std)
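# Denormalize simply reuses Normalize with inverted statistics: if
# y = (x - mean) / std, then applying Normalize with mean' = -mean/std and
# std' = 1/std gives (y - mean') / std' = y * std + mean = x, recovering the
# original (unnormalized) pixel values.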
def set_bn_momentum(model, momentum=0.1):
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.momentum = momentum
def fix_bn(model):
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
def colorEncode(labelmap, colors, mode='RGB'):
labelmap = labelmap.astype('int')
labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),
dtype=np.uint8)
for label in unique(labelmap):
if label < 0:
continue
labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \
np.tile(colors[label],
(labelmap.shape[0], labelmap.shape[1], 1))
if mode == 'BGR':
return labelmap_rgb[:, :, ::-1]
else:
return labelmap_rgb
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
optional_returns = optional_indices or return_counts
if ar.size == 0:
if not optional_returns:
ret = ar
else:
ret = (ar,)
            if return_index:
                ret += (np.empty(0, np.bool_),)  # np.bool was removed from NumPy
            if return_inverse:
                ret += (np.empty(0, np.bool_),)
            if return_counts:
                ret += (np.empty(0, np.intp),)
return ret
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if not optional_returns:
ret = aux[flag]
else:
ret = (aux[flag],)
if return_index:
ret += (perm[flag],)
if return_inverse:
iflag = np.cumsum(flag) - 1
inv_idx = np.empty(ar.shape, dtype=np.intp)
inv_idx[perm] = iflag
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
ret += (np.diff(idx),)
return ret
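# Editor's note: this reimplements the np.unique contract, e.g.
# unique(np.array([2, 0, 0, 5]), return_counts=True)
# returns (array([0, 2, 5]), array([2, 1, 1])); colorEncode above relies on the
# plain call to iterate over the labels actually present in a label map.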
| 2,850 | 28.391753 | 84 | py |
RAML | RAML-master/incremental/utils/scheduler.py | from torch.optim.lr_scheduler import _LRScheduler, StepLR
class PolyLR(_LRScheduler):
def __init__(self, optimizer, max_iters, power=0.9, last_epoch=-1, min_lr=1e-6):
self.power = power
self.max_iters = max_iters # avoid zero lr
self.min_lr = min_lr
super(PolyLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [ max( base_lr * ( 1 - self.last_epoch/self.max_iters )**self.power, self.min_lr)
for base_lr in self.base_lrs] | 509 | 41.5 | 96 | py |
RAML | RAML-master/incremental/utils/ext_transforms.py | import torchvision
import torch
import torchvision.transforms.functional as F
import random
import numbers
import numpy as np
from PIL import Image
#
# Extended Transforms for Semantic Segmentation
#
class ExtRandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return F.hflip(img), F.hflip(lbl)
return img, lbl
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class ExtCompose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, lbl):
for t in self.transforms:
img, lbl = t(img, lbl)
return img, lbl
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class ExtCenterCrop(object):
"""Crops the given PIL Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
return F.center_crop(img, self.size), F.center_crop(lbl, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class ExtRandomScale(object):
def __init__(self, scale_range, interpolation=Image.BILINEAR):
self.scale_range = scale_range
self.interpolation = interpolation
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be scaled.
lbl (PIL Image): Label to be scaled.
Returns:
PIL Image: Rescaled image.
PIL Image: Rescaled label.
"""
assert img.size == lbl.size
scale = random.uniform(self.scale_range[0], self.scale_range[1])
target_size = ( int(img.size[1]*scale), int(img.size[0]*scale) )
return F.resize(img, target_size, self.interpolation), F.resize(lbl, target_size, Image.NEAREST)
    def __repr__(self):
        # the class stores scale_range (not size) and _pil_interpolation_to_str
        # is not defined in this module, so report the attributes directly
        return self.__class__.__name__ + '(scale_range={0}, interpolation={1})'.format(
            self.scale_range, self.interpolation)
class ExtScale(object):
"""Resize the input PIL Image to the given scale.
Args:
Scale (sequence or int): scale factors
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, scale, interpolation=Image.BILINEAR):
self.scale = scale
self.interpolation = interpolation
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be scaled.
lbl (PIL Image): Label to be scaled.
Returns:
PIL Image: Rescaled image.
PIL Image: Rescaled label.
"""
assert img.size == lbl.size
target_size = ( int(img.size[1]*self.scale), int(img.size[0]*self.scale) ) # (H, W)
return F.resize(img, target_size, self.interpolation), F.resize(lbl, target_size, Image.NEAREST)
    def __repr__(self):
        # report the stored scale; _pil_interpolation_to_str is not defined here
        return self.__class__.__name__ + '(scale={0}, interpolation={1})'.format(
            self.scale, self.interpolation)
class ExtRandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img, lbl):
"""
img (PIL Image): Image to be rotated.
lbl (PIL Image): Label to be rotated.
Returns:
PIL Image: Rotated image.
PIL Image: Rotated label.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center), F.rotate(lbl, angle, self.resample, self.expand, self.center)
def __repr__(self):
format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
format_string += ', resample={0}'.format(self.resample)
format_string += ', expand={0}'.format(self.expand)
if self.center is not None:
format_string += ', center={0}'.format(self.center)
format_string += ')'
return format_string
class ExtRandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return F.hflip(img), F.hflip(lbl)
return img, lbl
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class ExtRandomVerticalFlip(object):
"""Vertically flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be flipped.
lbl (PIL Image): Label to be flipped.
Returns:
PIL Image: Randomly flipped image.
PIL Image: Randomly flipped label.
"""
if random.random() < self.p:
return F.vflip(img), F.vflip(lbl)
return img, lbl
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class ExtPad(object):
def __init__(self, diviser=32):
self.diviser = diviser
    def __call__(self, img, lbl):
        w, h = img.size  # PIL size is (width, height)
        ph = (h // self.diviser + 1) * self.diviser - h if h % self.diviser != 0 else 0
        pw = (w // self.diviser + 1) * self.diviser - w if w % self.diviser != 0 else 0
        # F.pad expects (left, top, right, bottom)
        im = F.pad(img, (pw // 2, ph // 2, pw - pw // 2, ph - ph // 2))
        lbl = F.pad(lbl, (pw // 2, ph // 2, pw - pw // 2, ph - ph // 2))
        return im, lbl
class ExtToTensor(object):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __init__(self, normalize=True, target_type='uint8'):
self.normalize = normalize
self.target_type = target_type
def __call__(self, pic, lbl):
"""
Note that labels will not be normalized to [0, 1].
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
lbl (PIL Image or numpy.ndarray): Label to be converted to tensor.
Returns:
Tensor: Converted image and label
"""
if self.normalize:
return F.to_tensor(pic), torch.from_numpy( np.array( lbl, dtype=self.target_type) )
else:
return torch.from_numpy( np.array( pic, dtype=np.float32).transpose(2, 0, 1) ), torch.from_numpy( np.array( lbl, dtype=self.target_type) )
def __repr__(self):
return self.__class__.__name__ + '()'
class ExtNormalize(object):
"""Normalize a tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor, lbl):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
tensor (Tensor): Tensor of label. A dummy input for ExtCompose
Returns:
Tensor: Normalized Tensor image.
Tensor: Unchanged Tensor label
"""
return F.normalize(tensor, self.mean, self.std), lbl
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class ExtRandomCrop(object):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
of the image. Default is 0, i.e no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception.
"""
def __init__(self, size, padding=0, pad_if_needed=False):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be cropped.
lbl (PIL Image): Label to be cropped.
Returns:
PIL Image: Cropped image.
PIL Image: Cropped label.
"""
assert img.size == lbl.size, 'size of img and lbl should be the same. %s, %s'%(img.size, lbl.size)
if self.padding > 0:
img = F.pad(img, self.padding)
lbl = F.pad(lbl, self.padding)
# pad the width if needed
if self.pad_if_needed and img.size[0] < self.size[1]:
img = F.pad(img, padding=int((1 + self.size[1] - img.size[0]) / 2))
lbl = F.pad(lbl, padding=int((1 + self.size[1] - lbl.size[0]) / 2))
# pad the height if needed
if self.pad_if_needed and img.size[1] < self.size[0]:
img = F.pad(img, padding=int((1 + self.size[0] - img.size[1]) / 2))
lbl = F.pad(lbl, padding=int((1 + self.size[0] - lbl.size[1]) / 2))
i, j, h, w = self.get_params(img, self.size)
return F.crop(img, i, j, h, w), F.crop(lbl, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
class ExtResize(object):
"""Resize the input PIL Image to the given size.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)
self.size = size
self.interpolation = interpolation
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Image to be scaled.
Returns:
PIL Image: Rescaled image.
"""
return F.resize(img, self.size, self.interpolation), F.resize(lbl, self.size, Image.NEAREST)
def __repr__(self):
        return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, self.interpolation)
class ExtColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None:
brightness_factor = random.uniform(brightness[0], brightness[1])
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast is not None:
contrast_factor = random.uniform(contrast[0], contrast[1])
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation is not None:
saturation_factor = random.uniform(saturation[0], saturation[1])
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue is not None:
hue_factor = random.uniform(hue[0], hue[1])
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
def __call__(self, img, lbl):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img), lbl
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert callable(lambd), repr(type(lambd).__name__) + " object is not callable"
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
def __repr__(self):
return self.__class__.__name__ + '()'
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string | 20,817 | 35.458844 | 150 | py |
LLP-VAT | LLP-VAT-main/llp_vat/main.py | import argparse
import os
import uuid
from tqdm.auto import tqdm
import arrow
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from llp_vat.lib.llp import (BagMiniBatch, load_llp_dataset, BagSampler,
Iteration)
from llp_vat.lib.losses import (ProportionLoss, PiModelLoss, VATLoss,
compute_hard_l1, compute_soft_kl)
from llp_vat.lib.networks import wide_resnet28_2
from llp_vat.lib.run_experiment import (write_meters, RunExperiment,
save_checkpoint)
from llp_vat.lib.ramps import sigmoid_rampup
from llp_vat.lib.utils import AverageMeterSet, accuracy, parameters_string
def get_rampup_weight(weight, iteration, rampup):
alpha = weight * sigmoid_rampup(iteration.value, rampup)
return alpha
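# The consistency weight is ramped up from near 0 to `weight` over `rampup`
# iterations (sigmoid_rampup is assumed to follow the common Mean Teacher
# schedule exp(-5 * (1 - t)^2) with t = iteration / rampup). With weight=0.05
# this gives roughly 0.05 * exp(-5) ~= 3e-4 at the first step and the full
# 0.05 once t >= 1, so early training is driven mainly by the proportion loss.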
def train_llp(args, epoch, iteration, model, optimizer, loader,
criterion, consistency_criterion, logger):
meters = AverageMeterSet()
mini_batch = BagMiniBatch(args.n_samples)
# set up training mode for model
model.train()
for i, (x, y) in tqdm(enumerate(loader),
"[train#{}]".format(epoch),
leave=False,
ncols=150,
total=len(loader),
disable=args.disable):
with torch.autograd.set_detect_anomaly(True):
x = x.cuda()
y = y.cuda()
# accumulate x until the batch size is greater than or equal to
# the buffer size
mini_batch.append(x, y)
if mini_batch.num_bags < args.mini_batch_size:
continue
# skip training if there exists only one instance in a mini-batch
# because the BatchNorm would crash under this circumstance
if mini_batch.total_size == 1:
continue
# concatenate all bags
x, y = map(torch.cat, zip(*mini_batch.bags))
logits = None
if args.consistency_type == "vat":
# VAT should be calculated before the forward for cross entropy
consistency_loss = consistency_criterion(model, x)
elif args.consistency_type == "pi":
consistency_loss, logits = consistency_criterion(model, x)
else:
consistency_loss = torch.tensor(0.)
alpha = get_rampup_weight(args.consistency, iteration,
args.consistency_rampup)
consistency_loss = alpha * consistency_loss
meters.update("cons_loss", consistency_loss.item())
meters.update("cons_weight", alpha)
# reuse the logits from pi-model
if logits is None:
logits = model(x)
probs = F.softmax(logits, dim=1)
# compute proportion loss for each bag
if args.alg == "uniform":
# compute propotion loss in the batch way
batch_probs = probs.view(
mini_batch.num_bags, args.bag_size, -1)
batch_avg_probs = torch.mean(batch_probs, dim=1)
batch_target = torch.stack(mini_batch.targets)
prop_loss = criterion(batch_avg_probs, batch_target)
else:
# compute proportion loss in sequential way
prop_loss = 0
start = 0
for bag_size, target in mini_batch:
# proportion loss
avg_probs = torch.mean(
probs[start:start + bag_size], dim=0)
prop_loss += criterion(avg_probs, target)
start += bag_size
prop_loss = prop_loss / mini_batch.num_bags
meters.update("prop_loss", prop_loss.item())
# proportion_loss + consistency_loss
loss = prop_loss + consistency_loss
meters.update("loss", loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration.step()
prec1, prec5 = accuracy(logits, y.argmax(1), top_k=(1, 5))
meters.update("top1", prec1.item(), y.size(0))
meters.update("top5", prec5.item(), y.size(0))
# clear mini_batch
mini_batch.reset()
if logger:
logger.info("Epoch#{}-{} "
"cons_weight={meters[cons_weight].avg:.4f} "
"cons_loss={meters[cons_loss].avg:.4f} "
"prop_loss={meters[prop_loss].avg:.4f} "
"loss={meters[loss].avg:.4f} "
"prec@1={meters[top1].avg:.2f}% "
"prec@5={meters[top5].avg:.2f}%".format(epoch,
iteration.value,
meters=meters))
return meters
def eval(args, epoch, iteration, model, loader, criterion, logger, prefix=""):
meters = AverageMeterSet()
num_classes = 100 if args.dataset_name == 'cifar100' else 10
model.eval()
for x, y in tqdm(loader,
"[Evalutaion]",
leave=False,
ncols=150,
disable=args.disable):
x = x.cuda()
y = y.cuda()
with torch.no_grad():
logits = model(x)
probs = F.softmax(logits, dim=1)
avg_probs = torch.mean(probs, dim=0)
avg_ys = torch.mean(y, dim=0)
soft_kl = compute_soft_kl(avg_probs, avg_ys)
hard_l1 = compute_hard_l1(probs, y, num_classes)
loss = criterion(avg_probs, avg_ys)
meters.update('soft_kl', soft_kl.item())
meters.update('hard_l1', hard_l1.item())
meters.update('prop_loss', loss.item())
prec1, prec5 = accuracy(logits, y.argmax(1), top_k=(1, 5))
meters.update('top1', prec1.item(), y.size(0))
meters.update('top5', prec5.item(), y.size(0))
if logger:
logger.info("Epoch#{}-{} "
"{prefix}soft_kl={meters[soft_kl].avg:.4f} "
"{prefix}hard_l1={meters[hard_l1].avg:.4f} "
"{prefix}prop_loss={meters[prop_loss].avg:.4f} "
"{prefix}prec@1={meters[top1].avg:.2f}% "
"{prefix}prec@5={meters[top5].avg:.2f}%".format(
epoch, iteration.value, meters=meters, prefix=prefix))
return meters
def train_valid_split(dataset, valid_ratio, seed):
torch.manual_seed(seed)
valid_size = int(valid_ratio * len(dataset))
train_size = len(dataset) - valid_size
train, valid = random_split(dataset, [train_size, valid_size])
return train, valid
def create_model(model_name, num_classes, dataset_name):
if model_name == "wrn28-2":
if dataset_name.lower().startswith("cifar"):
dropout_rate = 0.3
elif dataset_name.lower().startswith("svhn"):
dropout_rate = 0.4
else:
raise NameError("Unknown dataset name")
print("Dropout: {}".format(dropout_rate))
return wide_resnet28_2(dropout_rate=dropout_rate,
num_classes=num_classes)
else:
raise NameError("Unknown model name")
def run_experiment(args, experiment):
experiment.save_config(vars(args))
# create logger for training, testing, validation
logger = experiment.create_logfile("experiment")
train_log = experiment.create_logfile("train")
valid_log = experiment.create_logfile("valid")
test_log = experiment.create_logfile("test")
# create tensorboard writer
tb_writer = experiment.create_tb_writer()
logger.info(args)
# load LLP dataset
if args.alg == "uniform":
dataset, bags = load_llp_dataset(args.dataset_dir,
args.obj_dir,
args.dataset_name,
args.alg,
replacement=args.replacement,
bag_size=args.bag_size)
elif args.alg == "kmeans":
dataset, bags = load_llp_dataset(args.dataset_dir,
args.obj_dir,
args.dataset_name,
args.alg,
n_clusters=args.n_clusters,
reduction=args.reduction)
else:
raise NameError("The bag creation algorithm is unknown")
# consturct data loader
train_bags, valid_bags = train_valid_split(bags, args.valid, args.seed)
train_bag_sampler = BagSampler(train_bags, args.num_bags)
train_loader = DataLoader(dataset["train"],
batch_sampler=train_bag_sampler,
pin_memory=True,
num_workers=2)
valid_loader = None
if args.valid > 0:
valid_bag_sampler = BagSampler(valid_bags, num_bags=-1)
valid_loader = DataLoader(dataset["train"],
batch_sampler=valid_bag_sampler,
pin_memory=True,
num_workers=2)
test_loader = DataLoader(dataset["test"],
batch_size=256,
pin_memory=True,
num_workers=2)
# declare model
model = create_model(args.model_name, dataset["num_classes"],
args.dataset_name)
model = model.cuda()
# declare optimizer
if args.optimizer.lower() == "sgd":
optimizer = optim.SGD(model.parameters(),
momentum=0.9,
lr=args.lr,
weight_decay=args.weight_decay)
elif args.optimizer.lower() == "adam":
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
else:
raise NameError("optimizer {} is not supported".format(args.optimizer))
# print model architecture and optimizer
logger.info(parameters_string(model))
logger.info(optimizer)
# declare LLP criterion - the Proportion loss
criterion = ProportionLoss(args.metric, 1.0)
logger.info(criterion)
# declare consistency criterion
if args.consistency_type == "none":
consistency_criterion = None
elif args.consistency_type == "vat":
consistency_criterion = VATLoss(xi=args.xi, eps=args.eps, ip=args.ip)
elif args.consistency_type == "pi":
consistency_criterion = PiModelLoss(std=args.std)
else:
raise NameError("Unknown consistency criterion")
if consistency_criterion and args.consistency_rampup == -1:
args.consistency_rampup = 0.4 * args.num_epochs * \
len(train_loader) / args.mini_batch_size
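    # Example of the heuristic above: with the default 400 epochs,
    # mini_batch_size=2 and a loader yielding, say, 500 bags per epoch, the
    # ramp-up lasts 0.4 * 400 * 500 / 2 = 40,000 iterations (iterations advance
    # once per accumulated mini-batch of bags, not once per bag).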
    # adjust learning rate
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=320, gamma=0.2)
iteration = Iteration()
for epoch in range(args.num_epochs):
train_meters = train_llp(args, epoch, iteration, model,
optimizer, train_loader, criterion,
consistency_criterion, train_log)
write_meters(epoch, "train", tb_writer, train_meters)
if valid_loader:
valid_meters = eval(args, epoch, iteration, model, valid_loader,
criterion, valid_log)
write_meters(epoch, "valid", tb_writer, valid_meters)
test_meters = eval(args, epoch, iteration, model, test_loader,
criterion, test_log)
write_meters(epoch, "test", tb_writer, test_meters)
scheduler.step()
# save checkpoint
if (epoch + 1) % 50 == 0:
logger.info("Save checkpoint#{}".format(epoch))
filename = os.path.join(experiment.result_dir, "model.tar")
save_checkpoint(filename, model, epoch, optimizer)
tb_writer.close()
def main(args):
uid = "{time}_{uuid}".format(
time=arrow.utcnow().format("YYYYMMDDTHH:mm:ss"),
uuid=str(uuid.uuid4())[:4]
)
result_dir = os.path.join(args.result_dir, uid)
experiment = RunExperiment(result_dir)
run_experiment(args, experiment)
def get_args():
parser = argparse.ArgumentParser(
"Learning from Label Proportions with Consistency Regularization")
# basic arguments
parser.add_argument("--obj_dir", default="./obj")
parser.add_argument("--dataset_dir", default="./obj/dataset")
parser.add_argument("--result_dir", default="./results")
parser.add_argument("-d", "--dataset_name", type=str)
parser.add_argument("-m", "--model_name", type=str, default="wrn28-2")
parser.add_argument("-e", "--num_epochs", type=int, default=400)
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--optimizer", type=str, default="adam")
parser.add_argument("--weight_decay", type=float, default=0.)
parser.add_argument("--metric", type=str, default="ce")
parser.add_argument("--valid", type=float, default=0.1)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument("--n_samples", default=0, type=int)
parser.add_argument("--disable", action="store_true",
help="disable the progress bar")
# bag creation algorithms
parser.add_argument("--alg", choices=["uniform", "kmeans"])
parser.add_argument("-b", "--bag_size", type=int)
parser.add_argument("--replacement", action="store_true")
parser.add_argument("-k", "--n_clusters", type=int)
parser.add_argument("--reduction", type=int, default=600)
# coefficient for proportion loss
parser.add_argument("--num_bags", default=-1, type=int)
parser.add_argument("--mini_batch_size", type=int, default=2)
# consistency args
parser.add_argument("--consistency_type",
choices=["vat", "pi", "none"],
default="vat")
parser.add_argument("--consistency", type=float, default=0.05)
parser.add_argument("--consistency_rampup", type=int, default=-1)
# pi args
parser.add_argument("--std", type=float, default=0.15)
# vat args
parser.add_argument("--xi", type=float, default=1e-6)
parser.add_argument("--eps", type=float, default=6.0)
parser.add_argument("--ip", type=int, default=1)
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
main(args)
| 14,895 | 39.150943 | 79 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/losses.py | import contextlib
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.constraints import simplex
from llp_vat.lib.networks import GaussianNoise
def compute_soft_kl(inputs, targets):
with torch.no_grad():
loss = cross_entropy_loss(inputs, targets)
loss = torch.sum(loss, dim=-1).mean()
return loss
def compute_hard_l1(inputs, targets, num_classes):
with torch.no_grad():
predicted = torch.bincount(inputs.argmax(1),
minlength=num_classes).float()
predicted = predicted / torch.sum(predicted, dim=0)
targets = torch.mean(targets, dim=0)
loss = F.l1_loss(predicted, targets, reduction="sum")
return loss
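# compute_hard_l1 compares the bag's hard-prediction histogram with its true
# class proportions. For example, if 3 of 4 instances are predicted as class 0
# and 1 as class 1 while the bag proportion is (0.5, 0.5), the summed L1 error
# is |0.75 - 0.5| + |0.25 - 0.5| = 0.5.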
def cross_entropy_loss(input, target, eps=1e-8):
assert simplex.check(input) and simplex.check(target), \
"input {} and target {} should be a simplex".format(input, target)
input = torch.clamp(input, eps, 1 - eps)
loss = -target * torch.log(input)
return loss
class ProportionLoss(nn.Module):
def __init__(self, metric, alpha, eps=1e-8):
super(ProportionLoss, self).__init__()
self.metric = metric
self.eps = eps
self.alpha = alpha
def forward(self, input, target):
# input and target shoud ba a probability tensor
# and have been averaged over bag size
assert simplex.check(input) and simplex.check(target), \
"input {} and target {} should be a simplex".format(input, target)
assert input.shape == target.shape
if self.metric == "ce":
loss = cross_entropy_loss(input, target, eps=self.eps)
elif self.metric == "l1":
loss = F.l1_loss(input, target, reduction="none")
elif self.metric == "mse":
loss = F.mse_loss(input, target, reduction="none")
else:
raise NameError("metric {} is not supported".format(self.metric))
loss = torch.sum(loss, dim=-1).mean()
return self.alpha * loss
@contextlib.contextmanager
def _disable_tracking_bn_stats(model):
def switch_attr(m):
if hasattr(m, 'track_running_stats'):
m.track_running_stats ^= True
model.apply(switch_attr)
yield
model.apply(switch_attr)
def _l2_normalize(d):
d_reshaped = d.view(d.shape[0], -1, *(1 for _ in range(d.dim() - 2)))
d /= torch.norm(d_reshaped, dim=1, keepdim=True) + 1e-8
return d
class VATLoss(nn.Module):
def __init__(self, xi=10.0, eps=1.0, ip=1):
"""VAT loss
:param xi: hyperparameter of VAT (default: 10.0)
:param eps: hyperparameter of VAT (default: 1.0)
:param ip: iteration times of computing adv noise (default: 1)
"""
super(VATLoss, self).__init__()
self.xi = xi
self.eps = eps
self.ip = ip
def forward(self, model, x):
with torch.no_grad():
pred = F.softmax(model(x), dim=1)
# prepare random unit tensor
# d = torch.rand(x.shape).sub(0.5).to(x.device)
d = torch.randn_like(x)
d = _l2_normalize(d)
with _disable_tracking_bn_stats(model):
# calc adversarial direction
for _ in range(self.ip):
d.requires_grad_()
pred_hat = model(x + self.xi * d)
logp_hat = F.log_softmax(pred_hat, dim=1)
adv_distance = F.kl_div(logp_hat, pred, reduction='batchmean')
adv_distance.backward()
d = _l2_normalize(d.grad)
model.zero_grad()
# calc LDS
r_adv = d * self.eps
pred_hat = model(x + r_adv)
logp_hat = F.log_softmax(pred_hat, dim=1)
lds = F.kl_div(logp_hat, pred, reduction='batchmean')
return lds
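# Minimal usage sketch (mirrors how the training loop calls it; `model` is any
# module mapping images to logits):
#   vat_loss = VATLoss(xi=1e-6, eps=6.0, ip=1)
#   lds = vat_loss(model, x)  # computed before the clean forward pass
#   loss = prop_loss + alpha * lds
# The returned LDS term is the KL divergence between predictions on x and on
# x + r_adv, where r_adv is the power-iteration estimate of the most sensitive
# perturbation direction scaled to length eps.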
class PiModelLoss(nn.Module):
def __init__(self, std=0.15):
super(PiModelLoss, self).__init__()
self.gn = GaussianNoise(std)
def forward(self, model, x):
logits1 = model(x)
probs1 = F.softmax(logits1, dim=1)
with torch.no_grad():
logits2 = model(self.gn(x))
probs2 = F.softmax(logits2, dim=1)
loss = F.mse_loss(probs1, probs2, reduction="sum") / x.size(0)
return loss, logits1
| 4,292 | 30.8 | 78 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/run_experiment.py | import glob
import os
import pathlib
import warnings
import logzero
import torch
import torch.nn as nn
import yaml
from torch.utils.tensorboard import SummaryWriter
def write_meters(epoch, tag, tb_writer, meters):
for name, value in meters.averages("").items():
tb_writer.add_scalar("{}/{}".format(tag, name), value, epoch)
def save_checkpoint(filename, model, epoch, optimizer=None):
checkpoint = {'epoch': epoch}
if isinstance(model, nn.DataParallel):
checkpoint['state_dict'] = model.module.state_dict()
else:
checkpoint['state_dict'] = model.state_dict()
if optimizer is not None:
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, filename)
def load_checkpoint(filename, model, optimizer=None, device="cpu"):
checkpoint = torch.load(filename, map_location=device)
model.load_state_dict(checkpoint['state_dict'])
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
return model, optimizer
else:
return model
class RunExperiment:
def __init__(self, result_dir, mode="w"):
self._check_path(result_dir)
self.result_dir = result_dir
self.mode = mode
def _check_path(self, path):
"""Create directory if path doesn't exist"""
if path is not None:
if os.path.isfile(path):
raise TypeError("Cannot create directory {}".format(path))
target_dir = path
if os.path.exists(path):
warnings.warn(
"Experiment {} has been executed before".format(path))
opt = input("Continue running the experiment, y/[n]: ")
if opt.lower() != "y":
raise RuntimeError()
pathlib.Path(target_dir).mkdir(parents=True, exist_ok=True)
def create_logfile(self, name):
fmt = ("%(color)s[%(levelname)s %(name)s %(asctime)s]"
"%(end_color)s %(message)s")
log_fmt = logzero.LogFormatter(fmt=fmt)
if name is None:
filename = None
elif not name.endswith(".log"):
filename = os.path.join(self.result_dir, name + ".log")
else:
filename = os.path.join(self.result_dir, name)
        if filename is not None and os.path.exists(filename):
            os.remove(filename)
return logzero.setup_logger(name=name,
logfile=filename,
formatter=log_fmt)
def create_tb_writer(self):
# remove previous tensorboard results
files = glob.glob(os.path.join(self.result_dir,
'events.out.tfevents*'))
for f in files:
try:
os.remove(f)
except Exception:
raise RuntimeError("Error while removing file {}".format(f))
writer = SummaryWriter(self.result_dir)
return writer
def save_config(self, config):
with open(os.path.join(self.result_dir, "config.yml"), "w") as fp:
yaml.dump(config, fp)
| 3,101 | 31.652632 | 76 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/networks.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def wide_resnet28_2(**kwargs):
net = WideResNet(28, 2, **kwargs)
net.apply(conv_init)
return net
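# WRN-28-2: depth 28 gives (28 - 4) / 6 = 4 WideBasic blocks per stage, and the
# widen factor 2 turns the 16/32/64 base widths into 32/64/128 channels
# (roughly 1.5M parameters). A hedged usage sketch:
#   net = wide_resnet28_2(dropout_rate=0.3, num_classes=10)
#   logits = net(torch.randn(2, 3, 32, 32))  # -> shape (2, 10)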
class GaussianNoise(nn.Module):
""" add gasussian noise into feature """
def __init__(self, std):
super(GaussianNoise, self).__init__()
self.std = std
def forward(self, x):
zeros_ = torch.zeros_like(x)
n = torch.normal(zeros_, std=self.std)
return x + n
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.xavier_uniform_(m.weight, gain=np.sqrt(2))
nn.init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class WideBasic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(WideBasic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes,
planes,
kernel_size=3,
padding=1,
bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes,
planes,
kernel_size=1,
stride=stride,
bias=True), )
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class WideResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes):
super(WideResNet, self).__init__()
self.in_planes = 16
assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
n = (depth - 4) // 6
k = widen_factor
print('| Wide-Resnet %dx%d' % (depth, k))
nStages = [16, 16 * k, 32 * k, 64 * k]
self.conv1 = conv3x3(3, nStages[0])
self.layer1 = self._wide_layer(WideBasic,
nStages[1],
n,
dropout_rate,
stride=1)
self.layer2 = self._wide_layer(WideBasic,
nStages[2],
n,
dropout_rate,
stride=2)
self.layer3 = self._wide_layer(WideBasic,
nStages[3],
n,
dropout_rate,
stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
| 4,260 | 31.526718 | 78 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/llp.py | import os
import pathlib
import time
from itertools import groupby
import numpy as np
import torch
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import PCA
from torch.utils.data import Sampler, BatchSampler, RandomSampler
from llp_vat.lib.datasets import load_dataset
class Iteration:
def __init__(self, start=0):
self.value = start
def step(self, step=1):
self.value += step
class BagMiniBatch:
def __init__(self, n_samples):
self.n_samples = n_samples
self.reset()
def reset(self):
self.bags = []
self.bag_sizes = []
self.targets = [] # store proportion labels
def append(self, x, y):
assert x.size(0) == y.size(0)
self.targets.append(torch.mean(y, dim=0))
if self.n_samples > 0:
index = torch.randperm(x.size(0))[:self.n_samples]
x = x[index]
y = y[index]
self.bags.append((x, y))
self.bag_sizes.append(y.size(0))
def __iter__(self):
for item in zip(self.bag_sizes, self.targets):
yield item
@property
def total_size(self):
return sum(self.bag_sizes)
@property
def max_bag_size(self):
return max(self.bag_sizes)
@property
def num_bags(self):
return len(self.bag_sizes)
class BagSampler(Sampler):
def __init__(self, bags, num_bags=-1):
"""
params:
bags: shape (num_bags, num_instances), the element of a bag
is the instance index of the dataset
num_bags: int, -1 stands for using all bags
"""
self.bags = bags
if num_bags == -1:
self.num_bags = len(bags)
else:
self.num_bags = num_bags
assert 0 < self.num_bags <= len(bags)
def __iter__(self):
indices = torch.randperm(self.num_bags)
for index in indices:
yield self.bags[index]
def __len__(self):
return len(self.bags)
def uniform_creation(dataset, bag_size, replacement, seed, drop_last=True):
"""
return:
bags: a nested list containing instance indices, shape (n_bags, *)
"""
torch.manual_seed(seed)
start = time.time()
indices = RandomSampler(range(len(dataset)), replacement=replacement)
bags = list(BatchSampler(indices, batch_size=bag_size,
drop_last=drop_last))
print("Create uniform bags in {:.2f} seconds".format(time.time() - start))
return bags
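# Example: with a 10-instance dataset, bag_size=4 and drop_last=True, `bags`
# could be [[3, 7, 1, 9], [0, 5, 2, 8]] -- two lists of instance indices, with
# the 2 leftover indices dropped; replacement=True allows an index to appear
# in more than one bag.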
def kmeans_creation(dataset, n_clusters, reduction, seed):
random_state = np.random.RandomState(seed)
data = [(x, y) for (x, y) in dataset]
X, y = map(torch.stack, zip(*data))
X = X.view(X.size(0), -1)
# PCA reduction
start = time.time()
pca = PCA(n_components=reduction)
X_new = pca.fit_transform(X)
print("PCA-{} in {:.2f} seconds".format(reduction, time.time() - start))
# assign bag label by k-means clustering
start = time.time()
init_size = max(3 * n_clusters, 300)
kmeans = MiniBatchKMeans(n_clusters=n_clusters,
random_state=random_state,
init_size=init_size)
kmeans.fit(X_new)
bag_labels = kmeans.predict(X_new)
print("K-means {} in {:.2f} seconds".format(n_clusters,
time.time() - start))
# create bags
start = time.time()
bags = sorted(zip(bag_labels, range(len(bag_labels))), key=lambda x: x[0])
bags = [[idx for _, idx in data]
for _, data in groupby(bags, key=lambda x: x[0])]
print("Create kmeans bags in {:.2f} seconds".format(time.time() - start))
return bags
def load_llp_dataset(dataset_dir, obj_dir, dataset_name, alg, **kwargs):
dataset = load_dataset(dataset_dir, dataset_name)
if alg == "uniform":
sampling = "SWR" if kwargs["replacement"] else "SWOR"
filename = "uniform-{}-{}.npy".format(sampling, kwargs["bag_size"])
elif alg == "kmeans":
filename = "kmeans-{}-{}.npy".format(kwargs["n_clusters"],
kwargs["reduction"])
elif alg == "overlap":
filename = "overlap-{}-{}.npy".format(kwargs["num_overlaps"],
kwargs["bag_size"])
else:
raise NameError("algorithm {} is not supported".format(alg))
path = os.path.join(obj_dir, dataset_name, filename)
bags = np.load(path, allow_pickle=True)
print("Load bags from {}".format(path))
return dataset, bags
def create_llp_dataset(dataset_dir, obj_dir, dataset_name, alg, **kwargs):
dataset = load_dataset(dataset_dir, dataset_name)
if alg == "uniform":
sampling = "SWR" if kwargs["replacement"] else "SWOR"
filename = "uniform-{}-{}.npy".format(sampling, kwargs["bag_size"])
bags = uniform_creation(dataset["train"], **kwargs)
elif alg == "kmeans":
filename = "kmeans-{}-{}.npy".format(kwargs["n_clusters"],
kwargs["reduction"])
bags = kmeans_creation(dataset["train"], **kwargs)
else:
raise NameError("algorithm {} is not supported".format(alg))
path = os.path.join(obj_dir, dataset_name, filename)
# dump bags
dirname = os.path.dirname(os.path.abspath(path))
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
    bags = np.array(bags, dtype=object)  # bags can be ragged (e.g. k-means clusters)
np.save(path, bags)
| 5,463 | 31.141176 | 78 | py |
LLP-VAT | LLP-VAT-main/llp_vat/lib/datasets.py | import torch
import torch.nn.functional as F
from torchvision import transforms
from torchvision.datasets import CIFAR10, CIFAR100, SVHN
class ToOneHot:
def __init__(self, num_classes):
self.num_classes = num_classes
def __call__(self, y: int) -> torch.Tensor:
one_hot = F.one_hot(torch.tensor(y), num_classes=self.num_classes)
return one_hot.float()
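# Example: ToOneHot(10)(3) -> tensor([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]),
# so averaging the one-hot targets over a bag later yields its label
# proportions directly.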
def cifar10(root):
channel_stats = dict(mean=[0.4914, 0.4822, 0.4465],
std=[0.2470, 0.2435, 0.2616])
num_classes = 10
transform = {
"train":
transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**channel_stats)
]),
"test":
transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(**channel_stats)])
}
train = CIFAR10(root,
train=True,
transform=transform["train"],
target_transform=ToOneHot(num_classes),
download=True)
test = CIFAR10(root,
train=False,
transform=transform["test"],
target_transform=ToOneHot(num_classes),
download=True)
return {'train': train, 'test': test, 'num_classes': num_classes}
def cifar100(root):
channel_stats = dict(mean=[0.5071, 0.4865, 0.4409],
std=[0.2673, 0.2564, 0.2762])
num_classes = 100
transform = {
"train":
transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**channel_stats)
]),
"test":
transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(**channel_stats)])
}
train = CIFAR100(root,
train=True,
transform=transform["train"],
target_transform=ToOneHot(num_classes),
download=True)
test = CIFAR100(root,
train=False,
transform=transform["test"],
target_transform=ToOneHot(num_classes),
download=True)
return {'train': train, 'test': test, 'num_classes': num_classes}
def svhn(root):
channel_stats = dict(mean=[0.4377, 0.4438, 0.4728],
std=[0.1980, 0.2010, 0.1970])
num_classes = 10
transform = {
"train":
transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**channel_stats)
]),
"test":
transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(**channel_stats)])
}
train = SVHN(root,
split='train',
transform=transform["train"],
target_transform=ToOneHot(num_classes),
download=True)
test = SVHN(root,
split='test',
transform=transform["test"],
target_transform=ToOneHot(num_classes),
download=True)
return {'train': train, 'test': test, 'num_classes': num_classes}
def load_dataset(root, dataset_name):
dataset_name = dataset_name.lower()
if dataset_name == "cifar10":
dataset = cifar10(root)
elif dataset_name == "cifar100":
dataset = cifar100(root)
elif dataset_name == "svhn":
dataset = svhn(root)
else:
raise NameError("dataset {} is not supported".format(dataset_name))
return dataset
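# Illustrative usage sketch (not part of the original module; the root path is an
# assumption for demonstration only):
#
#     data = load_dataset("./data", "cifar10")
#     train_set, num_classes = data["train"], data["num_classes"]
#     x, y = train_set[0]   # x: normalized 3x32x32 tensor
#     # y: one-hot float tensor of length num_classes (see ToOneHot above)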
| 3,819 | 30.570248 | 75 | py |
ADLD | ADLD-master/test.py | import argparse
import os
import torch.optim as optim
import torch.utils.data as util_data
import itertools
import network
import pre_process as prep
import lr_schedule
from util import *
from data_list import ImageList_au, ImageList_land_au
optim_dict = {'SGD': optim.SGD, 'Adam': optim.Adam}
def main(config):
## set loss criterion
use_gpu = torch.cuda.is_available()
## prepare data
dsets = {}
dset_loaders = {}
dsets['source'] = {}
dset_loaders['source'] = {}
dsets['source']['test'] = ImageList_au(config.src_test_path_prefix,
transform=prep.image_test(crop_size=config.crop_size))
dset_loaders['source']['test'] = util_data.DataLoader(dsets['source']['test'], batch_size=config.eval_batch_size,
shuffle=False, num_workers=config.num_workers)
dsets['target'] = {}
dset_loaders['target'] = {}
dsets['target']['test'] = ImageList_au(config.tgt_test_path_prefix,
transform=prep.image_test(crop_size=config.crop_size))
dset_loaders['target']['test'] = util_data.DataLoader(dsets['target']['test'], batch_size=config.eval_batch_size,
shuffle=False, num_workers=config.num_workers)
## set network modules
base_net = network.network_dict[config.base_net]()
land_enc = network.network_dict[config.land_enc](land_num=config.land_num)
au_enc = network.network_dict[config.au_enc](au_num=config.au_num)
invar_shape_enc = network.network_dict[config.invar_shape_enc]()
feat_gen = network.network_dict[config.feat_gen]()
if use_gpu:
base_net = base_net.cuda()
land_enc = land_enc.cuda()
au_enc = au_enc.cuda()
invar_shape_enc = invar_shape_enc.cuda()
feat_gen = feat_gen.cuda()
base_net.train(False)
land_enc.train(False)
au_enc.train(False)
invar_shape_enc.train(False)
feat_gen.train(False)
print(base_net, land_enc, au_enc, invar_shape_enc, feat_gen)
if not os.path.exists(config.write_path_prefix + config.mode):
os.makedirs(config.write_path_prefix + config.mode)
if not os.path.exists(config.write_res_prefix + config.mode):
os.makedirs(config.write_res_prefix + config.mode)
test_type = 'target' # 'source'
if config.start_epoch <= 0:
raise (RuntimeError('start_epoch should be larger than 0\n'))
res_file = open(config.write_res_prefix + config.mode + '/' + test_type + '_test_AU_pred_' + str(config.start_epoch) + '.txt', 'w')
for epoch in range(config.start_epoch, config.n_epochs + 1):
base_net.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/base_net_' + str(epoch) + '.pth'))
land_enc.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/land_enc_' + str(epoch) + '.pth'))
au_enc.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/au_enc_' + str(epoch) + '.pth'))
invar_shape_enc.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/invar_shape_enc_' + str(epoch) + '.pth'))
feat_gen.load_state_dict(
torch.load(config.write_path_prefix + config.mode + '/feat_gen_' + str(epoch) + '.pth'))
if test_type == 'source':
f1score_arr, acc_arr = AU_detection_eval_src(dset_loaders[test_type]['test'], base_net, au_enc, use_gpu=use_gpu)
else:
f1score_arr, acc_arr = AU_detection_eval_tgt(dset_loaders[test_type]['test'], base_net, land_enc, au_enc,
invar_shape_enc, feat_gen, use_gpu=use_gpu)
print('epoch =%d, f1 score mean=%f, accuracy mean=%f' %(epoch, f1score_arr.mean(), acc_arr.mean()))
        print('%d\t%f\t%f' % (epoch, f1score_arr.mean(), acc_arr.mean()), file=res_file)
res_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--gpu_id', type=str, default='0', help='device id to run')
parser.add_argument('--crop_size', type=int, default=176, help='crop size for images')
parser.add_argument('--output_size', type=int, default=44, help='size for landmark response map')
parser.add_argument('--au_num', type=int, default=6, help='number of AUs')
parser.add_argument('--land_num', type=int, default=49, help='number of landmarks')
parser.add_argument('--eval_batch_size', type=int, default=4, help='mini-batch size for evaluation')
parser.add_argument('--start_epoch', type=int, default=1, help='starting epoch')
parser.add_argument('--n_epochs', type=int, default=10, help='number of total epochs')
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--mode', type=str, default='weak', choices=['weak', 'full'])
parser.add_argument('--base_net', type=str, default='Feat_Enc')
parser.add_argument('--land_enc', type=str, default='Land_Detect')
parser.add_argument('--au_enc', type=str, default='AU_Detect')
parser.add_argument('--invar_shape_enc', type=str, default='Texture_Enc')
parser.add_argument('--feat_gen', type=str, default='Generator')
# Directories.
parser.add_argument('--write_path_prefix', type=str, default='data/snapshots/')
parser.add_argument('--write_res_prefix', type=str, default='data/res/')
parser.add_argument('--src_test_path_prefix', type=str, default='data/list/BP4D_test')
parser.add_argument('--tgt_test_path_prefix', type=str, default='data/list/emotioNet_test')
config = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id
print(config)
main(config)
| 5,813 | 43.381679 | 135 | py |
ADLD | ADLD-master/network.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Feat_Enc(nn.Module):
def __init__(self):
super(Feat_Enc, self).__init__()
self.align_attention_features = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.PReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.PReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Tanh(),
)
def forward(self, x):
align_output = self.align_attention_features(x)
return align_output
class AU_Detect(nn.Module):
def __init__(self, au_num):
super(AU_Detect, self).__init__()
self.aus_feat = nn.ModuleList([nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
) for i in range(au_num)])
self.aus_fc = nn.ModuleList([
nn.Linear(64, 1)
for i in range(au_num)])
def forward(self, x):
start = True
for i in range(len(self.aus_fc)):
au_feat = self.aus_feat[i](x)
au_feat_interm = F.avg_pool2d(au_feat, au_feat.size()[2:])
au_feat_interm = au_feat_interm.view(au_feat_interm.size(0), -1)
au_output = self.aus_fc[i](au_feat_interm)
if start:
aus_output = au_output
aus_feat = au_feat_interm
start = False
else:
aus_output = torch.cat((aus_output, au_output), 1)
aus_feat = torch.cat((aus_feat, au_feat_interm), 1)
return aus_feat, aus_output
class Land_Detect(nn.Module):
def __init__(self, land_num):
super(Land_Detect, self).__init__()
self.align_attention_features = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Tanh(),
nn.Conv2d(64, land_num, kernel_size=3, stride=1, padding=1),
)
def forward(self, x):
align_feat = self.align_attention_features[:-1](x)
align_output = self.align_attention_features[-1](align_feat)
start = True
for i in range(align_output.size(1)):
t_align_attention_feat_ori = align_output[:, i, :, :]
t_align_attention_feat = t_align_attention_feat_ori.view(t_align_attention_feat_ori.size(0), -1)
t_align_attention_feat = F.softmax(t_align_attention_feat, 1)
t_align_attention_feat = t_align_attention_feat.view(t_align_attention_feat_ori.size(0), 1,
t_align_attention_feat_ori.size(1),
t_align_attention_feat_ori.size(2))
if start:
align_attention = t_align_attention_feat
start = False
else:
align_attention = torch.cat((align_attention, t_align_attention_feat), 1)
return align_attention, align_feat, align_output
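# Shape sketch for Land_Detect.forward (illustrative; the batch size and the 44x44
# spatial size are assumptions based on 176-pixel crops downsampled twice by
# Feat_Enc): given x of shape (B, 64, 44, 44),
#   align_feat      -> (B, 64, 44, 44)        features before the final conv
#   align_output    -> (B, land_num, 44, 44)  per-landmark response maps
#   align_attention -> (B, land_num, 44, 44)  each channel softmax-normalized over
#                                             all 44*44 positions, so it sums to 1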
class Texture_Enc(nn.Module):
def __init__(self, inter_dim=64):
super(Texture_Enc, self).__init__()
self.irrele_shape_encoder = nn.Sequential(
nn.Conv2d(64, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Tanh(),
)
def forward(self, x):
irrele_shape_output = self.irrele_shape_encoder(x)
return irrele_shape_output
class Generator(nn.Module):
def __init__(self, input_dim1 = 1, input_dim2=64, inter_dim=128):
super(Generator, self).__init__()
self.feat_generator = nn.Sequential(
nn.Conv2d(input_dim1 + input_dim2, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim // 2, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim // 2),
nn.PReLU(),
nn.Conv2d(inter_dim // 2, inter_dim // 2, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(inter_dim // 2),
nn.PReLU(),
nn.Conv2d(inter_dim // 2, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Tanh(),
)
def forward(self, align_attentions, irrele_shape_output):
assemble_align_attention = torch.sum(align_attentions, 1, True)
input = torch.cat((assemble_align_attention, irrele_shape_output), 1)
# input = torch.cat((align_attentions,irrele_shape_output),1)
output = self.feat_generator(input)
return output
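# Note on channel arithmetic (descriptive): the landmark attention maps are summed
# into a single channel before being concatenated with the 64-channel texture code,
# so the generator input has input_dim1 + input_dim2 = 1 + 64 = 65 channels,
# matching the first Conv2d above.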
class Land_Disc(nn.Module):
def __init__(self, land_num):
super(Land_Disc, self).__init__()
self.align_attention_features = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(64),
nn.PReLU(),
nn.Conv2d(64, land_num, kernel_size=3, stride=1, padding=1),
)
def forward(self, x):
align_output = self.align_attention_features(x)
return align_output
class Discriminator(nn.Module):
'''Discriminator model for source domain.'''
def __init__(self, input_dim=64, inter_dim = 64):
'''Init discriminator.'''
super(Discriminator, self).__init__()
self.layer = nn.Sequential(
nn.Conv2d(input_dim, inter_dim, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(inter_dim),
nn.PReLU(),
nn.Conv2d(inter_dim, inter_dim * 2, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(inter_dim * 2),
nn.PReLU(),
nn.Conv2d(inter_dim * 2, inter_dim * 2, kernel_size=4, stride=2, padding=0),
# nn.InstanceNorm2d(inter_dim * 2),
nn.PReLU(),
nn.Conv2d(inter_dim * 2, 1, kernel_size=1, stride=1, padding=0)
)
self.input_dim = input_dim
def forward(self, input):
out = self.layer(input)
out = out.view(out.size(0), -1)
return out
network_dict = {'Feat_Enc':Feat_Enc, 'Land_Detect':Land_Detect, 'AU_Detect':AU_Detect, 'Land_Disc':Land_Disc,
'Texture_Enc':Texture_Enc, 'Generator':Generator, 'Discriminator':Discriminator} | 8,607 | 32.235521 | 109 | py |
ADLD | ADLD-master/util.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sklearn
from sklearn.metrics import accuracy_score, f1_score
def AU_detection_eval_src(loader, base_net, au_enc, use_gpu=True):
missing_label = 999
for i, batch in enumerate(loader):
input, label = batch
if use_gpu:
input, label = input.cuda(), label.cuda()
base_feat = base_net(input)
au_feat, au_output = au_enc(base_feat)
        au_output = torch.sigmoid(au_output)
if i == 0:
all_output = au_output.data.cpu().float()
all_label = label.data.cpu().float()
else:
all_output = torch.cat((all_output, au_output.data.cpu().float()), 0)
all_label = torch.cat((all_label, label.data.cpu().float()), 0)
AUoccur_pred_prob = all_output.data.numpy()
AUoccur_actual = all_label.data.numpy()
AUoccur_pred = np.zeros(AUoccur_pred_prob.shape)
AUoccur_pred[AUoccur_pred_prob < 0.5] = 0
AUoccur_pred[AUoccur_pred_prob >= 0.5] = 1
AUoccur_actual = AUoccur_actual.transpose((1, 0))
AUoccur_pred = AUoccur_pred.transpose((1, 0))
f1score_arr = np.zeros(AUoccur_actual.shape[0])
acc_arr = np.zeros(AUoccur_actual.shape[0])
for i in range(AUoccur_actual.shape[0]):
curr_actual = AUoccur_actual[i]
curr_pred = AUoccur_pred[i]
new_curr_actual = curr_actual[curr_actual != missing_label]
new_curr_pred = curr_pred[curr_actual != missing_label]
f1score_arr[i] = f1_score(new_curr_actual, new_curr_pred)
acc_arr[i] = accuracy_score(new_curr_actual, new_curr_pred)
return f1score_arr, acc_arr
def AU_detection_eval_tgt(loader, base_net, land_enc, au_enc, invar_shape_enc, feat_gen, use_gpu=True):
missing_label = 999
for i, batch in enumerate(loader):
input, label = batch
if use_gpu:
input, label = input.cuda(), label.cuda()
base_feat = base_net(input)
align_attention, align_feat, align_output = land_enc(base_feat)
invar_shape_output = invar_shape_enc(base_feat)
new_gen = feat_gen(align_attention, invar_shape_output)
new_gen_au_feat, new_gen_au_output = au_enc(new_gen)
        au_output = torch.sigmoid(new_gen_au_output)
if i == 0:
all_output = au_output.data.cpu().float()
all_label = label.data.cpu().float()
else:
all_output = torch.cat((all_output, au_output.data.cpu().float()), 0)
all_label = torch.cat((all_label, label.data.cpu().float()), 0)
AUoccur_pred_prob = all_output.data.numpy()
AUoccur_actual = all_label.data.numpy()
AUoccur_pred = np.zeros(AUoccur_pred_prob.shape)
AUoccur_pred[AUoccur_pred_prob < 0.5] = 0
AUoccur_pred[AUoccur_pred_prob >= 0.5] = 1
AUoccur_actual = AUoccur_actual.transpose((1, 0))
AUoccur_pred = AUoccur_pred.transpose((1, 0))
f1score_arr = np.zeros(AUoccur_actual.shape[0])
acc_arr = np.zeros(AUoccur_actual.shape[0])
for i in range(AUoccur_actual.shape[0]):
curr_actual = AUoccur_actual[i]
curr_pred = AUoccur_pred[i]
new_curr_actual = curr_actual[curr_actual != missing_label]
new_curr_pred = curr_pred[curr_actual != missing_label]
f1score_arr[i] = f1_score(new_curr_actual, new_curr_pred)
acc_arr[i] = accuracy_score(new_curr_actual, new_curr_pred)
return f1score_arr, acc_arr
def land_softmax_loss(input, target, weight=None, size_average=True, reduce=True):
classify_loss = nn.CrossEntropyLoss(weight=weight, size_average=size_average, reduce=reduce)
for i in range(input.size(1)):
t_input = input[:, i, :, :]
t_input = t_input.view(t_input.size(0), -1)
t_target = target[:, i]
t_loss = classify_loss(t_input, t_target)
t_loss = torch.unsqueeze(t_loss, 0)
if i == 0:
loss = t_loss
else:
loss = torch.cat((loss, t_loss), 0)
if size_average:
return loss.mean()
else:
return loss.sum()
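# Reading of land_softmax_loss (descriptive; shapes are assumptions): input holds
# per-landmark response maps of shape (B, land_num, H, W) and target holds one
# ground-truth position index per landmark, shape (B, land_num), encoded as
# y * W + x in [0, H*W) by pre_process.land_transform. Each landmark map is
# flattened to (B, H*W) and scored with CrossEntropyLoss; e.g. with H = W = 44 the
# classification is over 1936 candidate positions.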
def land_adaptation_loss(input, size_average=True, reduce=True):
classify_loss = nn.MSELoss(size_average=size_average, reduce=reduce)
use_gpu = torch.cuda.is_available()
for i in range(input.size(1)):
t_input = input[:, i, :, :]
t_input = t_input.view(t_input.size(0), -1)
t_target = torch.ones(t_input.size()) * 1.0 / t_input.size(1)
if use_gpu:
t_target = t_target.cuda()
t_loss = classify_loss(t_input, t_target)
t_loss = torch.unsqueeze(t_loss, 0)
if i == 0:
loss = t_loss
else:
loss = torch.cat((loss, t_loss), 0)
if size_average:
return loss.mean()
else:
return loss.sum()
def land_discriminator_loss(input, target, size_average=True, reduce=True):
classify_loss = nn.MSELoss(size_average=size_average, reduce=reduce)
use_gpu = torch.cuda.is_available()
for i in range(input.size(1)):
t_input = input[:, i, :, :]
t_input = t_input.view(t_input.size(0), -1)
t_target = torch.zeros(t_input.size())
if use_gpu:
t_target = t_target.cuda()
t_true_target = target[:, i]
for j in range(t_true_target.size(0)):
t_target[j, t_true_target[j]] = 1
t_loss = classify_loss(t_input, t_target)
t_loss = torch.unsqueeze(t_loss, 0)
if i == 0:
loss = t_loss
else:
loss = torch.cat((loss, t_loss), 0)
if size_average:
return loss.mean()
else:
return loss.sum() | 5,630 | 33.335366 | 103 | py |
ADLD | ADLD-master/pre_process.py | import numpy as np
from torchvision import transforms
from PIL import Image
class PlaceCrop(object):
"""Crops the given PIL.Image at the particular index.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (w, h), a square crop (size, size) is
made.
"""
def __init__(self, size, start_x, start_y):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.start_x = start_x
self.start_y = start_y
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be cropped.
Returns:
PIL.Image: Cropped image.
"""
th, tw = self.size
return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
class SetFlip(object):
def __init__(self, flip):
self.flip = flip
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be flipped.
Returns:
PIL.Image: Randomly flipped image.
"""
if self.flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return img
class land_transform(object):
def __init__(self, output_size, scale, flip_reflect):
self.output_size = output_size
self.scale = scale
self.flip_reflect = flip_reflect.astype(int) - 1
def __call__(self, land, flip, offset_x, offset_y):
        land_label = np.zeros(len(land) // 2)
land[0:len(land):2] = (land[0:len(land):2] - offset_x) / float(self.scale)
land[1:len(land):2] = (land[1:len(land):2] - offset_y) / float(self.scale)
# change the landmark orders when flipping
if flip:
land[0:len(land):2] = self.output_size - 1 - land[0:len(land):2]
land[0:len(land):2] = land[0:len(land):2][self.flip_reflect]
land[1:len(land):2] = land[1:len(land):2][self.flip_reflect]
# landmark location refinement for predefined AU centers
ruler = abs(land[2 * 22] - land[2 * 25])
land[2 * 4 + 1] = land[2 * 4 + 1] - ruler / 2
land[2 * 5 + 1] = land[2 * 5 + 1] - ruler / 2
land[2 * 1 + 1] = land[2 * 1 + 1] - ruler / 3
land[2 * 8 + 1] = land[2 * 8 + 1] - ruler / 3
land[2 * 2 + 1] = land[2 * 2 + 1] + ruler / 3
land[2 * 7 + 1] = land[2 * 7 + 1] + ruler / 3
land[2 * 24 + 1] = land[2 * 24 + 1] + ruler
land[2 * 29 + 1] = land[2 * 29 + 1] + ruler
land[2 * 15 + 1] = land[2 * 15 + 1] - ruler / 2
land[2 * 17 + 1] = land[2 * 17 + 1] - ruler / 2
land[2 * 39 + 1] = land[2 * 39 + 1] + ruler / 2
land[2 * 41 + 1] = land[2 * 41 + 1] + ruler / 2
land = (np.around(land)).astype(int)
        for i in range(len(land) // 2):
land[2 * i] = min(max(land[2 * i], 0), self.output_size - 1)
land[2 * i + 1] = min(max(land[2 * i + 1], 0), self.output_size - 1)
land_label[i] = land[2 * i + 1] * self.output_size + land[2 * i]
return land_label
class image_train(object):
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, img, flip, offset_x, offset_y):
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
transform = transforms.Compose([
PlaceCrop(self.crop_size, offset_x, offset_y),
SetFlip(flip),
transforms.ToTensor(),
normalize
])
img = transform(img)
return img
def image_test(crop_size=176):
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
return transforms.Compose([
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normalize
]) | 3,931 | 32.322034 | 91 | py |
ADLD | ADLD-master/train.py | import argparse
import os
import torch.optim as optim
import torch.utils.data as util_data
import itertools
import network
import pre_process as prep
import lr_schedule
from util import *
from data_list import ImageList_au, ImageList_land_au
optim_dict = {'SGD': optim.SGD, 'Adam': optim.Adam}
def main(config):
## set loss criterion
use_gpu = torch.cuda.is_available()
au_weight_src = torch.from_numpy(np.loadtxt(config.src_train_path_prefix + '_weight.txt'))
if use_gpu:
au_weight_src = au_weight_src.float().cuda()
else:
au_weight_src = au_weight_src.float()
au_class_criterion = nn.BCEWithLogitsLoss(au_weight_src)
land_predict_criterion = land_softmax_loss
discriminator_criterion = nn.MSELoss()
reconstruct_criterion = nn.L1Loss()
land_discriminator_criterion = land_discriminator_loss
land_adaptation_criterion = land_adaptation_loss
## prepare data
dsets = {}
dset_loaders = {}
dsets['source'] = {}
dset_loaders['source'] = {}
dsets['source']['train'] = ImageList_land_au(config.crop_size, config.src_train_path_prefix,
transform=prep.image_train(crop_size=config.crop_size),
target_transform=prep.land_transform(output_size=config.output_size,
scale=config.crop_size / config.output_size,
flip_reflect=np.loadtxt(
config.flip_reflect)))
dset_loaders['source']['train'] = util_data.DataLoader(dsets['source']['train'], batch_size=config.train_batch_size,
shuffle=True, num_workers=config.num_workers)
dsets['source']['val'] = ImageList_au(config.src_val_path_prefix,
transform=prep.image_test(crop_size=config.crop_size))
dset_loaders['source']['val'] = util_data.DataLoader(dsets['source']['val'], batch_size=config.eval_batch_size,
shuffle=False, num_workers=config.num_workers)
dsets['target'] = {}
dset_loaders['target'] = {}
dsets['target']['train'] = ImageList_land_au(config.crop_size, config.tgt_train_path_prefix,
transform=prep.image_train(crop_size=config.crop_size),
target_transform=prep.land_transform(output_size=config.output_size,
scale=config.crop_size / config.output_size,
flip_reflect=np.loadtxt(
config.flip_reflect)))
dset_loaders['target']['train'] = util_data.DataLoader(dsets['target']['train'], batch_size=config.train_batch_size,
shuffle=True, num_workers=config.num_workers)
dsets['target']['val'] = ImageList_au(config.tgt_val_path_prefix,
transform=prep.image_test(crop_size=config.crop_size))
dset_loaders['target']['val'] = util_data.DataLoader(dsets['target']['val'], batch_size=config.eval_batch_size,
shuffle=False, num_workers=config.num_workers)
## set network modules
base_net = network.network_dict[config.base_net]()
land_enc = network.network_dict[config.land_enc](land_num=config.land_num)
land_enc_store = network.network_dict[config.land_enc](land_num=config.land_num)
au_enc = network.network_dict[config.au_enc](au_num=config.au_num)
invar_shape_enc = network.network_dict[config.invar_shape_enc]()
feat_gen = network.network_dict[config.feat_gen]()
invar_shape_disc = network.network_dict[config.invar_shape_disc](land_num=config.land_num)
feat_gen_disc_src = network.network_dict[config.feat_gen_disc]()
feat_gen_disc_tgt = network.network_dict[config.feat_gen_disc]()
if config.start_epoch > 0:
base_net.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/base_net_' + str(config.start_epoch) + '.pth'))
land_enc.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/land_enc_' + str(config.start_epoch) + '.pth'))
au_enc.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/au_enc_' + str(config.start_epoch) + '.pth'))
invar_shape_enc.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/invar_shape_enc_' + str(config.start_epoch) + '.pth'))
feat_gen.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/feat_gen_' + str(config.start_epoch) + '.pth'))
invar_shape_disc.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/invar_shape_disc_' + str(config.start_epoch) + '.pth'))
feat_gen_disc_src.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/feat_gen_disc_src_' + str(config.start_epoch) + '.pth'))
feat_gen_disc_tgt.load_state_dict(torch.load(config.write_path_prefix + config.mode + '/feat_gen_disc_tgt_' + str(config.start_epoch) + '.pth'))
if use_gpu:
base_net = base_net.cuda()
land_enc = land_enc.cuda()
land_enc_store = land_enc_store.cuda()
au_enc = au_enc.cuda()
invar_shape_enc = invar_shape_enc.cuda()
feat_gen = feat_gen.cuda()
invar_shape_disc = invar_shape_disc.cuda()
feat_gen_disc_src = feat_gen_disc_src.cuda()
feat_gen_disc_tgt = feat_gen_disc_tgt.cuda()
## collect parameters
base_net_parameter_list = [{'params': filter(lambda p: p.requires_grad, base_net.parameters()), 'lr': 1}]
land_enc_parameter_list = [{'params': filter(lambda p: p.requires_grad, land_enc.parameters()), 'lr': 1}]
au_enc_parameter_list = [{'params': filter(lambda p: p.requires_grad, au_enc.parameters()), 'lr': 1}]
invar_shape_enc_parameter_list = [
{'params': filter(lambda p: p.requires_grad, invar_shape_enc.parameters()), 'lr': 1}]
feat_gen_parameter_list = [{'params': filter(lambda p: p.requires_grad, feat_gen.parameters()), 'lr': 1}]
invar_shape_disc_parameter_list = [
{'params': filter(lambda p: p.requires_grad, invar_shape_disc.parameters()), 'lr': 1}]
feat_gen_disc_src_parameter_list = [
{'params': filter(lambda p: p.requires_grad, feat_gen_disc_src.parameters()), 'lr': 1}]
feat_gen_disc_tgt_parameter_list = [
{'params': filter(lambda p: p.requires_grad, feat_gen_disc_tgt.parameters()), 'lr': 1}]
## set optimizer
Gen_optimizer = optim_dict[config.gen_optimizer_type](
itertools.chain(invar_shape_enc_parameter_list, feat_gen_parameter_list),
1.0, [config.gen_beta1, config.gen_beta2])
Task_optimizer = optim_dict[config.task_optimizer_type](
itertools.chain(base_net_parameter_list, land_enc_parameter_list, au_enc_parameter_list),
1.0, [config.task_beta1, config.task_beta2])
Disc_optimizer = optim_dict[config.gen_optimizer_type](
itertools.chain(invar_shape_disc_parameter_list, feat_gen_disc_src_parameter_list,
feat_gen_disc_tgt_parameter_list), 1.0, [config.gen_beta1, config.gen_beta2])
Gen_param_lr = []
for param_group in Gen_optimizer.param_groups:
Gen_param_lr.append(param_group['lr'])
Task_param_lr = []
for param_group in Task_optimizer.param_groups:
Task_param_lr.append(param_group['lr'])
Disc_param_lr = []
for param_group in Disc_optimizer.param_groups:
Disc_param_lr.append(param_group['lr'])
Gen_lr_scheduler = lr_schedule.schedule_dict[config.gen_lr_type]
Task_lr_scheduler = lr_schedule.schedule_dict[config.task_lr_type]
Disc_lr_scheduler = lr_schedule.schedule_dict[config.gen_lr_type]
print(base_net, land_enc, au_enc, invar_shape_enc, feat_gen)
print(invar_shape_disc, feat_gen_disc_src, feat_gen_disc_tgt)
if not os.path.exists(config.write_path_prefix + config.mode):
os.makedirs(config.write_path_prefix + config.mode)
if not os.path.exists(config.write_res_prefix + config.mode):
os.makedirs(config.write_res_prefix + config.mode)
val_type = 'target' # 'source'
res_file = open(config.write_res_prefix + config.mode + '/' + val_type + '_AU_pred_' + str(config.start_epoch) + '.txt', 'w')
## train
len_train_tgt = len(dset_loaders['target']['train'])
count = 0
for epoch in range(config.start_epoch, config.n_epochs + 1):
# eval in the train
if epoch >= config.start_epoch:
base_net.train(False)
land_enc.train(False)
au_enc.train(False)
invar_shape_enc.train(False)
feat_gen.train(False)
if val_type == 'source':
f1score_arr, acc_arr = AU_detection_eval_src(dset_loaders[val_type]['val'], base_net, au_enc, use_gpu=use_gpu)
else:
f1score_arr, acc_arr = AU_detection_eval_tgt(dset_loaders[val_type]['val'], base_net, land_enc, au_enc,
invar_shape_enc, feat_gen, use_gpu=use_gpu)
print('epoch =%d, f1 score mean=%f, accuracy mean=%f' %(epoch, f1score_arr.mean(), acc_arr.mean()))
            print('%d\t%f\t%f' % (epoch, f1score_arr.mean(), acc_arr.mean()), file=res_file)
base_net.train(True)
land_enc.train(True)
au_enc.train(True)
invar_shape_enc.train(True)
feat_gen.train(True)
if epoch > config.start_epoch:
print('taking snapshot ...')
torch.save(base_net.state_dict(), config.write_path_prefix + config.mode + '/base_net_' + str(epoch) + '.pth')
torch.save(land_enc.state_dict(), config.write_path_prefix + config.mode + '/land_enc_' + str(epoch) + '.pth')
torch.save(au_enc.state_dict(), config.write_path_prefix + config.mode + '/au_enc_' + str(epoch) + '.pth')
torch.save(invar_shape_enc.state_dict(), config.write_path_prefix + config.mode + '/invar_shape_enc_' + str(epoch) + '.pth')
torch.save(feat_gen.state_dict(), config.write_path_prefix + config.mode + '/feat_gen_' + str(epoch) + '.pth')
torch.save(invar_shape_disc.state_dict(), config.write_path_prefix + config.mode + '/invar_shape_disc_' + str(epoch) + '.pth')
torch.save(feat_gen_disc_src.state_dict(), config.write_path_prefix + config.mode + '/feat_gen_disc_src_' + str(epoch) + '.pth')
torch.save(feat_gen_disc_tgt.state_dict(), config.write_path_prefix + config.mode + '/feat_gen_disc_tgt_' + str(epoch) + '.pth')
if epoch >= config.n_epochs:
break
for i, batch_src in enumerate(dset_loaders['source']['train']):
if i % config.display == 0 and count > 0:
print(
'[epoch = %d][iter = %d][loss_disc = %f][loss_invar_shape_disc = %f][loss_gen_disc = %f][total_loss = %f][loss_invar_shape_adaptation = %f][loss_gen_adaptation = %f][loss_self_recons = %f][loss_gen_cycle = %f][loss_au = %f][loss_land = %f]' % (
epoch, i, loss_disc.data.cpu().numpy(), loss_invar_shape_disc.data.cpu().numpy(),
loss_gen_disc.data.cpu().numpy(), total_loss.data.cpu().numpy(),
loss_invar_shape_adaptation.data.cpu().numpy(), loss_gen_adaptation.data.cpu().numpy(),
loss_self_recons.data.cpu().numpy(), loss_gen_cycle.data.cpu().numpy(),
loss_au.data.cpu().numpy(), loss_land.data.cpu().numpy()))
print('learning rate = %f, %f, %f' % (Disc_optimizer.param_groups[0]['lr'], Gen_optimizer.param_groups[0]['lr'], Task_optimizer.param_groups[0]['lr']))
print('the number of training iterations is %d' % (count))
input_src, land_src, au_src = batch_src
if count % len_train_tgt == 0:
if count > 0:
dset_loaders['target']['train'] = util_data.DataLoader(dsets['target']['train'], batch_size=config.train_batch_size,
shuffle=True, num_workers=config.num_workers)
iter_data_tgt = iter(dset_loaders['target']['train'])
        input_tgt, land_tgt, au_tgt = next(iter_data_tgt)
if input_tgt.size(0) > input_src.size(0):
input_tgt, land_tgt, au_tgt = input_tgt[0:input_src.size(0), :, :, :], land_tgt[0:input_src.size(0),
:], au_tgt[
0:input_src.size(0)]
elif input_tgt.size(0) < input_src.size(0):
input_src, land_src, au_src = input_src[0:input_tgt.size(0), :, :, :], land_src[0:input_tgt.size(0),
:], au_src[
0:input_tgt.size(0)]
if use_gpu:
input_src, land_src, au_src, input_tgt, land_tgt, au_tgt = \
input_src.cuda(), land_src.long().cuda(), au_src.float().cuda(), \
input_tgt.cuda(), land_tgt.long().cuda(), au_tgt.float().cuda()
else:
land_src, au_src, land_tgt, au_tgt = \
land_src.long(), au_src.float(), land_tgt.long(), au_tgt.float()
land_enc_store.load_state_dict(land_enc.state_dict())
base_feat_src = base_net(input_src)
align_attention_src, align_feat_src, align_output_src = land_enc(base_feat_src)
au_feat_src, au_output_src = au_enc(base_feat_src)
base_feat_tgt = base_net(input_tgt)
align_attention_tgt, align_feat_tgt, align_output_tgt = land_enc(base_feat_tgt)
au_feat_tgt, au_output_tgt = au_enc(base_feat_tgt)
invar_shape_output_src = invar_shape_enc(base_feat_src.detach())
invar_shape_output_tgt = invar_shape_enc(base_feat_tgt.detach())
# new_gen
new_gen_tgt = feat_gen(align_attention_src.detach(), invar_shape_output_tgt)
new_gen_src = feat_gen(align_attention_tgt.detach(), invar_shape_output_src)
# recons_gen
recons_gen_src = feat_gen(align_attention_src.detach(), invar_shape_output_src)
recons_gen_tgt = feat_gen(align_attention_tgt.detach(), invar_shape_output_tgt)
# new2_gen
new_gen_invar_shape_output_src = invar_shape_enc(new_gen_src.detach())
new_gen_invar_shape_output_tgt = invar_shape_enc(new_gen_tgt.detach())
new_gen_align_attention_src, new_gen_align_feat_src, new_gen_align_output_src = land_enc_store(new_gen_src)
new_gen_align_attention_tgt, new_gen_align_feat_tgt, new_gen_align_output_tgt = land_enc_store(new_gen_tgt)
new2_gen_tgt = feat_gen(new_gen_align_attention_src.detach(), new_gen_invar_shape_output_tgt)
new2_gen_src = feat_gen(new_gen_align_attention_tgt.detach(), new_gen_invar_shape_output_src)
############################
# 1. train discriminator #
############################
Disc_optimizer = Disc_lr_scheduler(Disc_param_lr, Disc_optimizer, epoch, config.n_epochs,
1, config.decay_start_epoch, config.gen_lr)
Disc_optimizer.zero_grad()
align_output_invar_shape_src = invar_shape_disc(
invar_shape_output_src.detach())
align_output_invar_shape_tgt = invar_shape_disc(
invar_shape_output_tgt.detach())
# loss_invar_shape_disc
loss_base_invar_shape_disc_src = land_discriminator_criterion(align_output_invar_shape_src, land_src)
loss_base_invar_shape_disc_tgt = land_discriminator_criterion(align_output_invar_shape_tgt, land_tgt)
loss_invar_shape_disc = (loss_base_invar_shape_disc_src + loss_base_invar_shape_disc_tgt) * 0.5
base_gen_src_pred = feat_gen_disc_src(base_feat_src.detach())
new_gen_src_pred = feat_gen_disc_src(new_gen_src.detach())
real_label = torch.ones((base_feat_src.size(0), 1))
fake_label = torch.zeros((base_feat_src.size(0), 1))
if use_gpu:
real_label, fake_label = real_label.cuda(), fake_label.cuda()
# loss_gen_disc_src
loss_base_gen_src = discriminator_criterion(base_gen_src_pred, real_label)
loss_new_gen_src = discriminator_criterion(new_gen_src_pred, fake_label)
loss_gen_disc_src = (loss_base_gen_src + loss_new_gen_src) * 0.5
base_gen_tgt_pred = feat_gen_disc_tgt(base_feat_tgt.detach())
new_gen_tgt_pred = feat_gen_disc_tgt(new_gen_tgt.detach())
# loss_gen_disc_tgt
loss_base_gen_tgt = discriminator_criterion(base_gen_tgt_pred, real_label)
loss_new_gen_tgt = discriminator_criterion(new_gen_tgt_pred, fake_label)
loss_gen_disc_tgt = (loss_base_gen_tgt + loss_new_gen_tgt) * 0.5
# loss_gen_disc
loss_gen_disc = (loss_gen_disc_src + loss_gen_disc_tgt) * 0.5
loss_disc = loss_invar_shape_disc + loss_gen_disc
loss_disc.backward()
# optimize discriminator
Disc_optimizer.step()
############################
# 2. train base network #
############################
Gen_optimizer = Gen_lr_scheduler(Gen_param_lr, Gen_optimizer, epoch, config.n_epochs,
1, config.decay_start_epoch, config.gen_lr)
Gen_optimizer.zero_grad()
Task_optimizer = Task_lr_scheduler(Task_param_lr, Task_optimizer, epoch, config.n_epochs,
1, config.decay_start_epoch, config.task_lr)
Task_optimizer.zero_grad()
align_output_invar_shape_src = invar_shape_disc(invar_shape_output_src)
align_output_invar_shape_tgt = invar_shape_disc(invar_shape_output_tgt)
# loss_invar_shape_adaptation
loss_base_invar_shape_adaptation_src = land_adaptation_criterion(align_output_invar_shape_src)
loss_base_invar_shape_adaptation_tgt = land_adaptation_criterion(align_output_invar_shape_tgt)
loss_invar_shape_adaptation = (
loss_base_invar_shape_adaptation_src + loss_base_invar_shape_adaptation_tgt) * 0.5
new_gen_src_pred = feat_gen_disc_src(new_gen_src)
loss_gen_adaptation_src = discriminator_criterion(new_gen_src_pred, real_label)
new_gen_tgt_pred = feat_gen_disc_tgt(new_gen_tgt)
loss_gen_adaptation_tgt = discriminator_criterion(new_gen_tgt_pred, real_label)
# loss_gen_adaptation
loss_gen_adaptation = (loss_gen_adaptation_src + loss_gen_adaptation_tgt) * 0.5
loss_gen_cycle_src = reconstruct_criterion(new2_gen_src, base_feat_src.detach())
loss_gen_cycle_tgt = reconstruct_criterion(new2_gen_tgt, base_feat_tgt.detach())
# loss_gen_cycle
loss_gen_cycle = (loss_gen_cycle_src + loss_gen_cycle_tgt) * 0.5
loss_self_recons_src = reconstruct_criterion(recons_gen_src, base_feat_src.detach())
loss_self_recons_tgt = reconstruct_criterion(recons_gen_tgt, base_feat_tgt.detach())
# loss_self_recons
loss_self_recons = (loss_self_recons_src + loss_self_recons_tgt) * 0.5
loss_base_gen_au_src = au_class_criterion(au_output_src, au_src)
loss_base_gen_au_tgt = au_class_criterion(au_output_tgt, au_tgt)
loss_base_gen_land_src = land_predict_criterion(align_output_src, land_src)
loss_base_gen_land_tgt = land_predict_criterion(align_output_tgt, land_tgt)
new_gen_au_feat_src, new_gen_au_output_src = au_enc(new_gen_src)
new_gen_au_feat_tgt, new_gen_au_output_tgt = au_enc(new_gen_tgt)
loss_new_gen_au_src = au_class_criterion(new_gen_au_output_src, au_tgt)
loss_new_gen_au_tgt = au_class_criterion(new_gen_au_output_tgt, au_src)
loss_new_gen_land_src = land_predict_criterion(new_gen_align_output_src, land_tgt)
loss_new_gen_land_tgt = land_predict_criterion(new_gen_align_output_tgt, land_src)
# loss_land
loss_land = (
loss_base_gen_land_src + loss_base_gen_land_tgt + loss_new_gen_land_src + loss_new_gen_land_tgt) * 0.5
# loss_au
if config.mode == 'weak':
loss_au = (loss_base_gen_au_src + loss_new_gen_au_tgt) * 0.5
else:
loss_au = (loss_base_gen_au_src + loss_base_gen_au_tgt + loss_new_gen_au_src + loss_new_gen_au_tgt) * 0.25
total_loss = config.lambda_land_adv * loss_invar_shape_adaptation + \
config.lambda_feat_adv * loss_gen_adaptation + \
config.lambda_cross_cycle * loss_gen_cycle + config.lambda_self_recons * loss_self_recons + \
config.lambda_au * loss_au + config.lambda_land * loss_land
total_loss.backward()
Gen_optimizer.step()
Task_optimizer.step()
count = count + 1
res_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--gpu_id', type=str, default='0', help='device id to run')
parser.add_argument('--crop_size', type=int, default=176, help='crop size for images')
parser.add_argument('--output_size', type=int, default=44, help='size for landmark response map')
parser.add_argument('--au_num', type=int, default=6, help='number of AUs')
parser.add_argument('--land_num', type=int, default=49, help='number of landmarks')
parser.add_argument('--train_batch_size', type=int, default=16, help='mini-batch size for training')
parser.add_argument('--eval_batch_size', type=int, default=4, help='mini-batch size for evaluation')
parser.add_argument('--start_epoch', type=int, default=0, help='starting epoch')
parser.add_argument('--n_epochs', type=int, default=10, help='number of total epochs')
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--mode', type=str, default='weak', choices=['weak', 'full'])
parser.add_argument('--base_net', type=str, default='Feat_Enc')
parser.add_argument('--land_enc', type=str, default='Land_Detect')
parser.add_argument('--au_enc', type=str, default='AU_Detect')
parser.add_argument('--invar_shape_enc', type=str, default='Texture_Enc')
parser.add_argument('--feat_gen', type=str, default='Generator')
parser.add_argument('--invar_shape_disc', type=str, default='Land_Disc')
parser.add_argument('--feat_gen_disc', type=str, default='Discriminator')
# Training configuration.
parser.add_argument('--lambda_au', type=float, default=1, help='weight for AU detection loss')
parser.add_argument('--lambda_land', type=float, default=0.6, help='weight for landmark detection loss')
parser.add_argument('--lambda_land_adv', type=float, default=400, help='weight for landmark adversarial loss')
parser.add_argument('--lambda_feat_adv', type=float, default=1.2, help='weight for feature adversarial loss')
parser.add_argument('--lambda_cross_cycle', type=float, default=40, help='weight for cross-cycle consistency loss')
parser.add_argument('--lambda_self_recons', type=float, default=3, help='weight for self-reconstruction loss')
parser.add_argument('--display', type=int, default=100, help='iteration gaps for displaying')
parser.add_argument('--gen_optimizer_type', type=str, default='Adam')
parser.add_argument('--gen_beta1', type=float, default=0.5, help='beta1 for Adam optimizer of generation')
parser.add_argument('--gen_beta2', type=float, default=0.9, help='beta2 for Adam optimizer of generation')
parser.add_argument('--gen_lr_type', type=str, default='lambda')
parser.add_argument('--gen_lr', type=float, default=5e-5, help='learning rate for generation')
parser.add_argument('--task_optimizer_type', type=str, default='Adam')
parser.add_argument('--task_beta1', type=float, default=0.95, help='beta1 for Adam optimizer of task')
parser.add_argument('--task_beta2', type=float, default=0.999, help='beta2 for Adam optimizer of task')
parser.add_argument('--task_lr_type', type=str, default='lambda')
parser.add_argument('--task_lr', type=float, default=1e-4, help='learning rate for task')
parser.add_argument('--decay_start_epoch', type=int, default=5, help='epoch for decaying lr')
# Directories.
parser.add_argument('--write_path_prefix', type=str, default='data/snapshots/')
parser.add_argument('--write_res_prefix', type=str, default='data/res/')
parser.add_argument('--flip_reflect', type=str, default='data/list/reflect_49.txt')
parser.add_argument('--src_train_path_prefix', type=str, default='data/list/BP4D_train')
parser.add_argument('--src_val_path_prefix', type=str, default='data/list/BP4D_val')
parser.add_argument('--src_test_path_prefix', type=str, default='data/list/BP4D_test')
parser.add_argument('--tgt_train_path_prefix', type=str, default='data/list/emotioNet_train')
parser.add_argument('--tgt_val_path_prefix', type=str, default='data/list/emotioNet_val')
parser.add_argument('--tgt_test_path_prefix', type=str, default='data/list/emotioNet_test')
config = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id
print(config)
main(config) | 26,351 | 58.485327 | 272 | py |
MICO | MICO-main/training/train_purchase100.py | import os
import argparse
import warnings
import git
import csv
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchcsprng import create_mt19937_generator, create_random_device_generator
from torch.utils.data import DataLoader
from opacus import PrivacyEngine
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager
from prv_accountant.dpsgd import find_noise_multiplier
from accountant import PRVAccountant
from mico_competition import ChallengeDataset, MLP, load_purchase100
from tqdm import tqdm, trange
from datetime import datetime
from typing import Callable, Optional
def accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
return (preds == labels).mean()
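# Example (illustrative): accuracy(np.array([1, 2, 3]), np.array([1, 0, 3]))
# returns 2/3. Both arguments are numpy arrays of predicted / true class indices,
# and the boolean mean is the fraction of matching positions.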
def train(args: argparse.Namespace,
model: nn.Module,
device: torch.device,
train_loader: DataLoader,
criterion,
optimizer: optim.Optimizer,
epoch: int,
compute_epsilon: Optional[Callable[[int], float]] = None):
model.train()
losses = []
top1_acc = []
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=args.max_physical_batch_size,
optimizer=optimizer
) as memory_safe_data_loader:
if args.disable_dp:
data_loader = train_loader
else:
data_loader = memory_safe_data_loader
# BatchSplittingSampler.__len__() approximates (badly) the length in physical batches
# See https://github.com/pytorch/opacus/issues/516
# We instead heuristically keep track of logical batches processed
pbar = tqdm(data_loader, desc="Batch", unit="batch", position=1, leave=True, total=len(train_loader), disable=None)
logical_batch_len = 0
for i, (inputs, target) in enumerate(data_loader):
inputs = inputs.to(device)
target = target.to(device)
logical_batch_len += len(target)
if logical_batch_len >= args.batch_size:
pbar.update(1)
logical_batch_len = logical_batch_len % args.max_physical_batch_size
optimizer.zero_grad()
output = model(inputs)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc)
loss.backward()
optimizer.step()
if (pbar.n + 1) % args.logging_steps == 0 or (pbar.n + 1) == pbar.total:
if not args.disable_dp:
epsilon = compute_epsilon(delta=args.target_delta)
pbar.set_postfix(
epoch=f"{epoch:02}",
train_loss=f"{np.mean(losses):.3f}",
accuracy=f"{np.mean(top1_acc) * 100:.3f}",
dp=f"(ε={epsilon:.2f}, δ={args.target_delta})"
)
else:
pbar.set_postfix(
epoch=f"{epoch:02}",
train_loss=f"{np.mean(losses):.3f}",
accuracy=f"{np.mean(top1_acc) * 100:.3f}",
dp="(ε = ∞, δ = 0)"
)
pbar.update(pbar.total - pbar.n)
def test(args: argparse.Namespace,
model: nn.Module,
device: torch.device,
test_loader: DataLoader,
criterion):
model.eval()
losses = []
top1_acc = []
with torch.no_grad():
for inputs, target in tqdm(test_loader, desc="Test ", unit="batch", disable=None):
inputs = inputs.to(device)
target = target.to(device)
output = model(inputs)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc)
top1_avg = np.mean(top1_acc)
loss_avg = np.mean(losses)
print(
f"Test Loss : {loss_avg:.6f}\n"
f"Test Accuracy: {top1_avg * 100:.6f}"
)
return np.mean(top1_acc)
def main(args: argparse.Namespace):
noise_generator = None
if not args.secure_mode and args.train_seed is not None:
# Following the advice on https://pytorch.org/docs/1.8.1/notes/randomness.html
if torch.cuda.is_available():
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
torch.use_deterministic_algorithms(True)
torch.cuda.manual_seed(args.train_seed)
torch.cuda.manual_seed_all(args.train_seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
import random
random.seed(args.train_seed)
os.environ['PYTHONHASHSEED'] = str(args.train_seed)
# Required to get deterministic batches because Opacus uses secure_rng as a generator for
# train_loader when poisson_sampling = True even though secure_mode = False, which sets secure_rng = None
# https://github.com/pytorch/opacus/blob/5e632cdb8d497aade29e5555ad79921c239c78f7/opacus/privacy_engine.py#L206
torch.manual_seed(args.train_seed)
np.random.seed(args.train_seed)
noise_generator = create_mt19937_generator(args.train_seed)
if (args.seed_challenge is None or args.seed_training is None or args.seed_membership is None):
if args.split_seed is None:
seed_generator = create_random_device_generator()
else:
seed_generator = create_mt19937_generator(args.split_seed)
args.seed_challenge, args.seed_training, args.seed_membership = torch.empty(
3, dtype=torch.int64).random_(0, to=None, generator=seed_generator)
print("Using generated seeds\n"
f" seed_challenge = {args.seed_challenge}\n"
f" seed_training = {args.seed_training}\n"
f" seed_membership = {args.seed_membership}\n")
else:
print("Using specified seeds")
full_dataset = load_purchase100(dataset_dir=args.dataset_dir)
challenge_dataset = ChallengeDataset(
full_dataset,
len_challenge=args.len_challenge,
len_training=args.len_training,
seed_challenge=args.seed_challenge,
seed_training=args.seed_training,
seed_membership=args.seed_membership)
train_dataset = challenge_dataset.get_train_dataset()
test_dataset = challenge_dataset.get_eval_dataset()
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
num_workers=args.dataloader_num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_dataset,
batch_size=args.max_physical_batch_size,
num_workers=args.dataloader_num_workers
)
# Supress warnings
warnings.filterwarnings(action="ignore", module="opacus", message=".*Secure RNG turned off")
warnings.filterwarnings(action="ignore", module="torch", message=".*Using a non-full backward hook")
model = MLP()
assert ModuleValidator.is_valid(model)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
# Not the same as args.batch_size / len(train_dataset)
args.sample_rate = 1 / len(train_loader)
num_steps = int(len(train_loader) * args.num_epochs)
if not args.disable_dp:
args.noise_multiplier = find_noise_multiplier(
sampling_probability=args.sample_rate,
num_steps=num_steps,
target_epsilon=args.target_epsilon,
target_delta=args.target_delta,
eps_error=0.1
)
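        # Descriptive note: find_noise_multiplier numerically searches for a noise
        # multiplier sigma such that DP-SGD with this sampling probability, run for
        # num_steps, satisfies (target_epsilon, target_delta)-DP within eps_error
        # according to the PRV accountant.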
privacy_engine = PrivacyEngine(secure_mode=args.secure_mode)
# Override Opacus accountant
# Revise if https://github.com/pytorch/opacus/pull/493 is merged
privacy_engine.accountant = PRVAccountant(
noise_multiplier=args.noise_multiplier,
sample_rate=args.sample_rate,
max_steps=num_steps,
eps_error=0.1,
delta_error=1e-9)
model, optimizer, train_loader = privacy_engine.make_private(
module=model,
optimizer=optimizer,
data_loader=train_loader,
noise_multiplier=args.noise_multiplier,
max_grad_norm=args.max_grad_norm,
poisson_sampling=True,
noise_generator=noise_generator
)
print(f"Training using DP-SGD with {optimizer.original_optimizer.__class__.__name__} optimizer\n"
f" noise multiplier σ = {optimizer.noise_multiplier},\n"
f" clipping norm C = {optimizer.max_grad_norm:},\n"
f" average batch size L = {args.batch_size},\n"
f" sample rate = {args.sample_rate},\n"
f" for {args.num_epochs} epochs ({num_steps} steps)\n"
f" to target ε = {args.target_epsilon}, δ = {args.target_delta}")
compute_epsilon: Optional[Callable[[float], float]] = lambda delta: privacy_engine.get_epsilon(delta=delta)
else:
print(f"Training using SGD with {optimizer.__class__.__name__} optimizer\n"
f" batch size L = {args.batch_size},\n"
f" for {args.num_epochs} epochs ({num_steps} steps)")
compute_epsilon = None
# Must be initialized after attaching the privacy engine.
# See https://discuss.pytorch.org/t/how-to-use-lr-scheduler-in-opacus/111718
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_scheduler_step, gamma=args.lr_scheduler_gamma)
pbar = trange(args.num_epochs, desc="Epoch", unit="epoch", position=0, leave=True, disable=None)
for epoch in pbar:
pbar.set_postfix(lr=f"{scheduler.get_last_lr()}")
train(args, model, device, train_loader, criterion, optimizer, epoch + 1, compute_epsilon=compute_epsilon)
scheduler.step()
acc = test(args, model, device, test_loader, criterion)
with open(os.path.join(args.output_dir, "accuracy"), "w") as f:
print(f"{acc:.3f}", file=f)
if not args.disable_dp:
final_epsilon = compute_epsilon(args.target_delta)
print(f"The trained model is (ε = {final_epsilon}, δ = {args.target_delta})-DP")
with open(os.path.join(args.output_dir, "epsilon"), "w") as f:
print(f"{final_epsilon:.3f}", file=f)
with open(os.path.join(args.output_dir, "seed_challenge"), "w") as f:
print(f"{args.seed_challenge}", file=f)
with open(os.path.join(args.output_dir, "seed_training"), "w") as f:
print(f"{args.seed_training}", file=f)
with open(os.path.join(args.output_dir, "seed_membership"), "w") as f:
print(f"{args.seed_membership}", file=f)
with open(os.path.join(args.output_dir, "solution.csv"), "w") as f:
solution = challenge_dataset.get_solutions()
csv.writer(f).writerow(solution)
torch.save(model.state_dict(), os.path.join(args.output_dir, "model.pt"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=int, metavar='ID',
help="an identifier for the trained model")
# Seeds
parser.add_argument("--train_seed", type=int, metavar='TS',
help="seed for reproducibility")
parser.add_argument("--split_seed", type=int, metavar='SS',
help="seed to deterministically generate the 3 seeds for creating splits "
"(--seed_challenge, --seed_trainig, seed_membership)")
parser.add_argument("--seed_challenge", type=int, metavar='SC',
help="seed to select challenge examples")
parser.add_argument("--seed_training", type=int, metavar='ST',
help="seed to select non-challenge training examples")
parser.add_argument("--seed_membership", type=int, metavar='SM',
help="seed to split challenge examples into members/non-members")
# Split lengths
parser.add_argument("--len_training", type=int, metavar="N", required=True,
help="(required) number of examples used for training")
parser.add_argument("--len_challenge", type=int, metavar="m", required=True,
help="(required) number of member and non-member challenge examples "
"(i.e., m members and m non-members)")
# General
parser.add_argument("--secure_mode", action="store_true", default=False,
help="whether to use Opacus secure mode for training (default=True)")
parser.add_argument("--disable_dp", action="store_true", default=False,
help="whether to disable differentially private training altogether (default=False)")
parser.add_argument("--dataloader_num_workers", type=int, metavar='W', default=2,
help="number of workers for data loading. 0 means that the data will be loaded in the main process (default=2). "
"See torch.utils.data.DataLoader")
parser.add_argument("--logging_steps", type=int, metavar='k', default=10,
help="prints accuracy, loss, and privacy accounting information during training every k logical batches "
"(default=10)")
parser.add_argument("--dataset_dir", type=str, metavar="DATA", default=".",
help="root directory for cached dataset (default='.')")
parser.add_argument("--output_dir", type=str, metavar="OUT",
help="output directory. If none given, will pick one based on hyperparameters")
# Training hyperparameters
parser.add_argument("--target_epsilon", type=float, metavar="EPSILON",
help="target DP epsilon. Required unless specifying --disable_dp")
parser.add_argument("--target_delta", type=float, metavar="DELTA",
help="target DP delta. Will use 1/N if unspecified")
parser.add_argument("--batch_size", type=int, metavar="L",
help="expected logical batch size; determines the sample rate of DP-SGD. "
"Actual batch size varies because batches are constructed using Poisson sampling")
parser.add_argument("--max_physical_batch_size", type=int, metavar="B",
help="maximum physical batch size. Use to simulate logical batches larger than available memory and "
"to safeguard against unusually large batches produces by Poisson sampling. "
"See opacus.utils.batch_memory_manager.BatchMemoryManager")
parser.add_argument("--num_epochs", metavar='E', type=int, default=10,
help="number of training epochs (default=10)")
parser.add_argument("--max_grad_norm", type=float, metavar='C', default=1.0,
help="clipping norm for per-sample gradients in DP-SGD (default=1.0)")
parser.add_argument("--learning_rate", type=float, metavar="LR", default=1.0,
help="initial learning rate (default=1.0)")
parser.add_argument("--lr_scheduler_gamma", type=float, metavar="GAMMA", default=1.0,
help="gamma parameter for exponential learning rate scheduler")
parser.add_argument("--lr_scheduler_step", type=int, metavar="S", default=1,
help="step size for exponential learning rate scheduler")
args = parser.parse_args()
if args.len_training is None:
raise ValueError("Please specify --len_training")
if args.len_challenge is None:
raise ValueError("Please specify --len_challenge")
# Parameter validation
if args.secure_mode and args.train_seed is not None:
raise ValueError("Specify either secure mode or a seed for reproducibility, but not both")
if args.target_delta is None:
args.target_delta = 1 / args.len_training
if args.split_seed is not None and (args.seed_challenge is not None or args.seed_training is not None or args.seed_membership is not None):
raise ValueError("A --split_seed was given to generate seeds to construct splits but at least one explicit seed was specified. Bailing out.")
if args.output_dir is None:
now = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
if args.disable_dp:
args.output_dir = f"{now}-nodp-lr{args.learning_rate}-gamma{args.lr_scheduler_gamma}-S{args.lr_scheduler_step}-L{args.batch_size}-" + \
f"E{args.num_epochs}"
else:
args.output_dir = f"{now}-eps{args.target_epsilon}-delta{args.target_delta}-lr{args.learning_rate}-" + \
f"gamma{args.lr_scheduler_gamma}-S{args.lr_scheduler_step}-L{args.batch_size}-E{args.num_epochs}-C{args.max_grad_norm}" + \
f"{'-secure' if args.secure_mode else ''}"
print(f"No --output_dir specified. Will use {args.output_dir}")
if args.model_id is not None:
args.output_dir = args.output_dir + f"_{args.model_id}"
os.makedirs(args.output_dir, exist_ok=True)
with open(os.path.join(args.output_dir, "arguments"), "w") as argfile:
try:
commit_hash = git.Repo(".", search_parent_directories=True).git.rev_parse("HEAD")
except git.exc.InvalidGitRepositoryError:
commit_hash = "unknown"
print(f"Commit hash: {commit_hash}")
print(f"# Commit hash: {commit_hash}", file=argfile)
for arg in vars(args):
print(f"--{arg} {getattr(args, arg)}")
print(f"--{arg} {getattr(args, arg)}", file=argfile)
main(args)
| 17,940 | 41.921053 | 149 | py |
MICO | MICO-main/training/train_sst2.py | import numpy as np
import pandas as pd
import os
import torch
import sys
import csv
import yaml
import warnings
import datasets
from opacus import PrivacyEngine
from dp_transformers import TrainingArguments, PrivacyArguments, PrivacyEngineCallback
from prv_accountant.dpsgd import find_noise_multiplier, DPSGDAccountant
from torchcsprng import create_mt19937_generator, create_random_device_generator
from transformers import (
HfArgumentParser, AutoTokenizer, AutoModelForSequenceClassification,
Trainer, EvalPrediction, PreTrainedTokenizerBase
)
from dataclasses import dataclass
from pathlib import Path
from mico_competition import ChallengeDataset, load_sst2
from typing import Optional
@dataclass
class ModelArguments:
model_name: str
@dataclass
class DataArguments:
model_index: int
len_training: int = 67349
len_challenge: int = 100
seed_challenge: Optional[int] = None
seed_training: Optional[int] = None
seed_membership: Optional[int] = None
split_seed: Optional[int] = None
@dataclass
class SecurePrivacyArguments(PrivacyArguments):
delta: float = None
use_secure_prng: bool = False
@dataclass
class Arguments:
training: TrainingArguments
model: ModelArguments
privacy: SecurePrivacyArguments
data: DataArguments
def preprocess_text(D: datasets.DatasetDict, tokenizer: PreTrainedTokenizerBase,
max_sequence_length: int = None) -> datasets.DatasetDict:
processed_data = D.map(
lambda batch: tokenizer(batch["sentence"], padding="max_length", max_length=max_sequence_length),
batched=True
)
return processed_data.remove_columns(["sentence"])
def load_dataset() -> datasets.DatasetDict:
if (args.data.seed_challenge is None or args.data.seed_training is None or args.data.seed_membership is None):
if args.data.split_seed is None:
seed_generator = create_random_device_generator()
else:
            seed_generator = create_mt19937_generator(args.data.split_seed)
args.data.seed_challenge, args.data.seed_training, args.data.seed_membership = torch.empty(
3, dtype=torch.int64).random_(0, to=None, generator=seed_generator)
print("Using generated seeds\n"
f" seed_challenge = {args.data.seed_challenge}\n"
f" seed_training = {args.data.seed_training}\n"
f" seed_membership = {args.data.seed_membership}\n")
else:
print("Using specified seeds")
full_dataset = load_sst2()
challenge_dataset = ChallengeDataset(
full_dataset,
len_challenge=args.data.len_challenge,
len_training=args.data.len_training,
seed_challenge=args.data.seed_challenge,
seed_training=args.data.seed_training,
seed_membership=args.data.seed_membership)
with open(os.path.join(args.training.output_dir, "challenge", "seed_challenge"), "w") as f:
print(f"{args.data.seed_challenge}", file=f)
with open(os.path.join(args.training.output_dir, "challenge", "seed_training"), "w") as f:
print(f"{args.data.seed_training}", file=f)
with open(os.path.join(args.training.output_dir, "challenge", "seed_membership"), "w") as f:
print(f"{args.data.seed_membership}", file=f)
with open(os.path.join(args.training.output_dir, "challenge", "solution.csv"), "w") as f:
solution = challenge_dataset.get_solutions()
csv.writer(f).writerow(solution)
ds_train = pd.DataFrame.from_records(challenge_dataset.get_train_dataset())
ds_test = pd.DataFrame.from_records(challenge_dataset.get_eval_dataset())
return datasets.DatasetDict({
"train": datasets.Dataset.from_pandas(ds_train),
"test": datasets.Dataset.from_pandas(ds_test)
}).remove_columns("idx")
def main(args: Arguments):
output_dir = Path(args.training.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
with open(os.path.join(args.training.output_dir, "arguments.yml"), "w") as f:
yaml.dump(args, f)
print(yaml.dump(args))
os.mkdir(output_dir/"challenge")
ds = load_dataset()
if args.privacy.use_secure_prng:
import torchcsprng as csprng
mt19937_gen = csprng.create_mt19937_generator()
ds['train'] = ds['train'].select(torch.randperm(len(ds['train']), generator=mt19937_gen).tolist())
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
warnings.filterwarnings(action="ignore", module="torch", message=".*Using a non-full backward hook")
model = AutoModelForSequenceClassification.from_pretrained(args.model.model_name, num_labels=2)
tokenizer = AutoTokenizer.from_pretrained(args.model.model_name)
ds = preprocess_text(ds, tokenizer=tokenizer, max_sequence_length=67)
model.train()
model = model.to(args.training.device)
if (not args.training.no_cuda) and (not torch.cuda.is_available()):
raise RuntimeError("CUDA is not available. Please use --no-cuda to run this script.")
callbacks = []
if not args.privacy.disable_dp:
sampling_probability = training_args.train_batch_size * training_args.gradient_accumulation_steps / len(ds["train"])
num_steps = int(np.ceil(1 / sampling_probability) * training_args.num_train_epochs)
noise_multiplier = find_noise_multiplier(
sampling_probability=sampling_probability, num_steps=num_steps, target_epsilon=args.privacy.target_epsilon,
target_delta=args.privacy.delta,
eps_error=0.1
)
engine = PrivacyEngine(
module=model,
batch_size=training_args.per_device_train_batch_size*training_args.gradient_accumulation_steps,
sample_size=len(ds['train']),
noise_multiplier=noise_multiplier,
max_grad_norm=args.privacy.per_sample_max_grad_norm,
secure_rng=args.privacy.use_secure_prng,
)
accountant = DPSGDAccountant(
noise_multiplier=noise_multiplier, sampling_probability=sampling_probability, max_steps=num_steps,
eps_error=0.2
)
privacy_callback = PrivacyEngineCallback(
engine,
compute_epsilon=lambda s: accountant.compute_epsilon(num_steps=s, delta=args.privacy.delta)[2]
)
callbacks.append(privacy_callback)
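    # Back-of-the-envelope illustration of the accounting inputs above (numbers are
    # hypothetical, not taken from any particular run): with a per-device train batch
    # size of 32 on a single device, no gradient accumulation, and the default
    # len_training of 67349, sampling_probability = 32 / 67349 ≈ 4.75e-4, one epoch
    # corresponds to ceil(1 / 4.75e-4) = 2105 steps, and find_noise_multiplier picks a
    # noise multiplier whose accounted epsilon after num_steps = 2105 * num_train_epochs
    # matches target_epsilon (up to eps_error).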
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.argmax(preds, axis=1)
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
trainer = Trainer(
args=training_args,
train_dataset=ds["train"],
eval_dataset=ds["test"],
model=model,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
callbacks=callbacks
)
try:
trainer.train()
finally:
trainer.save_model(output_dir/"challenge")
if args.privacy.disable_dp:
epsilon_final = float('inf')
else:
epsilon_final = accountant.compute_epsilon(num_steps=engine.steps, delta=args.privacy.delta)[2]
trainer.log({"epsilon_final": epsilon_final})
assert np.isclose(epsilon_final, args.privacy.target_epsilon, atol=0.2, rtol=0.0)
print("Training successful. Exiting...")
return 0
if __name__ == "__main__":
parser = HfArgumentParser((TrainingArguments, ModelArguments, SecurePrivacyArguments, DataArguments))
training_args, model_args, privacy_args, data_args = parser.parse_args_into_dataclasses()
args = Arguments(training=training_args, model=model_args, privacy=privacy_args, data=data_args)
sys.exit(main(args))
| 7,676 | 35.042254 | 124 | py |
MICO | MICO-main/training/train_cifar10.py | import os
import argparse
import warnings
import git
import csv
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchcsprng import create_mt19937_generator, create_random_device_generator
from torch.utils.data import DataLoader
from opacus import PrivacyEngine
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager
from prv_accountant.dpsgd import find_noise_multiplier
from accountant import PRVAccountant
from mico_competition import ChallengeDataset, CNN, load_cifar10
from tqdm import tqdm, trange
from datetime import datetime
from typing import Callable, Optional
def accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
return (preds == labels).mean()
def train(args: argparse.Namespace,
model: nn.Module,
device: torch.device,
train_loader: DataLoader,
criterion,
optimizer: optim.Optimizer,
epoch: int,
compute_epsilon: Optional[Callable[[int], float]] = None):
model.train()
losses = []
top1_acc = []
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=args.max_physical_batch_size,
optimizer=optimizer
) as memory_safe_data_loader:
if args.disable_dp:
data_loader = train_loader
else:
data_loader = memory_safe_data_loader
# BatchSplittingSampler.__len__() approximates (badly) the length in physical batches
# See https://github.com/pytorch/opacus/issues/516
# We instead heuristically keep track of logical batches processed
pbar = tqdm(data_loader, desc="Batch", unit="batch", position=1, leave=True, total=len(train_loader), disable=None)
logical_batch_len = 0
for i, (inputs, target) in enumerate(data_loader):
inputs = inputs.to(device)
target = target.to(device)
logical_batch_len += len(target)
if logical_batch_len >= args.batch_size:
pbar.update(1)
logical_batch_len = logical_batch_len % args.max_physical_batch_size
optimizer.zero_grad()
output = model(inputs)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc)
loss.backward()
optimizer.step()
if (pbar.n + 1) % args.logging_steps == 0 or (pbar.n + 1) == pbar.total:
if not args.disable_dp:
epsilon = compute_epsilon(delta=args.target_delta)
pbar.set_postfix(
epoch=f"{epoch:02}",
train_loss=f"{np.mean(losses):.3f}",
accuracy=f"{np.mean(top1_acc) * 100:.3f}",
dp=f"(ε={epsilon:.2f}, δ={args.target_delta})"
)
else:
pbar.set_postfix(
epoch=f"{epoch:02}",
train_loss=f"{np.mean(losses):.3f}",
accuracy=f"{np.mean(top1_acc) * 100:.3f}",
dp="(ε = ∞, δ = 0)"
)
pbar.update(pbar.total - pbar.n)
def test(args: argparse.Namespace,
model: nn.Module,
device: torch.device,
test_loader: DataLoader,
criterion):
model.eval()
losses = []
top1_acc = []
with torch.no_grad():
for inputs, target in tqdm(test_loader, desc="Test ", unit="batch", disable=None):
inputs = inputs.to(device)
target = target.to(device)
output = model(inputs)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc)
top1_avg = np.mean(top1_acc)
loss_avg = np.mean(losses)
print(
f"Test Loss : {loss_avg:.6f}\n"
f"Test Accuracy: {top1_avg * 100:.6f}"
)
return np.mean(top1_acc)
def main(args: argparse.Namespace):
noise_generator = None
if not args.secure_mode and args.train_seed is not None:
# Following the advice on https://pytorch.org/docs/1.8.1/notes/randomness.html
if torch.cuda.is_available():
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
torch.use_deterministic_algorithms(True)
torch.cuda.manual_seed(args.train_seed)
torch.cuda.manual_seed_all(args.train_seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
import random
random.seed(args.train_seed)
os.environ['PYTHONHASHSEED'] = str(args.train_seed)
# Required to get deterministic batches because Opacus uses secure_rng as a generator for
# train_loader when poisson_sampling = True even though secure_mode = False, which sets secure_rng = None
# https://github.com/pytorch/opacus/blob/5e632cdb8d497aade29e5555ad79921c239c78f7/opacus/privacy_engine.py#L206
torch.manual_seed(args.train_seed)
np.random.seed(args.train_seed)
noise_generator = create_mt19937_generator(args.train_seed)
if (args.seed_challenge is None or args.seed_training is None or args.seed_membership is None):
if args.split_seed is None:
seed_generator = create_random_device_generator()
else:
seed_generator = create_mt19937_generator(args.split_seed)
args.seed_challenge, args.seed_training, args.seed_membership = torch.empty(
3, dtype=torch.int64).random_(0, to=None, generator=seed_generator)
print("Using generated seeds\n"
f" seed_challenge = {args.seed_challenge}\n"
f" seed_training = {args.seed_training}\n"
f" seed_membership = {args.seed_membership}\n")
else:
print("Using specified seeds")
full_dataset = load_cifar10(dataset_dir=args.dataset_dir, download=False)
challenge_dataset = ChallengeDataset(
full_dataset,
len_challenge=args.len_challenge,
len_training=args.len_training,
seed_challenge=args.seed_challenge,
seed_training=args.seed_training,
seed_membership=args.seed_membership)
train_dataset = challenge_dataset.get_train_dataset()
test_dataset = challenge_dataset.get_eval_dataset()
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
num_workers=args.dataloader_num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_dataset,
batch_size=args.max_physical_batch_size,
num_workers=args.dataloader_num_workers
)
    # Suppress warnings
warnings.filterwarnings(action="ignore", module="opacus", message=".*Secure RNG turned off")
warnings.filterwarnings(action="ignore", module="torch", message=".*Using a non-full backward hook")
model = CNN()
assert ModuleValidator.is_valid(model)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0)
# Not the same as args.batch_size / len(train_dataset)
args.sample_rate = 1 / len(train_loader)
num_steps = int(len(train_loader) * args.num_epochs)
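    # Illustrative numbers (hypothetical, not from a specific run): with
    # len_training = 50000 and batch_size = 512, len(train_loader) = 98 logical
    # batches, so sample_rate ≈ 0.0102 and num_steps = 98 * num_epochs; under
    # Poisson sampling each logical batch then has expected size
    # sample_rate * len(train_dataset) ≈ 510 rather than exactly 512.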
if not args.disable_dp:
args.noise_multiplier = find_noise_multiplier(
sampling_probability=args.sample_rate,
num_steps=num_steps,
target_epsilon=args.target_epsilon,
target_delta=args.target_delta,
eps_error=0.1
)
privacy_engine = PrivacyEngine(secure_mode=args.secure_mode)
# Override Opacus accountant
# Revise if https://github.com/pytorch/opacus/pull/493 is merged
privacy_engine.accountant = PRVAccountant(
noise_multiplier=args.noise_multiplier,
sample_rate=args.sample_rate,
max_steps=num_steps,
eps_error=0.1,
delta_error=1e-9)
model, optimizer, train_loader = privacy_engine.make_private(
module=model,
optimizer=optimizer,
data_loader=train_loader,
noise_multiplier=args.noise_multiplier,
max_grad_norm=args.max_grad_norm,
poisson_sampling=True,
noise_generator=noise_generator
)
print(f"Training using DP-SGD with {optimizer.original_optimizer.__class__.__name__} optimizer\n"
f" noise multiplier σ = {optimizer.noise_multiplier},\n"
f" clipping norm C = {optimizer.max_grad_norm:},\n"
f" average batch size L = {args.batch_size},\n"
f" sample rate = {args.sample_rate},\n"
f" for {args.num_epochs} epochs ({num_steps} steps)\n"
f" to target ε = {args.target_epsilon}, δ = {args.target_delta}")
compute_epsilon: Optional[Callable[[float], float]] = lambda delta: privacy_engine.get_epsilon(delta=delta)
else:
print(f"Training using SGD with {optimizer.__class__.__name__} optimizer\n"
f" batch size L = {args.batch_size},\n"
f" for {args.num_epochs} epochs ({num_steps} steps)")
compute_epsilon = None
# Must be initialized after attaching the privacy engine.
# See https://discuss.pytorch.org/t/how-to-use-lr-scheduler-in-opacus/111718
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_scheduler_step, gamma=args.lr_scheduler_gamma)
pbar = trange(args.num_epochs, desc="Epoch", unit="epoch", position=0, leave=True, disable=None)
for epoch in pbar:
pbar.set_postfix(lr=f"{scheduler.get_last_lr()}")
train(args, model, device, train_loader, criterion, optimizer, epoch + 1, compute_epsilon=compute_epsilon)
scheduler.step()
acc = test(args, model, device, test_loader, criterion)
with open(os.path.join(args.output_dir, "accuracy"), "w") as f:
print(f"{acc:.3f}", file=f)
if not args.disable_dp:
final_epsilon = compute_epsilon(args.target_delta)
print(f"The trained model is (ε = {final_epsilon}, δ = {args.target_delta})-DP")
with open(os.path.join(args.output_dir, "epsilon"), "w") as f:
print(f"{final_epsilon:.3f}", file=f)
with open(os.path.join(args.output_dir, "seed_challenge"), "w") as f:
print(f"{args.seed_challenge}", file=f)
with open(os.path.join(args.output_dir, "seed_training"), "w") as f:
print(f"{args.seed_training}", file=f)
with open(os.path.join(args.output_dir, "seed_membership"), "w") as f:
print(f"{args.seed_membership}", file=f)
with open(os.path.join(args.output_dir, "solution.csv"), "w") as f:
solution = challenge_dataset.get_solutions()
csv.writer(f).writerow(solution)
torch.save(model.state_dict(), os.path.join(args.output_dir, "model.pt"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=int, metavar='ID',
help="an identifier for the trained model")
# Seeds
parser.add_argument("--train_seed", type=int, metavar='TS',
help="seed for reproducibility")
parser.add_argument("--split_seed", type=int, metavar='SS',
help="seed to deterministically generate the 3 seeds for creating splits "
"(--seed_challenge, --seed_trainig, seed_membership)")
parser.add_argument("--seed_challenge", type=int, metavar='SC',
help="seed to select challenge examples")
parser.add_argument("--seed_training", type=int, metavar='ST',
help="seed to select non-challenge training examples")
parser.add_argument("--seed_membership", type=int, metavar='SM',
help="seed to split challenge examples into members/non-members")
# Split lengths
parser.add_argument("--len_training", type=int, metavar="N", required=True,
help="(required) number of examples used for training")
parser.add_argument("--len_challenge", type=int, metavar="m", required=True,
help="(required) number of member and non-member challenge examples "
"(i.e., m members and m non-members)")
# General
parser.add_argument("--secure_mode", action="store_true", default=False,
help="whether to use Opacus secure mode for training (default=True)")
parser.add_argument("--disable_dp", action="store_true", default=False,
help="whether to disable differentially private training altogether (default=False)")
parser.add_argument("--dataloader_num_workers", type=int, metavar='W', default=2,
help="number of workers for data loading. 0 means that the data will be loaded in the main process (default=2). "
"See torch.utils.data.DataLoader")
parser.add_argument("--logging_steps", type=int, metavar='k', default=10,
help="prints accuracy, loss, and privacy accounting information during training every k logical batches "
"(default=10)")
parser.add_argument("--dataset_dir", type=str, metavar="DATA", default=".",
help="root directory for cached dataset (default='.')")
parser.add_argument("--output_dir", type=str, metavar="OUT",
help="output directory. If none given, will pick one based on hyperparameters")
# Training hyperparameters
parser.add_argument("--target_epsilon", type=float, metavar="EPSILON",
help="target DP epsilon. Required unless specifying --disable_dp")
parser.add_argument("--target_delta", type=float, metavar="DELTA",
help="target DP delta. Will use 1/N if unspecified")
parser.add_argument("--batch_size", type=int, metavar="L",
help="expected logical batch size; determines the sample rate of DP-SGD. "
"Actual batch size varies because batches are constructed using Poisson sampling")
parser.add_argument("--max_physical_batch_size", type=int, metavar="B",
help="maximum physical batch size. Use to simulate logical batches larger than available memory and "
"to safeguard against unusually large batches produces by Poisson sampling. "
"See opacus.utils.batch_memory_manager.BatchMemoryManager")
parser.add_argument("--num_epochs", metavar='E', type=int, default=10,
help="number of training epochs (default=10)")
parser.add_argument("--max_grad_norm", type=float, metavar='C', default=1.0,
help="clipping norm for per-sample gradients in DP-SGD (default=1.0)")
parser.add_argument("--learning_rate", type=float, metavar="LR", default=1.0,
help="initial learning rate (default=1.0)")
parser.add_argument("--lr_scheduler_gamma", type=float, metavar="GAMMA", default=1.0,
help="gamma parameter for exponential learning rate scheduler")
parser.add_argument("--lr_scheduler_step", type=int, metavar="S", default=1,
help="step size for exponential learning rate scheduler")
args = parser.parse_args()
if args.len_training is None:
raise ValueError("Please specify --len_training")
if args.len_challenge is None:
raise ValueError("Please specify --len_challenge")
# Parameter validation
if args.secure_mode and args.train_seed is not None:
raise ValueError("Specify either secure mode or a seed for reproducibility, but not both")
if args.target_delta is None:
args.target_delta = 1 / args.len_training
if args.split_seed is not None and (args.seed_challenge is not None or args.seed_training is not None or args.seed_membership is not None):
raise ValueError("A --split_seed was given to generate seeds to construct splits but at least one explicit seed was specified. Bailing out.")
if args.output_dir is None:
now = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
if args.disable_dp:
args.output_dir = f"{now}-nodp-lr{args.learning_rate}-gamma{args.lr_scheduler_gamma}-S{args.lr_scheduler_step}-L{args.batch_size}-" + \
f"E{args.num_epochs}"
else:
args.output_dir = f"{now}-eps{args.target_epsilon}-delta{args.target_delta}-lr{args.learning_rate}-" + \
f"gamma{args.lr_scheduler_gamma}-S{args.lr_scheduler_step}-L{args.batch_size}-E{args.num_epochs}-C{args.max_grad_norm}" + \
f"{'-secure' if args.secure_mode else ''}"
print(f"No --output_dir specified. Will use {args.output_dir}")
if args.model_id is not None:
args.output_dir = args.output_dir + f"_{args.model_id}"
os.makedirs(args.output_dir, exist_ok=True)
with open(os.path.join(args.output_dir, "arguments"), "w") as argfile:
try:
commit_hash = git.Repo(".", search_parent_directories=True).git.rev_parse("HEAD")
except git.exc.InvalidGitRepositoryError:
commit_hash = "unknown"
print(f"Commit hash: {commit_hash}")
print(f"# Commit hash: {commit_hash}", file=argfile)
for arg in vars(args):
print(f"--{arg} {getattr(args, arg)}")
print(f"--{arg} {getattr(args, arg)}", file=argfile)
main(args)
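# Illustrative invocation (values are hypothetical; adjust to your setup):
# python train_cifar10.py --len_training 50000 --len_challenge 100 \
#     --target_epsilon 4.0 --batch_size 512 --max_physical_batch_size 128 \
#     --num_epochs 30 --max_grad_norm 1.0 --learning_rate 0.5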
| 17,963 | 41.976077 | 149 | py |
MICO | MICO-main/src/mico-competition/mico.py | from __future__ import annotations
import os
import torch
import torch.nn as nn
from collections import OrderedDict
from typing import List, Optional, Union, Type, TypeVar
from torch.utils.data import Dataset, ConcatDataset, random_split
D = TypeVar("D", bound="ChallengeDataset")
LEN_CHALLENGE = 100
class ChallengeDataset:
"""Reconstructs the data splits associated with a model from stored seeds.
Given a `torch.utils.Dataset`, the desired length of the training dataset `n`,
and the desired number of members/non-member challenge examples `m`, it uses
`torch.utils.data.random_split` with the stored seeds to produce:
- `challenge` : `2m` challenge examples
    - `nonmember` : `m` non-member challenge examples from `challenge`
    - `member`    : `m` member challenge examples from `challenge`
- `training` : non-challenge examples to use for model training
- `evaluation`: non-challenge examples to use for model evaluation
Use `get_training_dataset` to construct the full training dataset
(the concatenation of `member` and `training`) to train a model.
Use `get_eval_dataset` to retrieve `evaluation`. Importantly, do not
attempt to use `nonmember` for model evaluation, as releasing the
evaluation results would leak membership information.
The diagram below details the process, where arrows denote calls to
`torch.utils.data.random_split` and `N = len(dataset)`:
┌────────────────────────────────────────────────────────────┐
│ dataset │
└──────────────────────────────┬─────────────────────────────┘
│N
seed_challenge │
┌────────────────────┴────────┐
│2m │N - 2m
▼ ▼
┌───────────────────┬────────────────────────────────────────┐
│ challenge │ rest │
└─────────┬─────────┴───────────────────┬────────────────────┘
│2m │N - 2m
seed_membership │ seed_training │
┌────┴────┐ ┌─────────┴────────┐
│m │m │n - m │N - n - m
▼ ▼ ▼ ▼
┌─────────┬─────────┬───────────────────┬────────────────────┐
│nonmember│ member │ training │ evaluation │
└─────────┴─────────┴───────────────────┴────────────────────┘
- Models are trained on `member + training` and evaluated on `evaluation`
- Standard scenarios disclose `challenge` (equivalently, `seed_challenge`)
- DP distinguisher scenarios also disclose `training` and `evaluation` (equivalently, `seed_training`)
- To disclose ground truth, disclose `nonmember` and `member` (equivalently, `seed_membership`)
"""
def __init__(self, dataset: Dataset, len_training: int, len_challenge: int,
seed_challenge: int, seed_training: Optional[int], seed_membership: Optional[int]) -> None:
"""Pseudorandomly select examples for `challenge`, `non-member`, `member`, `training`, and `evaluation`
splits from given seeds. Only the seed for `challenge` is mandatory.
Args:
dataset (Dataset): Dataset to select examples from.
len_training (int): Length of the training dataset.
len_challenge (int): Number of challenge examples (`len_challenge` members and `len_challenge` non-members).
seed_challenge (int): Seed to select challenge examples.
seed_training (Optional[int]): Seed to select non-challenge training examples.
seed_membership (Optional[int]): Seed to split challenge examples into members/non-members.
"""
from torchcsprng import create_mt19937_generator
challenge_gen = create_mt19937_generator(seed_challenge)
self.challenge, self.rest = random_split(
dataset,
[2 * len_challenge, len(dataset) - 2 * len_challenge],
generator = challenge_gen)
        self.training = None
        self.evaluation = None
        self.nonmember = None
        self.member = None
        if seed_training is not None:
training_gen = create_mt19937_generator(seed_training)
self.training, self.evaluation = random_split(
self.rest,
[len_training - len_challenge, len(dataset) - len_training - len_challenge],
generator = training_gen)
if seed_membership is not None:
membership_gen = create_mt19937_generator(seed_membership)
self.nonmember, self.member = random_split(
self.challenge,
[len_challenge, len_challenge],
generator = membership_gen)
def get_challenges(self) -> Dataset:
"""Returns the challenge dataset.
Returns:
Dataset: The challenge examples.
"""
return self.challenge
def get_train_dataset(self) -> Dataset:
"""Returns the training dataset.
Raises:
ValueError: If the seed to select non-challenge training examples has not been set.
ValueError: If the seed to split challenges into members/non-members has not been set.
Returns:
Dataset: The training dataset.
"""
if self.training is None:
raise ValueError("The seed to generate the training dataset has not been set.")
if self.member is None:
raise ValueError("The seed to split challenges into members/non-members has not been set.")
return ConcatDataset([self.member, self.training])
def get_eval_dataset(self) -> Dataset:
"""Returns the evaluation dataset.
Raises:
ValueError: If the seed to generate the evaluation dataset has not been set.
Returns:
Dataset: The evaluation dataset.
"""
if self.evaluation is None:
raise ValueError("The seed to generate the evaluation dataset has not been set.")
return self.evaluation
def get_solutions(self) -> List:
"""Returns the membership labels of the challenges.
Raises:
            ValueError: If the seed to split challenges into members/non-members has not been set.
Returns:
List: The list of membership labels for challenges, indexed as in the
Dataset returned by `get_challenges()`.
"""
if self.member is None:
raise ValueError("The seed to split challenges into members/non-members has not been set.")
member_indices = set(self.challenge.indices[i] for i in self.member.indices)
labels = [1 if i in member_indices else 0 for i in self.challenge.indices]
return labels
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], dataset: Dataset, len_training: int, len_challenge: int=LEN_CHALLENGE) -> D:
"""Loads a ChallengeDataset from a directory `path`.
The directory must contain, at a minimum, the file `seed_challenge`.
Args:
path (str): Path to the folder containing the dataset.
Returns:
ChallengeDataset: The loaded ChallengeDataset.
"""
# Load the seeds.
if os.path.exists(os.path.join(path, "seed_challenge")):
with open(os.path.join(path, "seed_challenge"), "r") as f:
seed_challenge = int(f.read())
else:
raise Exception(f"`seed_challenge` was not found in {path}")
seed_training = None
if os.path.exists(os.path.join(path, "seed_training")):
with open(os.path.join(path, "seed_training"), "r") as f:
seed_training = int(f.read())
seed_membership = None
if os.path.exists(os.path.join(path, "seed_membership")):
with open(os.path.join(path, "seed_membership"), "r") as f:
seed_membership = int(f.read())
return cls(
dataset=dataset,
len_training=len_training,
len_challenge=len_challenge,
seed_challenge=seed_challenge,
seed_training=seed_training,
seed_membership=seed_membership
)
X = TypeVar("X", bound="CNN")
class CNN(nn.Module):
def __init__(self):
super().__init__()
self.cnn = nn.Sequential(
nn.Conv2d(3, 128, kernel_size=8, stride=2, padding=3), nn.Tanh(),
nn.MaxPool2d(kernel_size=3, stride=1),
nn.Conv2d(128, 256, kernel_size=3), nn.Tanh(),
nn.Conv2d(256, 256, kernel_size=3), nn.Tanh(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Flatten(),
nn.Linear(in_features=6400, out_features=10)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# shape of x is [B, 3, 32, 32] for CIFAR10
logits = self.cnn(x)
return logits
@classmethod
def load(cls: Type[X], path: Union[str, os.PathLike]) -> X:
model = cls()
state_dict = torch.load(path)
new_state_dict = OrderedDict((k.replace('_module.', ''), v) for k, v in state_dict.items())
model.load_state_dict(new_state_dict)
model.eval()
return model
Y = TypeVar("Y", bound="MLP")
class MLP(nn.Module):
"""
The fully-connected network architecture from Bao et al. (2022).
"""
def __init__(self):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(600, 128), nn.Tanh(),
nn.Linear(128, 100)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.mlp(x)
@classmethod
def load(cls: Type[Y], path: Union[str, os.PathLike]) -> Y:
model = cls()
state_dict = torch.load(path)
new_state_dict = OrderedDict((k.replace('_module.', ''), v) for k, v in state_dict.items())
model.load_state_dict(new_state_dict)
model.eval()
return model
def load_model(task: str, path: Union[str, os.PathLike]) -> nn.Module:
if task == 'cifar10':
return CNN.load(os.path.join(path, 'model.pt'))
elif task == 'purchase100':
return MLP.load(os.path.join(path, 'model.pt'))
elif task == 'sst2':
from transformers import AutoModelForSequenceClassification
# tokenizer = AutoTokenizer.from_pretrained('roberta-base')
model = AutoModelForSequenceClassification.from_pretrained(path, num_labels=2)
model.eval()
return model
else:
raise ValueError("`task` must be one of {'cifar10', 'purchase100', 'sst2'}")
| 10,705 | 39.55303 | 139 | py |
MICO | MICO-main/src/mico-competition/challenge_datasets.py | import os
import numpy as np
import torch
from torch.utils.data import Dataset, ConcatDataset
def load_cifar10(dataset_dir: str = ".", download=True) -> Dataset:
"""Loads the CIFAR10 dataset.
"""
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
# Precomputed statistics of CIFAR10 dataset
# Exact values are assumed to be known, but can be estimated with a modest privacy budget
# Opacus wrongly uses CIFAR10_STD = (0.2023, 0.1994, 0.2010)
# This is the _average_ std across all images (see https://github.com/kuangliu/pytorch-cifar/issues/8)
CIFAR10_MEAN = (0.49139968, 0.48215841, 0.44653091)
CIFAR10_STD = (0.24703223, 0.24348513, 0.26158784)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR10_MEAN, CIFAR10_STD)
])
# NB: torchvision checks the integrity of downloaded files
train_dataset = CIFAR10(
root=f"{dataset_dir}/cifar10",
train=True,
download=download,
transform=transform
)
test_dataset = CIFAR10(
root=f"{dataset_dir}/cifar10",
train=False,
download=download,
transform=transform
)
return ConcatDataset([train_dataset, test_dataset])
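# Note: the returned ConcatDataset has 60,000 examples (the 50,000 CIFAR-10 training
# images followed by the 10,000 test images), so challenge splits can be drawn from
# the full corpus.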
def load_sst2() -> Dataset:
"""Loads the SST2 dataset.
"""
import datasets
# Specify cache_dir as argument?
ds = datasets.load_dataset("glue", "sst2")
return ConcatDataset([ds['train'], ds['validation']])
class Purchase100(Dataset):
"""
Purchase100 dataset pre-processed by Shokri et al.
(https://github.com/privacytrustlab/datasets/blob/master/dataset_purchase.tgz).
We save the dataset in a .pickle version because it is much faster to load
than the original file.
"""
def __init__(self, dataset_dir: str) -> None:
import pickle
dataset_path = os.path.join(dataset_dir, 'purchase100', 'dataset_purchase')
# Saving the dataset in pickle format because it is quicker to load.
dataset_path_pickle = dataset_path + '.pickle'
if not os.path.exists(dataset_path) and not os.path.exists(dataset_path_pickle):
raise ValueError("Purchase-100 dataset not found.\n"
"You may download the dataset from https://www.comp.nus.edu.sg/~reza/files/datasets.html\n"
f"and unzip it in the {dataset_dir}/purchase100 directory")
if not os.path.exists(dataset_path_pickle):
print('Found the dataset. Saving it in a pickle file that takes less time to load...')
purchase = np.loadtxt(dataset_path, dtype=int, delimiter=',')
with open(dataset_path_pickle, 'wb') as f:
pickle.dump({'dataset': purchase}, f)
with open(dataset_path_pickle, 'rb') as f:
dataset = pickle.load(f)['dataset']
self.labels = list(dataset[:, 0] - 1)
self.records = torch.FloatTensor(dataset[:, 1:])
assert len(self.labels) == len(self.records), f'ERROR: {len(self.labels)} and {len(self.records)}'
print('Successfully loaded the Purchase-100 dataset consisting of',
f'{len(self.records)} records and {len(self.records[0])}', 'attributes.')
def __len__(self) -> int:
return len(self.records)
def __getitem__(self, idx: int):
return self.records[idx], self.labels[idx]
def load_purchase100(dataset_dir: str = ".") -> Dataset:
"""Loads the Purchase-100 dataset.
"""
return Purchase100(dataset_dir)
| 3,560 | 34.61 | 120 | py |
pineko | pineko-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import pathlib
import pineko
here = pathlib.Path(__file__).absolute().parent
# -- Project information -----------------------------------------------------
project = "pineko"
copyright = "2023, the PineLine team"
author = "the PineLine team"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
"sphinxcontrib.bibtex",
"sphinx.ext.napoleon",
"sphinx.ext.graphviz",
"sphinx.ext.extlinks",
]
autosectionlabel_prefix_document = True
# autosectionlabel_maxdepth = 10
# Allow to embed rst syntax in markdown files.
enable_eval_rst = True
# The master toctree document.
master_doc = "index"
bibtex_bibfiles = ["refs.bib"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
".rst": "restructuredtext",
".txt": "restructuredtext",
}
use_index = True
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["shared/*"]
# A string to be included at the beginning of all files
shared = here / "shared"
rst_prolog = "\n".join(
[x.read_text(encoding="utf-8") for x in pathlib.Path(shared).glob("*.rst")]
)
extlinks = {
"yadism": ("https://n3pdf.github.io/yadism/%s", "yadism"),
"banana": ("https://n3pdf.github.io/banana/%s", "banana"),
"pineappl": ("https://n3pdf.github.io/pineappl/%s", "pineappl"),
"eko": ("https://github.com/N3PDF/eko/%s", "eko"),
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
html_css_files = [
"site.css",
]
| 3,189 | 30.27451 | 79 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_elas.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
PATH_Sigma = os.path.join(args.data_path, './Meshes/Random_UnitCell_sigma_10.npy')
PATH_XY = os.path.join(args.data_path, './Meshes/Random_UnitCell_XY_10.npy')
PATH_rr = os.path.join(args.data_path, './Meshes/Random_UnitCell_rr_10.npy')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model, model_iphi = get_model(args)
print(count_params(model), count_params(model_iphi))
params = list(model.parameters()) + list(model_iphi.parameters())
################################################################
# load data and data normalization
################################################################
input_rr = np.load(PATH_rr)
input_rr = torch.tensor(input_rr, dtype=torch.float).permute(1, 0)
input_s = np.load(PATH_Sigma)
input_s = torch.tensor(input_s, dtype=torch.float).permute(1, 0).unsqueeze(-1)
input_xy = np.load(PATH_XY)
input_xy = torch.tensor(input_xy, dtype=torch.float).permute(2, 0, 1)
train_rr = input_rr[:ntrain]
test_rr = input_rr[-ntest:]
train_s = input_s[:ntrain]
test_s = input_s[-ntest:]
train_xy = input_xy[:ntrain]
test_xy = input_xy[-ntest:]
print(train_rr.shape, train_s.shape, train_xy.shape)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(train_rr, train_s, train_xy),
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(test_rr, test_s, test_xy),
batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(params, lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
N_sample = 1000
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for rr, sigma, mesh in train_loader:
rr, sigma, mesh = rr.cuda(), sigma.cuda(), mesh.cuda()
samples_x = torch.rand(batch_size, N_sample, 2).cuda() * 3 - 1
optimizer.zero_grad()
out = model(mesh, code=rr, iphi=model_iphi)
samples_xi = model_iphi(samples_x, code=rr)
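        # note: samples_xi is only computed here (e.g. for an optional regularizer on
        # the learned coordinate map model_iphi); it does not enter loss_data below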
loss_data = myloss(out.view(batch_size, -1), sigma.view(batch_size, -1))
loss = loss_data
loss.backward()
optimizer.step()
train_l2 += loss_data.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for rr, sigma, mesh in test_loader:
rr, sigma, mesh = rr.cuda(), sigma.cuda(), mesh.cuda()
out = model(mesh, code=rr, iphi=model_iphi)
test_l2 += myloss(out.view(batch_size, -1), sigma.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name)) | 4,143 | 33.823529 | 103 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_airfoils.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
INPUT_X = os.path.join(args.data_path, './naca/NACA_Cylinder_X.npy')
INPUT_Y = os.path.join(args.data_path, './naca/NACA_Cylinder_Y.npy')
OUTPUT_Sigma = os.path.join(args.data_path, './naca/NACA_Cylinder_Q.npy')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
inputX = np.load(INPUT_X)
inputX = torch.tensor(inputX, dtype=torch.float)
inputY = np.load(INPUT_Y)
inputY = torch.tensor(inputY, dtype=torch.float)
input = torch.stack([inputX, inputY], dim=-1)
output = np.load(OUTPUT_Sigma)[:, 4]
output = torch.tensor(output, dtype=torch.float)
print(input.shape, output.shape)
x_train = input[:ntrain, ::r1, ::r2][:, :s1, :s2]
y_train = output[:ntrain, ::r1, ::r2][:, :s1, :s2]
x_test = input[ntrain:ntrain + ntest, ::r1, ::r2][:, :s1, :s2]
y_test = output[ntrain:ntrain + ntest, ::r1, ::r2][:, :s1, :s2]
x_train = x_train.reshape(ntrain, s1, s2, 2)
x_test = x_test.reshape(ntest, s1, s2, 2)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x)
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x)
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
# plot
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name))
ind = -1
X = x[ind, :, :, 0].squeeze().detach().cpu().numpy()
Y = x[ind, :, :, 1].squeeze().detach().cpu().numpy()
truth = y[ind].squeeze().detach().cpu().numpy()
pred = out[ind].squeeze().detach().cpu().numpy()
nx = 40 // r1
ny = 20 // r2
X_small = X[nx:-nx, :ny]
Y_small = Y[nx:-nx, :ny]
truth_small = truth[nx:-nx, :ny]
pred_small = pred[nx:-nx, :ny]
fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(16, 16))
ax[0, 0].pcolormesh(X, Y, truth, shading='gouraud')
ax[1, 0].pcolormesh(X, Y, pred, shading='gouraud')
ax[2, 0].pcolormesh(X, Y, pred - truth, shading='gouraud')
ax[0, 1].pcolormesh(X_small, Y_small, truth_small, shading='gouraud')
ax[1, 1].pcolormesh(X_small, Y_small, pred_small, shading='gouraud')
ax[2, 1].pcolormesh(X_small, Y_small, np.abs(pred_small - truth_small), shading='gouraud')
fig.show()
| 4,794 | 32.767606 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_elas_interp.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
INPUT_PATH = os.path.join(args.data_path, './Interp/Random_UnitCell_mask_10_interp.npy')
OUTPUT_PATH = os.path.join(args.data_path, './Interp/Random_UnitCell_sigma_10_interp.npy')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
input = np.load(INPUT_PATH)
input = torch.tensor(input, dtype=torch.float).permute(2, 0, 1)
output = np.load(OUTPUT_PATH)
output = torch.tensor(output, dtype=torch.float).permute(2, 0, 1)
x_train = input[:N][:ntrain, ::r1, ::r2][:, :s1, :s2]
y_train = output[:N][:ntrain, ::r1, ::r2][:, :s1, :s2]
x_test = input[:N][-ntest:, ::r1, ::r2][:, :s1, :s2]
y_test = output[:N][-ntest:, ::r1, ::r2][:, :s1, :s2]
x_train = x_train.reshape(ntrain, s1, s2, 1)
x_test = x_test.reshape(ntest, s1, s2, 1)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
mask = x.clone()
optimizer.zero_grad()
out = model(x)
out = out * mask
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
mask = x.clone()
out = model(x)
out2 = out * mask
test_l2 += myloss(out2.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name)) | 3,753 | 30.283333 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_pipe.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
INPUT_X = os.path.join(args.data_path, 'Pipe_X.npy')
INPUT_Y = os.path.join(args.data_path, 'Pipe_Y.npy')
OUTPUT_Sigma = os.path.join(args.data_path, 'Pipe_Q.npy')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
inputX = np.load(INPUT_X)
inputX = torch.tensor(inputX, dtype=torch.float)
inputY = np.load(INPUT_Y)
inputY = torch.tensor(inputY, dtype=torch.float)
input = torch.stack([inputX, inputY], dim=-1)
output = np.load(OUTPUT_Sigma)[:, 0]
output = torch.tensor(output, dtype=torch.float)
x_train = input[:N][:ntrain, ::r1, ::r2][:, :s1, :s2]
y_train = output[:N][:ntrain, ::r1, ::r2][:, :s1, :s2]
x_test = input[:N][-ntest:, ::r1, ::r2][:, :s1, :s2]
y_test = output[:N][-ntest:, ::r1, ::r2][:, :s1, :s2]
x_train = x_train.reshape(ntrain, s1, s2, 2)
x_test = x_test.reshape(ntest, s1, s2, 2)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x)
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x)
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model, os.path.join(model_save_path, model_save_name))
X = x[0, :, :, 0].squeeze().detach().cpu().numpy()
Y = x[0, :, :, 1].squeeze().detach().cpu().numpy()
truth = y[0].squeeze().detach().cpu().numpy()
pred = out[0].squeeze().detach().cpu().numpy()
fig, ax = plt.subplots(nrows=3, figsize=(16, 16))
ax[0].pcolormesh(X, Y, truth, shading='gouraud')
ax[1].pcolormesh(X, Y, pred, shading='gouraud')
ax[2].pcolormesh(X, Y, pred - truth, shading='gouraud')
fig.show()
| 4,190 | 31.238462 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_darcy.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
TRAIN_PATH = os.path.join(args.data_path, './piececonst_r421_N1024_smooth1.mat')
TEST_PATH = os.path.join(args.data_path, './piececonst_r421_N1024_smooth2.mat')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
reader = MatReader(TRAIN_PATH)
x_train = reader.read_field('coeff')[:ntrain, ::r1, ::r2][:, :s1, :s2]
y_train = reader.read_field('sol')[:ntrain, ::r1, ::r2][:, :s1, :s2]
reader.load_file(TEST_PATH)
x_test = reader.read_field('coeff')[:ntest, ::r1, ::r2][:, :s1, :s2]
y_test = reader.read_field('sol')[:ntest, ::r1, ::r2][:, :s1, :s2]
x_normalizer = UnitGaussianNormalizer(x_train)
x_train = x_normalizer.encode(x_train)
x_test = x_normalizer.encode(x_test)
y_normalizer = UnitGaussianNormalizer(y_train)
y_train = y_normalizer.encode(y_train)
y_normalizer.cuda()
x_train = x_train.reshape(ntrain, s1, s2, 1)
x_test = x_test.reshape(ntest, s1, s2, 1)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x).reshape(batch_size, s1, s2)
out = y_normalizer.decode(out)
y = y_normalizer.decode(y)
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s1, s2)
out = y_normalizer.decode(out)
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name))
| 3,958 | 30.927419 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_ns.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.params import get_args
from model_dict import get_model
from utils.adam import Adam
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
TRAIN_PATH = os.path.join(args.data_path, './NavierStokes_V1e-5_N1200_T20.mat')
TEST_PATH = os.path.join(args.data_path, './NavierStokes_V1e-5_N1200_T20.mat')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
T_in = args.T_in
T_out = args.T_out
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
reader = MatReader(TRAIN_PATH)
train_a = reader.read_field('u')[:ntrain, ::r1, ::r2, :T_in]
train_u = reader.read_field('u')[:ntrain, ::r1, ::r2, T_in:T_in + T_out]
test_a = reader.read_field('u')[-ntest:, ::r1, ::r2, :T_in]
test_u = reader.read_field('u')[-ntest:, ::r1, ::r2, T_in:T_in + T_out]
print(train_u.shape)
print(test_u.shape)
train_a = train_a.reshape(ntrain, s1, s2, T_in)
test_a = test_a.reshape(ntest, s1, s2, T_in)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(train_a, train_u), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(test_a, test_u), batch_size=batch_size,
shuffle=False)
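# Note (added for clarity; not in the original script): the loops below unroll the
# model autoregressively with a sliding window over time (step is set to 1 below).
# The input window xx always holds the last T_in frames; each prediction im is
# appended while the oldest frame is dropped, e.g.:
#
#     xx = torch.rand(batch_size, s1, s2, T_in)   # dummy window, time along the last dim
#     im = torch.rand(batch_size, s1, s2, 1)      # stands in for model(xx)
#     xx = torch.cat((xx[..., 1:], im), dim=-1)   # slide the window by one frame
#
# Repeating this T_out times yields the full predicted trajectory pred.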
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False)
step = 1
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2_step = 0
train_l2_full = 0
for xx, yy in train_loader:
loss = 0
xx = xx.to(device)
yy = yy.to(device)
for t in range(0, T_out, step):
y = yy[..., t:t + step]
im = model(xx)
loss += myloss(im.reshape(batch_size, -1), y.reshape(batch_size, -1))
if t == 0:
pred = im
else:
pred = torch.cat((pred, im), -1)
xx = torch.cat((xx[..., step:], im), dim=-1)
train_l2_step += loss.item()
l2_full = myloss(pred.reshape(batch_size, -1), yy.reshape(batch_size, -1))
train_l2_full += l2_full.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
    model.eval()
    test_l2_step = 0
test_l2_full = 0
with torch.no_grad():
for xx, yy in test_loader:
loss = 0
xx = xx.to(device)
yy = yy.to(device)
for t in range(0, T_out, step):
y = yy[..., t:t + step]
im = model(xx)
loss += myloss(im.reshape(batch_size, -1), y.reshape(batch_size, -1))
if t == 0:
pred = im
else:
pred = torch.cat((pred, im), -1)
xx = torch.cat((xx[..., step:], im), dim=-1)
test_l2_step += loss.item()
test_l2_full += myloss(pred.reshape(batch_size, -1), yy.reshape(batch_size, -1)).item()
t2 = default_timer()
scheduler.step()
print(ep, t2 - t1, train_l2_step / ntrain / (T_out / step), train_l2_full / ntrain,
test_l2_step / ntest / (T_out / step),
test_l2_full / ntest)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name)) | 4,624 | 31.118056 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/exp_plas.py | import torch.nn.functional as F
import matplotlib.pyplot as plt
from timeit import default_timer
from utils.utilities3 import *
from utils.adam import Adam
from utils.params import get_args
from model_dict import get_model
import math
import os
torch.manual_seed(0)
np.random.seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
################################################################
# configs
################################################################
args = get_args()
DATA_PATH = os.path.join(args.data_path, './plas_N987_T20.mat')
ntrain = args.ntrain
ntest = args.ntest
N = args.ntotal
in_channels = args.in_dim
out_channels = args.out_dim
r1 = args.h_down
r2 = args.w_down
s1 = int(((args.h - 1) / r1) + 1)
s2 = int(((args.w - 1) / r2) + 1)
t = args.T_in
batch_size = args.batch_size
learning_rate = args.learning_rate
epochs = args.epochs
step_size = args.step_size
gamma = args.gamma
model_save_path = args.model_save_path
model_save_name = args.model_save_name
################################################################
# models
################################################################
model = get_model(args)
print(count_params(model))
################################################################
# load data and data normalization
################################################################
reader = MatReader(DATA_PATH)
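# Note (added for clarity; not in the original script): the 'input' field is a 1-D
# boundary condition sampled on the s1 grid; the reshape/repeat below broadcasts it
# over the second spatial dimension and the t time steps so that every query point
# carries the same conditioning value. The trailing dimensions of 'output' (time
# steps and output channels) are preserved by the slicing.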
x_train = reader.read_field('input')[:ntrain, ::r1][:, :s1].reshape(ntrain, s1, 1, 1, 1).repeat(1, 1, s2, t, 1)
y_train = reader.read_field('output')[:ntrain, ::r1, ::r2][:, :s1, :s2]
reader.load_file(DATA_PATH)
x_test = reader.read_field('input')[-ntest:, ::r1][:, :s1].reshape(ntest, s1, 1, 1, 1).repeat(1, 1, s2, t, 1)
y_test = reader.read_field('output')[-ntest:, ::r1, ::r2][:, :s1, :s2]
print(x_train.shape, y_train.shape)
x_normalizer = UnitGaussianNormalizer(x_train)
x_train = x_normalizer.encode(x_train)
x_test = x_normalizer.encode(x_test)
y_normalizer = UnitGaussianNormalizer(y_train)
y_train = y_normalizer.encode(y_train)
y_normalizer.cuda()
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x_test, y_test), batch_size=batch_size,
shuffle=False)
################################################################
# training and evaluation
################################################################
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
myloss = LpLoss(size_average=False, p=2)
for ep in range(epochs):
model.train()
t1 = default_timer()
train_l2 = 0
train_reg = 0
for x, y in train_loader:
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
out = model(x).reshape(batch_size, s1, s2, t, out_channels)
out = y_normalizer.decode(out)
y = y_normalizer.decode(y)
loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))
loss.backward()
optimizer.step()
train_l2 += loss.item()
scheduler.step()
model.eval()
test_l2 = 0.0
with torch.no_grad():
for x, y in test_loader:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s1, s2, t, out_channels)
out = y_normalizer.decode(out)
test_l2 += myloss(out.view(batch_size, -1), y.view(batch_size, -1)).item()
train_l2 /= ntrain
train_reg /= ntrain
test_l2 /= ntest
t2 = default_timer()
print(ep, t2 - t1, train_l2, train_reg, test_l2)
if ep % step_size == 0:
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
print('save model')
torch.save(model.state_dict(), os.path.join(model_save_path, model_save_name))
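# Visualization of the last test batch held in (x, y, out) after the final epoch
# (comment added for clarity; not in the original script). Each column shows one of
# the time steps t0..t4, with ground truth on the top row and the prediction on the
# bottom row, coloured by the norm over the trailing output channels (truth_du, pred_du).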
truth = y[0].squeeze().detach().cpu().numpy()
pred = out[0].squeeze().detach().cpu().numpy()
ZERO = torch.zeros(s1, s2)
truth_du = np.linalg.norm(truth[:, :, :, 2:], axis=-1)
pred_du = np.linalg.norm(pred[:, :, :, 2:], axis=-1)
lims = dict(cmap='RdBu_r', vmin=truth_du.min(), vmax=truth_du.max())
fig, ax = plt.subplots(nrows=2, ncols=5, figsize=(20, 6))
t0, t1, t2, t3, t4 = 0, 4, 9, 14, 19
ax[0, 0].scatter(truth[:, :, t0, 0], truth[:, :, t0, 1], 10, truth_du[:, :, t0], **lims)
ax[1, 0].scatter(pred[:, :, t0, 0], pred[:, :, t0, 1], 10, pred_du[:, :, t0], **lims)
ax[0, 1].scatter(truth[:, :, t1, 0], truth[:, :, t1, 1], 10, truth_du[:, :, t1], **lims)
ax[1, 1].scatter(pred[:, :, t1, 0], pred[:, :, t1, 1], 10, pred_du[:, :, t1], **lims)
ax[0, 2].scatter(truth[:, :, t2, 0], truth[:, :, t2, 1], 10, truth_du[:, :, t2], **lims)
ax[1, 2].scatter(pred[:, :, t2, 0], pred[:, :, t2, 1], 10, pred_du[:, :, t2], **lims)
ax[0, 3].scatter(truth[:, :, t3, 0], truth[:, :, t3, 1], 10, truth_du[:, :, t3], **lims)
ax[1, 3].scatter(pred[:, :, t3, 0], pred[:, :, t3, 1], 10, pred_du[:, :, t3], **lims)
ax[0, 4].scatter(truth[:, :, t4, 0], truth[:, :, t4, 1], 10, truth_du[:, :, t4], **lims)
ax[1, 4].scatter(pred[:, :, t4, 0], pred[:, :, t4, 1], 10, pred_du[:, :, t4], **lims)
fig.show()
| 5,411 | 37.935252 | 115 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/LSM_Irregular_Geo.py | """
@author: Haixu Wu
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# Multiscale modules 2D
################################################################
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
################################################################
# geo projection
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, s1=32, s2=32):
super(SpectralConv2d, self).__init__()
"""
from geoFNO
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.s1 = s1
self.s2 = s2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, u, x_in=None, x_out=None, iphi=None, code=None):
batchsize = u.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
if x_in == None:
u_ft = torch.fft.rfft2(u)
s1 = u.size(-2)
s2 = u.size(-1)
else:
u_ft = self.fft2d(u, x_in, iphi, code)
s1 = self.s1
s2 = self.s2
# Multiply relevant Fourier modes
# print(u.shape, u_ft.shape)
factor1 = self.compl_mul2d(u_ft[:, :, :self.modes1, :self.modes2], self.weights1)
factor2 = self.compl_mul2d(u_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
# Return to physical space
if x_out == None:
out_ft = torch.zeros(batchsize, self.out_channels, s1, s2 // 2 + 1, dtype=torch.cfloat, device=u.device)
out_ft[:, :, :self.modes1, :self.modes2] = factor1
out_ft[:, :, -self.modes1:, :self.modes2] = factor2
u = torch.fft.irfft2(out_ft, s=(s1, s2))
else:
out_ft = torch.cat([factor1, factor2], dim=-2)
u = self.ifft2d(out_ft, x_out, iphi, code)
return u
def fft2d(self, u, x_in, iphi=None, code=None):
# u (batch, channels, n)
# x_in (batch, n, 2) locations in [0,1]*[0,1]
# iphi: function: x_in -> x_c
batchsize = x_in.shape[0]
N = x_in.shape[1]
device = x_in.device
m1 = 2 * self.modes1
m2 = 2 * self.modes2 - 1
# wavenumber (m1, m2)
k_x1 = torch.cat((torch.arange(start=0, end=self.modes1, step=1), \
torch.arange(start=-(self.modes1), end=0, step=1)), 0).reshape(m1, 1).repeat(1, m2).to(device)
k_x2 = torch.cat((torch.arange(start=0, end=self.modes2, step=1), \
torch.arange(start=-(self.modes2 - 1), end=0, step=1)), 0).reshape(1, m2).repeat(m1, 1).to(
device)
if iphi == None:
x = x_in
else:
x = iphi(x_in, code)
# K = <y, k_x>, (batch, N, m1, m2)
K1 = torch.outer(x[..., 0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)
K2 = torch.outer(x[..., 1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)
K = K1 + K2
# basis (batch, N, m1, m2)
basis = torch.exp(-1j * 2 * np.pi * K).to(device)
# Y (batch, channels, N)
u = u + 0j
Y = torch.einsum("bcn,bnxy->bcxy", u, basis)
return Y
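    # Note (added for clarity; not in the original file): fft2d/ifft2d evaluate the
    # truncated Fourier transform directly on the scattered mesh points through the
    # einsum with the complex-exponential basis, i.e. an O(N * m1 * m2) non-uniform
    # transform rather than an FFT. The learnable map iphi (defined below) first sends
    # the physical coordinates x to computational coordinates x_c, so the Fourier
    # basis is evaluated on a roughly uniform domain.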
def ifft2d(self, u_ft, x_out, iphi=None, code=None):
# u_ft (batch, channels, kmax, kmax)
# x_out (batch, N, 2) locations in [0,1]*[0,1]
# iphi: function: x_out -> x_c
batchsize = x_out.shape[0]
N = x_out.shape[1]
device = x_out.device
m1 = 2 * self.modes1
m2 = 2 * self.modes2 - 1
# wavenumber (m1, m2)
k_x1 = torch.cat((torch.arange(start=0, end=self.modes1, step=1), \
torch.arange(start=-(self.modes1), end=0, step=1)), 0).reshape(m1, 1).repeat(1, m2).to(device)
k_x2 = torch.cat((torch.arange(start=0, end=self.modes2, step=1), \
torch.arange(start=-(self.modes2 - 1), end=0, step=1)), 0).reshape(1, m2).repeat(m1, 1).to(
device)
if iphi == None:
x = x_out
else:
x = iphi(x_out, code)
# K = <y, k_x>, (batch, N, m1, m2)
K1 = torch.outer(x[:, :, 0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)
K2 = torch.outer(x[:, :, 1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)
K = K1 + K2
# basis (batch, N, m1, m2)
basis = torch.exp(1j * 2 * np.pi * K).to(device)
# coeff (batch, channels, m1, m2)
u_ft2 = u_ft[..., 1:].flip(-1, -2).conj()
u_ft = torch.cat([u_ft, u_ft2], dim=-1)
# Y (batch, channels, N)
Y = torch.einsum("bcxy,bnxy->bcn", u_ft, basis)
Y = Y.real
return Y
class IPHI(nn.Module):
def __init__(self, width=32):
super(IPHI, self).__init__()
"""
inverse phi: x -> xi
"""
self.width = width
self.fc0 = nn.Linear(4, self.width)
self.fc_code = nn.Linear(42, self.width)
self.fc_no_code = nn.Linear(3 * self.width, 4 * self.width)
self.fc1 = nn.Linear(4 * self.width, 4 * self.width)
self.fc2 = nn.Linear(4 * self.width, 4 * self.width)
self.fc3 = nn.Linear(4 * self.width, 4 * self.width)
self.fc4 = nn.Linear(4 * self.width, 2)
self.activation = torch.tanh
self.center = torch.tensor([0.0001, 0.0001], device="cuda").reshape(1, 1, 2)
self.B = np.pi * torch.pow(2, torch.arange(0, self.width // 4, dtype=torch.float, device="cuda")).reshape(1, 1,
1,
self.width // 4)
def forward(self, x, code=None):
# x (batch, N_grid, 2)
# code (batch, N_features)
# some feature engineering
angle = torch.atan2(x[:, :, 1] - self.center[:, :, 1], x[:, :, 0] - self.center[:, :, 0])
radius = torch.norm(x - self.center, dim=-1, p=2)
xd = torch.stack([x[:, :, 0], x[:, :, 1], angle, radius], dim=-1)
# sin features from NeRF
b, n, d = xd.shape[0], xd.shape[1], xd.shape[2]
x_sin = torch.sin(self.B * xd.view(b, n, d, 1)).view(b, n, d * self.width // 4)
x_cos = torch.cos(self.B * xd.view(b, n, d, 1)).view(b, n, d * self.width // 4)
xd = self.fc0(xd)
xd = torch.cat([xd, x_sin, x_cos], dim=-1).reshape(b, n, 3 * self.width)
if code != None:
cd = self.fc_code(code)
cd = cd.unsqueeze(1).repeat(1, xd.shape[1], 1)
xd = torch.cat([cd, xd], dim=-1)
else:
xd = self.fc_no_code(xd)
xd = self.fc1(xd)
xd = self.activation(xd)
xd = self.fc2(xd)
xd = self.activation(xd)
xd = self.fc3(xd)
xd = self.activation(xd)
xd = self.fc4(xd)
return x + x * xd
################################################################
# Patchify and Neural Spectral Block
################################################################
class NeuralSpectralBlock2d(nn.Module):
def __init__(self, width, num_basis, patch_size=[3, 3], num_token=4):
super(NeuralSpectralBlock2d, self).__init__()
self.patch_size = patch_size
self.width = width
self.num_basis = num_basis
# basis
self.modes_list = (1.0 / float(num_basis)) * torch.tensor([i for i in range(num_basis)],
dtype=torch.float).cuda()
self.weights = nn.Parameter(
(1 / (width)) * torch.rand(width, self.num_basis * 2, dtype=torch.float))
# latent
self.head = 8
self.num_token = num_token
self.latent = nn.Parameter(
(1 / (width)) * torch.rand(self.head, self.num_token, width // self.head, dtype=torch.float))
self.encoder_attn = nn.Conv2d(self.width, self.width * 2, kernel_size=1, stride=1)
self.decoder_attn = nn.Conv2d(self.width, self.width, kernel_size=1, stride=1)
self.softmax = nn.Softmax(dim=-1)
def self_attn(self, q, k, v):
# q,k,v: B H L C/H
attn = self.softmax(torch.einsum("bhlc,bhsc->bhls", q, k))
return torch.einsum("bhls,bhsc->bhlc", attn, v)
def latent_encoder_attn(self, x):
# x: B C H W
B, C, H, W = x.shape
L = H * W
latent_token = self.latent[None, :, :, :].repeat(B, 1, 1, 1)
x_tmp = self.encoder_attn(x).view(B, C * 2, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head, 2).permute(4, 0, 2, 1, 3).contiguous()
latent_token = self.self_attn(latent_token, x_tmp[0], x_tmp[1]) + latent_token
latent_token = latent_token.permute(0, 1, 3, 2).contiguous().view(B, C, self.num_token)
return latent_token
def latent_decoder_attn(self, x, latent_token):
# x: B C L
x_init = x
B, C, H, W = x.shape
L = H * W
latent_token = latent_token.view(B, self.head, C // self.head, self.num_token).permute(0, 1, 3, 2).contiguous()
x_tmp = self.decoder_attn(x).view(B, C, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head).permute(0, 2, 1, 3).contiguous()
x = self.self_attn(x_tmp, latent_token, latent_token)
x = x.permute(0, 1, 3, 2).contiguous().view(B, C, H, W) + x_init # B H L C/H
return x
def get_basis(self, x):
# x: B C N
x_sin = torch.sin(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
x_cos = torch.cos(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
return torch.cat([x_sin, x_cos], dim=-1)
def compl_mul2d(self, input, weights):
return torch.einsum("bilm,im->bil", input, weights)
def forward(self, x):
B, C, H, W = x.shape
# patchify
x = x.view(x.shape[0], x.shape[1],
x.shape[2] // self.patch_size[0], self.patch_size[0], x.shape[3] // self.patch_size[1],
self.patch_size[1]).contiguous() \
.permute(0, 2, 4, 1, 3, 5).contiguous() \
.view(x.shape[0] * (x.shape[2] // self.patch_size[0]) * (x.shape[3] // self.patch_size[1]), x.shape[1],
self.patch_size[0],
self.patch_size[1])
# Neural Spectral
# (1) encoder
latent_token = self.latent_encoder_attn(x)
# (2) transition
latent_token_modes = self.get_basis(latent_token)
latent_token = self.compl_mul2d(latent_token_modes, self.weights) + latent_token
# (3) decoder
x = self.latent_decoder_attn(x, latent_token)
# de-patchify
x = x.view(B, (H // self.patch_size[0]), (W // self.patch_size[1]), C, self.patch_size[0],
self.patch_size[1]).permute(0, 3, 1, 4, 2, 5).contiguous() \
.view(B, C, H, W).contiguous()
return x
class Model(nn.Module):
def __init__(self, args, bilinear=True, modes1=12, modes2=12, s1=96, s2=96):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
width = args.d_model
num_token = args.num_token
num_basis = args.num_basis
patch_size = [int(x) for x in args.patch_size.split(',')]
padding = [int(x) for x in args.padding.split(',')]
# multiscale modules
self.inc = DoubleConv(width, width)
self.down1 = Down(width, width * 2)
self.down2 = Down(width * 2, width * 4)
self.down3 = Down(width * 4, width * 8)
factor = 2 if bilinear else 1
self.down4 = Down(width * 8, width * 16 // factor)
self.up1 = Up(width * 16, width * 8 // factor, bilinear)
self.up2 = Up(width * 8, width * 4 // factor, bilinear)
self.up3 = Up(width * 4, width * 2 // factor, bilinear)
self.up4 = Up(width * 2, width, bilinear)
self.outc = OutConv(width, width)
# Patchified Neural Spectral Blocks
self.process1 = NeuralSpectralBlock2d(width, num_basis, patch_size, num_token)
self.process2 = NeuralSpectralBlock2d(width * 2, num_basis, patch_size, num_token)
self.process3 = NeuralSpectralBlock2d(width * 4, num_basis, patch_size, num_token)
self.process4 = NeuralSpectralBlock2d(width * 8, num_basis, patch_size, num_token)
self.process5 = NeuralSpectralBlock2d(width * 16 // factor, num_basis, patch_size, num_token)
# geo projectors
self.s1 = s1
self.s2 = s2
self.fc0 = nn.Linear(in_channels, width)
self.fftproject_in = SpectralConv2d(width, width, modes1, modes2, s1, s2)
self.fftproject_out = SpectralConv2d(width, width, modes1, modes2, s1, s2)
self.convproject_in = nn.Conv2d(2, width, 1)
self.convproject_out = nn.Conv1d(2, width, 1)
# dim projectors
self.fc1 = nn.Linear(width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x, code=None, x_in=None, x_out=None, iphi=None):
if x_in == None:
x_in = x
if x_out == None:
x_out = x
grid = self.get_grid([x.shape[0], self.s1, self.s2], x.device).permute(0, 3, 1, 2)
u = self.fc0(x)
u = u.permute(0, 2, 1)
uc1 = self.fftproject_in(u, x_in=x_in, iphi=iphi, code=code)
uc2 = self.convproject_in(grid)
uc = uc1 + uc2
uc = F.gelu(uc)
x1 = self.inc(uc)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(self.process5(x5), self.process4(x4))
x = self.up2(x, self.process3(x3))
x = self.up3(x, self.process2(x2))
x = self.up4(x, self.process1(x1))
uc = self.outc(x)
u = self.fftproject_out(uc, x_out=x_out, iphi=iphi, code=code)
u1 = self.convproject_out(x_out.permute(0, 2, 1))
u = u + u1
u = u.permute(0, 2, 1)
u = self.fc1(u)
u = F.gelu(u)
u = self.fc2(u)
return u
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
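# Illustrative sketch (not part of the original file): how Model and IPHI could be
# wired together on an irregular point cloud, following the forward() signature above.
# A CUDA device is assumed because IPHI and NeuralSpectralBlock2d allocate buffers on
# "cuda" directly; the batch size, point count and the use of the mesh itself as the
# input are assumptions for illustration only.
#
#     model = Model(args).cuda()                  # args supplies in_dim / out_dim / d_model / ...
#     iphi = IPHI().cuda()                        # learnable coordinate map, trained jointly
#     u = torch.rand(4, 972, 2).cuda()            # (batch, N points, in_dim)
#     out = model(u, x_in=u, x_out=u, iphi=iphi)  # (batch, N points, out_dim)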
| 17,899 | 39.134529 | 130 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/FNO_Irregular_Geo.py | """
@author: Zongyi Li
modified by Haixu Wu to adapt to this code base
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# fourier layer
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, s1=32, s2=32):
super(SpectralConv2d, self).__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.s1 = s1
self.s2 = s2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, u, x_in=None, x_out=None, iphi=None, code=None):
batchsize = u.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
if x_in == None:
u_ft = torch.fft.rfft2(u)
s1 = u.size(-2)
s2 = u.size(-1)
else:
u_ft = self.fft2d(u, x_in, iphi, code)
s1 = self.s1
s2 = self.s2
# Multiply relevant Fourier modes
# print(u.shape, u_ft.shape)
factor1 = self.compl_mul2d(u_ft[:, :, :self.modes1, :self.modes2], self.weights1)
factor2 = self.compl_mul2d(u_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
# Return to physical space
if x_out == None:
out_ft = torch.zeros(batchsize, self.out_channels, s1, s2 // 2 + 1, dtype=torch.cfloat, device=u.device)
out_ft[:, :, :self.modes1, :self.modes2] = factor1
out_ft[:, :, -self.modes1:, :self.modes2] = factor2
u = torch.fft.irfft2(out_ft, s=(s1, s2))
else:
out_ft = torch.cat([factor1, factor2], dim=-2)
u = self.ifft2d(out_ft, x_out, iphi, code)
return u
def fft2d(self, u, x_in, iphi=None, code=None):
# u (batch, channels, n)
# x_in (batch, n, 2) locations in [0,1]*[0,1]
# iphi: function: x_in -> x_c
batchsize = x_in.shape[0]
N = x_in.shape[1]
device = x_in.device
m1 = 2 * self.modes1
m2 = 2 * self.modes2 - 1
# wavenumber (m1, m2)
k_x1 = torch.cat((torch.arange(start=0, end=self.modes1, step=1), \
torch.arange(start=-(self.modes1), end=0, step=1)), 0).reshape(m1, 1).repeat(1, m2).to(device)
k_x2 = torch.cat((torch.arange(start=0, end=self.modes2, step=1), \
torch.arange(start=-(self.modes2 - 1), end=0, step=1)), 0).reshape(1, m2).repeat(m1, 1).to(
device)
# print(x_in.shape)
if iphi == None:
x = x_in
else:
x = iphi(x_in, code)
# print(x.shape)
# K = <y, k_x>, (batch, N, m1, m2)
K1 = torch.outer(x[..., 0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)
K2 = torch.outer(x[..., 1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)
K = K1 + K2
# basis (batch, N, m1, m2)
basis = torch.exp(-1j * 2 * np.pi * K).to(device)
# Y (batch, channels, N)
u = u + 0j
Y = torch.einsum("bcn,bnxy->bcxy", u, basis)
return Y
def ifft2d(self, u_ft, x_out, iphi=None, code=None):
# u_ft (batch, channels, kmax, kmax)
# x_out (batch, N, 2) locations in [0,1]*[0,1]
# iphi: function: x_out -> x_c
batchsize = x_out.shape[0]
N = x_out.shape[1]
device = x_out.device
m1 = 2 * self.modes1
m2 = 2 * self.modes2 - 1
# wavenumber (m1, m2)
k_x1 = torch.cat((torch.arange(start=0, end=self.modes1, step=1), \
torch.arange(start=-(self.modes1), end=0, step=1)), 0).reshape(m1, 1).repeat(1, m2).to(device)
k_x2 = torch.cat((torch.arange(start=0, end=self.modes2, step=1), \
torch.arange(start=-(self.modes2 - 1), end=0, step=1)), 0).reshape(1, m2).repeat(m1, 1).to(
device)
if iphi == None:
x = x_out
else:
x = iphi(x_out, code)
# K = <y, k_x>, (batch, N, m1, m2)
K1 = torch.outer(x[:, :, 0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)
K2 = torch.outer(x[:, :, 1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)
K = K1 + K2
# basis (batch, N, m1, m2)
basis = torch.exp(1j * 2 * np.pi * K).to(device)
# coeff (batch, channels, m1, m2)
u_ft2 = u_ft[..., 1:].flip(-1, -2).conj()
u_ft = torch.cat([u_ft, u_ft2], dim=-1)
# Y (batch, channels, N)
Y = torch.einsum("bcxy,bnxy->bcn", u_ft, basis)
Y = Y.real
return Y
class IPHI(nn.Module):
def __init__(self, width=32):
super(IPHI, self).__init__()
"""
inverse phi: x -> xi
"""
self.width = width
self.fc0 = nn.Linear(4, self.width)
self.fc_code = nn.Linear(42, self.width)
self.fc_no_code = nn.Linear(3 * self.width, 4 * self.width)
self.fc1 = nn.Linear(4 * self.width, 4 * self.width)
self.fc2 = nn.Linear(4 * self.width, 4 * self.width)
self.fc3 = nn.Linear(4 * self.width, 4 * self.width)
self.fc4 = nn.Linear(4 * self.width, 2)
self.activation = torch.tanh
self.center = torch.tensor([0.0001, 0.0001], device="cuda").reshape(1, 1, 2)
self.B = np.pi * torch.pow(2, torch.arange(0, self.width // 4, dtype=torch.float, device="cuda")).reshape(1, 1,
1,
self.width // 4)
def forward(self, x, code=None):
# x (batch, N_grid, 2)
# code (batch, N_features)
# some feature engineering
angle = torch.atan2(x[:, :, 1] - self.center[:, :, 1], x[:, :, 0] - self.center[:, :, 0])
radius = torch.norm(x - self.center, dim=-1, p=2)
xd = torch.stack([x[:, :, 0], x[:, :, 1], angle, radius], dim=-1)
# sin features from NeRF
b, n, d = xd.shape[0], xd.shape[1], xd.shape[2]
x_sin = torch.sin(self.B * xd.view(b, n, d, 1)).view(b, n, d * self.width // 4)
x_cos = torch.cos(self.B * xd.view(b, n, d, 1)).view(b, n, d * self.width // 4)
xd = self.fc0(xd)
xd = torch.cat([xd, x_sin, x_cos], dim=-1).reshape(b, n, 3 * self.width)
if code != None:
cd = self.fc_code(code)
cd = cd.unsqueeze(1).repeat(1, xd.shape[1], 1)
xd = torch.cat([cd, xd], dim=-1)
else:
xd = self.fc_no_code(xd)
xd = self.fc1(xd)
xd = self.activation(xd)
xd = self.fc2(xd)
xd = self.activation(xd)
xd = self.fc3(xd)
xd = self.activation(xd)
xd = self.fc4(xd)
return x + x * xd
class Model(nn.Module):
def __init__(self, args, is_mesh=True, modes1=12, modes2=12, s1=96, s2=96):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
width = args.d_model
self.modes1 = modes1
self.modes2 = modes2
self.width = width
self.is_mesh = is_mesh
self.s1 = s1
self.s2 = s2
        self.fc0 = nn.Linear(in_channels, self.width)  # lift the raw input channels (args.in_dim) to the latent width
self.conv0 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2, s1, s2)
self.conv1 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv2 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv3 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv4 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2, s1, s2)
self.w1 = nn.Conv2d(self.width, self.width, 1)
self.w2 = nn.Conv2d(self.width, self.width, 1)
self.w3 = nn.Conv2d(self.width, self.width, 1)
self.b0 = nn.Conv2d(2, self.width, 1)
self.b1 = nn.Conv2d(2, self.width, 1)
self.b2 = nn.Conv2d(2, self.width, 1)
self.b3 = nn.Conv2d(2, self.width, 1)
self.b4 = nn.Conv1d(2, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, u, code=None, x_in=None, x_out=None, iphi=None):
# u (batch, Nx, d) the input value
# code (batch, Nx, d) the input features
# x_in (batch, Nx, 2) the input mesh (sampling mesh)
# xi (batch, xi1, xi2, 2) the computational mesh (uniform)
        # x_out (batch, Nx, 2) the output mesh (query mesh)
if self.is_mesh and x_in == None:
x_in = u
if self.is_mesh and x_out == None:
x_out = u
grid = self.get_grid([u.shape[0], self.s1, self.s2], u.device).permute(0, 3, 1, 2)
u = self.fc0(u)
u = u.permute(0, 2, 1)
uc1 = self.conv0(u, x_in=x_in, iphi=iphi, code=code)
uc3 = self.b0(grid)
uc = uc1 + uc3
uc = F.gelu(uc)
uc1 = self.conv1(uc)
uc2 = self.w1(uc)
uc3 = self.b1(grid)
uc = uc1 + uc2 + uc3
uc = F.gelu(uc)
uc1 = self.conv2(uc)
uc2 = self.w2(uc)
uc3 = self.b2(grid)
uc = uc1 + uc2 + uc3
uc = F.gelu(uc)
uc1 = self.conv3(uc)
uc2 = self.w3(uc)
uc3 = self.b3(grid)
uc = uc1 + uc2 + uc3
uc = F.gelu(uc)
u = self.conv4(uc, x_out=x_out, iphi=iphi, code=code)
u3 = self.b4(x_out.permute(0, 2, 1))
u = u + u3
u = u.permute(0, 2, 1)
u = self.fc1(u)
u = F.gelu(u)
u = self.fc2(u)
return u
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
| 11,055 | 36.733788 | 130 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/FNO_3D.py | """
@author: Zongyi Li
modified by Haixu Wu to adapt to this code base
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# 3d fourier layers
################################################################
class SpectralConv3d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
super(SpectralConv3d, self).__init__()
"""
3D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.modes3 = modes3
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3,
dtype=torch.cfloat))
self.weights2 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3,
dtype=torch.cfloat))
self.weights3 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3,
dtype=torch.cfloat))
self.weights4 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3,
dtype=torch.cfloat))
# Complex multiplication
def compl_mul3d(self, input, weights):
# (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t)
return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfftn(x, dim=[-3, -2, -1])
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-3), x.size(-2), x.size(-1) // 2 + 1,
dtype=torch.cfloat, device=x.device)
out_ft[:, :, :self.modes1, :self.modes2, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, :self.modes1, :self.modes2, :self.modes3], self.weights1)
out_ft[:, :, -self.modes1:, :self.modes2, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, -self.modes1:, :self.modes2, :self.modes3], self.weights2)
out_ft[:, :, :self.modes1, -self.modes2:, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, :self.modes1, -self.modes2:, :self.modes3], self.weights3)
out_ft[:, :, -self.modes1:, -self.modes2:, :self.modes3] = \
self.compl_mul3d(x_ft[:, :, -self.modes1:, -self.modes2:, :self.modes3], self.weights4)
# Return to physical space
x = torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
return x
class Model(nn.Module):
def __init__(self, args):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
self.modes1 = args.num_basis
self.modes2 = args.num_basis
self.modes3 = args.num_basis // 2
self.width = args.d_model
self.padding = [int(x) for x in args.padding.split(',')]
self.conv0 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv1 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv2 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.conv3 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
self.w0 = nn.Conv3d(self.width, self.width, 1)
self.w1 = nn.Conv3d(self.width, self.width, 1)
self.w2 = nn.Conv3d(self.width, self.width, 1)
self.w3 = nn.Conv3d(self.width, self.width, 1)
self.bn0 = torch.nn.BatchNorm3d(self.width)
self.bn1 = torch.nn.BatchNorm3d(self.width)
self.bn2 = torch.nn.BatchNorm3d(self.width)
self.bn3 = torch.nn.BatchNorm3d(self.width)
self.fc0 = nn.Linear(in_channels + 3, self.width)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 4, 1, 2, 3)
if not all(item == 0 for item in self.padding):
x = F.pad(x, [0, self.padding[0], 0, self.padding[1], 0, self.padding[2]])
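        # Note (added for clarity; not in the original file): F.pad lists pad widths for
        # the trailing dimensions first, so padding[0] pads the last axis (t), padding[1]
        # the y axis and padding[2] the x axis, each on one side only; the same amounts
        # are cropped off again after the spectral layers. Caution: the crop below uses
        # :-padding[i], so a mixed setting with some (but not all) entries equal to zero
        # would produce an empty slice.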
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
if not all(item == 0 for item in self.padding):
x = x[..., :-self.padding[2], :-self.padding[1], :-self.padding[0]]
        x = x.permute(0, 2, 3, 4, 1)  # back to channel-last: (batch, x, y, t, channel)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y, size_z = shape[0], shape[1], shape[2], shape[3]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1, 1).repeat([batchsize, 1, size_y, size_z, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1, 1).repeat([batchsize, size_x, 1, size_z, 1])
gridz = torch.tensor(np.linspace(0, 1, size_z), dtype=torch.float)
gridz = gridz.reshape(1, 1, 1, size_z, 1).repeat([batchsize, size_x, size_y, 1, 1])
return torch.cat((gridx, gridy, gridz), dim=-1).to(device)
| 6,128 | 41.86014 | 103 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/FNO_2D.py | """
@author: Zongyi Li
modified by Haixu Wu to adapt to this code base
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# fourier layer
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super(SpectralConv2d, self).__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
self.modes2 = modes2
self.scale = (1 / (in_channels * out_channels))
self.weights1 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
self.weights2 = nn.Parameter(
self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(batchsize, self.out_channels, x.size(-2), x.size(-1) // 2 + 1, dtype=torch.cfloat,
device=x.device)
out_ft[:, :, :self.modes1, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)
out_ft[:, :, -self.modes1:, :self.modes2] = \
self.compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)
# Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
class Model(nn.Module):
def __init__(self, args):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
self.modes1 = args.num_basis
self.modes2 = args.num_basis
self.width = args.d_model
self.padding = [int(x) for x in args.padding.split(',')]
self.conv0 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv1 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv2 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv3 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.w0 = nn.Conv2d(self.width, self.width, 1)
self.w1 = nn.Conv2d(self.width, self.width, 1)
self.w2 = nn.Conv2d(self.width, self.width, 1)
self.w3 = nn.Conv2d(self.width, self.width, 1)
self.fc0 = nn.Linear(in_channels + 2, self.width) # input channel is 3: (a(x, y), x, y)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 3, 1, 2)
if not all(item == 0 for item in self.padding):
x = F.pad(x, [0, self.padding[0], 0, self.padding[1]])
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
if not all(item == 0 for item in self.padding):
x = x[..., :-self.padding[1], :-self.padding[0]]
x = x.permute(0, 2, 3, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
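if __name__ == "__main__":
    # Minimal smoke test (added for illustration; not part of the original file).
    # The fields of cfg are exactly the attributes this Model reads from args.
    from types import SimpleNamespace
    cfg = SimpleNamespace(in_dim=1, out_dim=1, num_basis=12, d_model=32, padding='0,0')
    model = Model(cfg)
    x = torch.rand(2, 64, 64, 1)   # (batch, H, W, in_dim)
    y = model(x)
    print(y.shape)                 # expected: torch.Size([2, 64, 64, 1])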
| 4,586 | 36.598361 | 111 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/LSM_2D.py | """
@author: Haixu Wu
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# Multiscale modules 2D
################################################################
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
################################################################
# Patchify and Neural Spectral Block
################################################################
class NeuralSpectralBlock2d(nn.Module):
def __init__(self, width, num_basis, patch_size=[3, 3], num_token=4):
super(NeuralSpectralBlock2d, self).__init__()
self.patch_size = patch_size
self.width = width
self.num_basis = num_basis
# basis
self.modes_list = (1.0 / float(num_basis)) * torch.tensor([i for i in range(num_basis)],
dtype=torch.float).cuda()
self.weights = nn.Parameter(
(1 / (width)) * torch.rand(width, self.num_basis * 2, dtype=torch.float))
# latent
self.head = 8
self.num_token = num_token
self.latent = nn.Parameter(
(1 / (width)) * torch.rand(self.head, self.num_token, width // self.head, dtype=torch.float))
self.encoder_attn = nn.Conv2d(self.width, self.width * 2, kernel_size=1, stride=1)
self.decoder_attn = nn.Conv2d(self.width, self.width, kernel_size=1, stride=1)
self.softmax = nn.Softmax(dim=-1)
def self_attn(self, q, k, v):
# q,k,v: B H L C/H
attn = self.softmax(torch.einsum("bhlc,bhsc->bhls", q, k))
return torch.einsum("bhls,bhsc->bhlc", attn, v)
def latent_encoder_attn(self, x):
# x: B C H W
B, C, H, W = x.shape
L = H * W
latent_token = self.latent[None, :, :, :].repeat(B, 1, 1, 1)
x_tmp = self.encoder_attn(x).view(B, C * 2, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head, 2).permute(4, 0, 2, 1, 3).contiguous()
latent_token = self.self_attn(latent_token, x_tmp[0], x_tmp[1]) + latent_token
latent_token = latent_token.permute(0, 1, 3, 2).contiguous().view(B, C, self.num_token)
return latent_token
def latent_decoder_attn(self, x, latent_token):
# x: B C L
x_init = x
B, C, H, W = x.shape
L = H * W
latent_token = latent_token.view(B, self.head, C // self.head, self.num_token).permute(0, 1, 3, 2).contiguous()
x_tmp = self.decoder_attn(x).view(B, C, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head).permute(0, 2, 1, 3).contiguous()
x = self.self_attn(x_tmp, latent_token, latent_token)
x = x.permute(0, 1, 3, 2).contiguous().view(B, C, H, W) + x_init # B H L C/H
return x
def get_basis(self, x):
# x: B C N
x_sin = torch.sin(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
x_cos = torch.cos(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
return torch.cat([x_sin, x_cos], dim=-1)
def compl_mul2d(self, input, weights):
return torch.einsum("bilm,im->bil", input, weights)
def forward(self, x):
B, C, H, W = x.shape
# patchify
x = x.view(x.shape[0], x.shape[1],
x.shape[2] // self.patch_size[0], self.patch_size[0], x.shape[3] // self.patch_size[1],
self.patch_size[1]).contiguous() \
.permute(0, 2, 4, 1, 3, 5).contiguous() \
.view(x.shape[0] * (x.shape[2] // self.patch_size[0]) * (x.shape[3] // self.patch_size[1]), x.shape[1],
self.patch_size[0],
self.patch_size[1])
# Neural Spectral
# (1) encoder
latent_token = self.latent_encoder_attn(x)
# (2) transition
latent_token_modes = self.get_basis(latent_token)
latent_token = self.compl_mul2d(latent_token_modes, self.weights) + latent_token
# (3) decoder
x = self.latent_decoder_attn(x, latent_token)
# de-patchify
x = x.view(B, (H // self.patch_size[0]), (W // self.patch_size[1]), C, self.patch_size[0],
self.patch_size[1]).permute(0, 3, 1, 4, 2, 5).contiguous() \
.view(B, C, H, W).contiguous()
return x
class Model(nn.Module):
def __init__(self, args, bilinear=True):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
width = args.d_model
num_token = args.num_token
num_basis = args.num_basis
patch_size = [int(x) for x in args.patch_size.split(',')]
padding = [int(x) for x in args.padding.split(',')]
# multiscale modules
self.inc = DoubleConv(width, width)
self.down1 = Down(width, width * 2)
self.down2 = Down(width * 2, width * 4)
self.down3 = Down(width * 4, width * 8)
factor = 2 if bilinear else 1
self.down4 = Down(width * 8, width * 16 // factor)
self.up1 = Up(width * 16, width * 8 // factor, bilinear)
self.up2 = Up(width * 8, width * 4 // factor, bilinear)
self.up3 = Up(width * 4, width * 2 // factor, bilinear)
self.up4 = Up(width * 2, width, bilinear)
self.outc = OutConv(width, width)
# Patchified Neural Spectral Blocks
self.process1 = NeuralSpectralBlock2d(width, num_basis, patch_size, num_token)
self.process2 = NeuralSpectralBlock2d(width * 2, num_basis, patch_size, num_token)
self.process3 = NeuralSpectralBlock2d(width * 4, num_basis, patch_size, num_token)
self.process4 = NeuralSpectralBlock2d(width * 8, num_basis, patch_size, num_token)
self.process5 = NeuralSpectralBlock2d(width * 16 // factor, num_basis, patch_size, num_token)
# projectors
self.padding = padding
self.fc0 = nn.Linear(in_channels + 2, width)
self.fc1 = nn.Linear(width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 3, 1, 2)
if not all(item == 0 for item in self.padding):
x = F.pad(x, [0, self.padding[0], 0, self.padding[1]])
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(self.process5(x5), self.process4(x4))
x = self.up2(x, self.process3(x3))
x = self.up3(x, self.process2(x2))
x = self.up4(x, self.process1(x1))
x = self.outc(x)
if not all(item == 0 for item in self.padding):
x = x[..., :-self.padding[1], :-self.padding[0]]
x = x.permute(0, 2, 3, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
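if __name__ == "__main__":
    # Minimal smoke test (added for illustration; not part of the original file).
    # NeuralSpectralBlock2d builds its basis with .cuda(), so the test is skipped when
    # no GPU is available. Sizes are chosen so every U-Net level (64 -> 32 -> 16 -> 8 -> 4)
    # is divisible by the 4x4 patch size.
    if torch.cuda.is_available():
        from types import SimpleNamespace
        cfg = SimpleNamespace(in_dim=1, out_dim=1, d_model=32, num_token=4,
                              num_basis=12, patch_size='4,4', padding='0,0')
        model = Model(cfg).cuda()
        x = torch.rand(2, 64, 64, 1).cuda()   # (batch, H, W, in_dim)
        y = model(x)
        print(y.shape)                        # expected: torch.Size([2, 64, 64, 1])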
| 9,905 | 40.103734 | 122 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/models/LSM_3D.py | """
@author: Haixu Wu
"""
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import math
################################################################
# Multiscale modules 3D
################################################################
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv3d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm3d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv3d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool3d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose3d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
################################################################
# Patchify and Neural Spectral Block
################################################################
class NeuralSpectralBlock2d(nn.Module):
def __init__(self, width, num_basis, patch_size=[8, 8, 4], num_token=4):
super(NeuralSpectralBlock2d, self).__init__()
self.patch_size = patch_size
self.width = width
self.num_basis = num_basis
# basis
self.modes_list = (1.0 / float(num_basis)) * torch.tensor([i for i in range(num_basis)],
dtype=torch.float).cuda()
self.weights = nn.Parameter(
(1 / (width)) * torch.rand(width, self.num_basis * 2, dtype=torch.float))
# latent
self.head = 8
self.num_token = num_token
self.latent = nn.Parameter(
(1 / (width)) * torch.rand(self.head, self.num_token, width // self.head, dtype=torch.float))
self.encoder_attn = nn.Conv3d(self.width, self.width * 2, kernel_size=1, stride=1)
self.decoder_attn = nn.Conv3d(self.width, self.width, kernel_size=1, stride=1)
self.softmax = nn.Softmax(dim=-1)
def self_attn(self, q, k, v):
# q,k,v: B H L C/H
attn = self.softmax(torch.einsum("bhlc,bhsc->bhls", q, k))
return torch.einsum("bhls,bhsc->bhlc", attn, v)
def latent_encoder_attn(self, x):
# x: B C H W
B, C, H, W, T = x.shape
L = H * W * T
latent_token = self.latent[None, :, :, :].repeat(B, 1, 1, 1)
x_tmp = self.encoder_attn(x).view(B, C * 2, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head, 2).permute(4, 0, 2, 1, 3).contiguous()
latent_token = self.self_attn(latent_token, x_tmp[0], x_tmp[1]) + latent_token
latent_token = latent_token.permute(0, 1, 3, 2).contiguous().view(B, C, self.num_token)
return latent_token
def latent_decoder_attn(self, x, latent_token):
# x: B C L
x_init = x
B, C, H, W, T = x.shape
L = H * W * T
latent_token = latent_token.view(B, self.head, C // self.head, self.num_token).permute(0, 1, 3, 2).contiguous()
x_tmp = self.decoder_attn(x).view(B, C, -1).permute(0, 2, 1).contiguous() \
.view(B, L, self.head, C // self.head).permute(0, 2, 1, 3).contiguous()
x = self.self_attn(x_tmp, latent_token, latent_token)
x = x.permute(0, 1, 3, 2).contiguous().view(B, C, H, W, T) + x_init # B H L C/H
return x
def get_basis(self, x):
# x: B C N
x_sin = torch.sin(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
x_cos = torch.cos(self.modes_list[None, None, None, :] * x[:, :, :, None] * math.pi)
return torch.cat([x_sin, x_cos], dim=-1)
def compl_mul2d(self, input, weights):
return torch.einsum("bilm,im->bil", input, weights)
def forward(self, x):
B, C, H, W, T = x.shape
# patchify
x = x.view(x.shape[0], x.shape[1],
x.shape[2] // self.patch_size[0], self.patch_size[0], x.shape[3] // self.patch_size[1], self.patch_size[1],
x.shape[4] // self.patch_size[2], self.patch_size[2]).contiguous() \
.permute(0, 2, 4, 6, 1, 3, 5, 7).contiguous() \
.view(x.shape[0] * (x.shape[2] // self.patch_size[0]) * (x.shape[3] // self.patch_size[1]) * (
x.shape[4] // self.patch_size[2]), x.shape[1], self.patch_size[0], self.patch_size[1], self.patch_size[2])
# Neural Spectral
# (1) encoder
latent_token = self.latent_encoder_attn(x)
# (2) transition
latent_token_modes = self.get_basis(latent_token)
latent_token = self.compl_mul2d(latent_token_modes, self.weights) + latent_token
# (3) decoder
x = self.latent_decoder_attn(x, latent_token)
# de-patchify
x = x.view(B, (H // self.patch_size[0]), (W // self.patch_size[1]), (T // self.patch_size[2]), C,
self.patch_size[0], self.patch_size[1], self.patch_size[2]).permute(0, 4, 1, 5, 2, 6, 3, 7).contiguous() \
.view(B, C, H, W, T).contiguous()
return x
class Model(nn.Module):
def __init__(self, args, bilinear=True):
super(Model, self).__init__()
in_channels = args.in_dim
out_channels = args.out_dim
width = args.d_model
num_token = args.num_token
num_basis = args.num_basis
patch_size = [int(x) for x in args.patch_size.split(',')]
padding = [int(x) for x in args.padding.split(',')]
# multiscale modules
self.inc = DoubleConv(width, width)
self.down1 = Down(width, width * 2)
self.down2 = Down(width * 2, width * 4)
self.down3 = Down(width * 4, width * 8)
factor = 2 if bilinear else 1
self.down4 = Down(width * 8, width * 16 // factor)
self.up1 = Up(width * 16, width * 8 // factor, bilinear)
self.up2 = Up(width * 8, width * 4 // factor, bilinear)
self.up3 = Up(width * 4, width * 2 // factor, bilinear)
self.up4 = Up(width * 2, width, bilinear)
self.outc = OutConv(width, width)
# Patchified Neural Spectral Blocks
self.process1 = NeuralSpectralBlock2d(width, num_basis, patch_size, num_token)
self.process2 = NeuralSpectralBlock2d(width * 2, num_basis, patch_size, num_token)
self.process3 = NeuralSpectralBlock2d(width * 4, num_basis, patch_size, num_token)
self.process4 = NeuralSpectralBlock2d(width * 8, num_basis, patch_size, num_token)
self.process5 = NeuralSpectralBlock2d(width * 16 // factor, num_basis, patch_size, num_token)
# projectors
self.padding = padding
self.fc0 = nn.Linear(in_channels + 3, width)
self.fc1 = nn.Linear(width, 128)
self.fc2 = nn.Linear(128, out_channels)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 4, 1, 2, 3)
if not all(item == 0 for item in self.padding):
x = F.pad(x, [0, self.padding[0], 0, self.padding[1], 0, self.padding[2]])
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(self.process5(x5), self.process4(x4))
x = self.up2(x, self.process3(x3))
x = self.up3(x, self.process2(x2))
x = self.up4(x, self.process1(x1))
x = self.outc(x)
if not all(item == 0 for item in self.padding):
x = x[..., :-self.padding[2], :-self.padding[1], :-self.padding[0]]
x = x.permute(0, 2, 3, 4, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y, size_z = shape[0], shape[1], shape[2], shape[3]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1, 1).repeat([batchsize, 1, size_y, size_z, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1, 1).repeat([batchsize, size_x, 1, size_z, 1])
gridz = torch.tensor(np.linspace(0, 1, size_z), dtype=torch.float)
gridz = gridz.reshape(1, 1, 1, size_z, 1).repeat([batchsize, size_x, size_y, 1, 1])
return torch.cat((gridx, gridy, gridz), dim=-1).to(device)
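# Minimal usage sketch (not part of the original file; the argument values below are
# assumptions chosen only to illustrate the expected tensor shapes):
#   args = argparse.Namespace(in_dim=1, out_dim=1, d_model=32, num_token=4,
#                             num_basis=12, patch_size='4,4,4', padding='0,0,0')
#   model = Model(args)
#   x = torch.rand(2, 64, 64, 64, args.in_dim)    # (B, H, W, T, in_dim)
#   y = model(x)                                   # (B, H, W, T, out_dim)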
| 9,849 | 41.094017 | 126 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/utils/adam.py | import math
import torch
from torch import Tensor
from typing import List, Optional
from torch.optim.optimizer import Optimizer
def adam(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[int],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
r"""Functional API that performs Adam algorithm computation.
See :class:`~torch.optim.Adam` for details.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
step_size = lr / bias_correction1
param.addcdiv_(exp_avg, denom, value=-step_size)
class Adam(Optimizer):
r"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
The implementation of the L2 penalty follows changes proposed in
`Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['amsgrad']:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if group['amsgrad']:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
# update the steps for each param group update
state['step'] += 1
# record the step after step update
state_steps.append(state['step'])
adam(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=group['amsgrad'],
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'])
return loss
| 6,563 | 39.02439 | 120 | py |
Latent-Spectral-Models | Latent-Spectral-Models-main/utils/utilities3.py | import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
import operator
from functools import reduce
#################################################
# Utilities
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = True
self.h5 = False
self._load_file()
def _load_file(self):
if self.file_path[-3:] == '.h5':
self.data = h5py.File(self.file_path, 'r')
self.h5 = True
else:
try:
self.data = scipy.io.loadmat(self.file_path)
except:
self.data = h5py.File(self.file_path, 'r')
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if self.h5:
x = x[()]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
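# Usage sketch (the file name and field key below are hypothetical):
#   reader = MatReader('darcy_train.mat')
#   coeff = reader.read_field('coeff')   # returned as a float32 torch.Tensor by default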
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:, sample_idx] + self.eps # T*batch*n
mean = self.mean[:, sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low) / (mymax - mymin)
self.b = -self.a * mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a * x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b) / self.a
x = x.view(s)
return x
# loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
# Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h ** (self.d / self.p)) * torch.norm(x.reshape(num_examples, -1) - y.reshape(num_examples, -1),
self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples, -1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms / y_norms)
else:
return torch.sum(diff_norms / y_norms)
return diff_norms / y_norms
def __call__(self, x, y):
return self.rel(x, y)
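# Note: calling an LpLoss instance computes the *relative* Lp error
#   ||x - y||_p / ||y||_p, reduced by mean (size_average=True) or sum over the batch.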
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j + 1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j + 1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
def pdist(sample_1, sample_2, norm=2, eps=1e-5):
r"""Compute the matrix of all squared pairwise distances.
Arguments
---------
sample_1 : torch.Tensor or Variable
The first sample, should be of shape ``(n_1, d)``.
sample_2 : torch.Tensor or Variable
The second sample, should be of shape ``(n_2, d)``.
norm : float
The l_p norm to be used.
Returns
-------
torch.Tensor or Variable
Matrix of shape (n_1, n_2). The [i, j]-th entry is equal to
``|| sample_1[i, :] - sample_2[j, :] ||_p``."""
n_1, n_2 = sample_1.size(0), sample_2.size(0)
norm = float(norm)
if norm == 2.:
norms_1 = torch.sum(sample_1 ** 2, dim=1, keepdim=True)
norms_2 = torch.sum(sample_2 ** 2, dim=1, keepdim=True)
norms = (norms_1.expand(n_1, n_2) +
norms_2.transpose(0, 1).expand(n_1, n_2))
distances_squared = norms - 2 * sample_1.mm(sample_2.t())
return torch.sqrt(eps + torch.abs(distances_squared))
else:
dim = sample_1.size(1)
expanded_1 = sample_1.unsqueeze(1).expand(n_1, n_2, dim)
expanded_2 = sample_2.unsqueeze(0).expand(n_1, n_2, dim)
differences = torch.abs(expanded_1 - expanded_2) ** norm
inner = torch.sum(differences, dim=2, keepdim=False)
return (eps + inner) ** (1. / norm)
class MMDStatistic:
r"""The *unbiased* MMD test of :cite:`gretton2012kernel`.
The kernel used is equal to:
.. math ::
k(x, x') = \sum_{j=1}^k e^{-\alpha_j\|x - x'\|^2},
    for the :math:`\alpha_j` provided in :py:meth:`~.MMDStatistic.__call__`.
Arguments
---------
n_1: int
The number of points in the first sample.
n_2: int
The number of points in the second sample."""
def __init__(self, n_1, n_2):
self.n_1 = n_1
self.n_2 = n_2
# The three constants used in the test.
self.a00 = 1. / (n_1 * (n_1 - 1))
self.a11 = 1. / (n_2 * (n_2 - 1))
self.a01 = - 1. / (n_1 * n_2)
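        # a00, a11 and a01 are the coefficients of the unbiased MMD^2 estimator:
        #   MMD^2_u = a00 * sum_{i != j} k(x_i, x_j) + a11 * sum_{i != j} k(y_i, y_j)
        #             + 2 * a01 * sum_{i, j} k(x_i, y_j)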
def __call__(self, sample_1, sample_2, alphas, ret_matrix=False):
r"""Evaluate the statistic.
The kernel used is
.. math::
k(x, x') = \sum_{j=1}^k e^{-\alpha_j \|x - x'\|^2},
for the provided ``alphas``.
Arguments
---------
sample_1: :class:`torch:torch.autograd.Variable`
The first sample, of size ``(n_1, d)``.
sample_2: variable of shape (n_2, d)
The second sample, of size ``(n_2, d)``.
alphas : list of :class:`float`
The kernel parameters.
ret_matrix: bool
            If set, the call will also return a second variable.
This variable can be then used to compute a p-value using
:py:meth:`~.MMDStatistic.pval`.
Returns
-------
:class:`float`
The test statistic.
:class:`torch:torch.autograd.Variable`
Returned only if ``ret_matrix`` was set to true."""
sample_12 = torch.cat((sample_1, sample_2), 0)
distances = pdist(sample_12, sample_12, norm=2)
kernels = None
for alpha in alphas:
kernels_a = torch.exp(- alpha * distances ** 2)
if kernels is None:
kernels = kernels_a
else:
kernels = kernels + kernels_a
k_1 = kernels[:self.n_1, :self.n_1]
k_2 = kernels[self.n_1:, self.n_1:]
k_12 = kernels[:self.n_1, self.n_1:]
mmd = (2 * self.a01 * k_12.sum() +
self.a00 * (k_1.sum() - torch.trace(k_1)) +
self.a11 * (k_2.sum() - torch.trace(k_2)))
if ret_matrix:
return mmd, kernels
else:
return mmd
# print the number of parameters
def count_params(model):
c = 0
for p in list(model.parameters()):
c += reduce(operator.mul, list(p.size()))
return c
| 10,440 | 28.246499 | 116 | py |
FaceChat | FaceChat-main/app.py | async_mode = None
if async_mode is None:
try:
import eventlet
async_mode = "eventlet"
except ImportError:
pass
if async_mode is None:
try:
from gevent import monkey
async_mode = "gevent"
except ImportError:
pass
if async_mode is None:
async_mode = "threading"
print("async_mode is " + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == "eventlet":
import eventlet
eventlet.monkey_patch()
elif async_mode == "gevent":
from gevent import monkey
monkey.patch_all()
# The Session instance is not used for direct access, you should always use flask.session
from flask_session import Session
import os
import random
import torch
import time
from threading import Thread
import collections
import queue
from flask import Flask, render_template, request, session, redirect
from flask_socketio import SocketIO
import numpy as np
import threading
#DEEMA#
from pydub import AudioSegment
sem = threading.Semaphore(1)
# audio processing
# from transformers import AutoProcessor, WhisperForConditionalGeneration
import scipy.signal as sps
import webrtcvad
from transformers import AutoProcessor, WhisperForConditionalGeneration
from TTS.api import TTS
import openai
from flask_session import Session
# image processing
import base64, cv2
import io
from PIL import Image
from engineio.payload import Payload
from deepface import DeepFace
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
Payload.max_decode_packets = 2048
openai.api_key = os.environ["OPENAI_API_KEY"]
app = Flask(__name__)
app.debug = True
socketio = SocketIO(app, cors_allowed_origins="*")
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
def isEnglish(s):
try:
s.encode(encoding="utf-8").decode("ascii")
except UnicodeDecodeError:
return False
else:
return True
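# Builds the standard 44-byte RIFF/WAVE PCM header so raw samples can be streamed
# to the browser as a playable .wav blob.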
def genWaveHeader(sampleRate, bitsPerSample, channels, samples):
datasize = samples * channels * bitsPerSample // 8
o = bytes("RIFF", "ascii") # (4byte) Marks file as RIFF
o += (datasize + 36).to_bytes(4, "little") # (4byte) File size in bytes excluding this and RIFF marker
o += bytes("WAVE", "ascii") # (4byte) File type
o += bytes("fmt ", "ascii") # (4byte) Format Chunk Marker
o += (16).to_bytes(4, "little") # (4byte) Length of above format data
o += (1).to_bytes(2, "little") # (2byte) Format type (1 - PCM)
o += (channels).to_bytes(2, "little") # (2byte)
o += (sampleRate).to_bytes(4, "little") # (4byte)
o += (sampleRate * channels * bitsPerSample // 8).to_bytes(4, "little") # (4byte)
o += (channels * bitsPerSample // 8).to_bytes(2, "little") # (2byte)
o += (bitsPerSample).to_bytes(2, "little") # (2byte)
o += bytes("data", "ascii") # (4byte) Data Chunk Marker
o += (datasize).to_bytes(4, "little") # (4byte) Data size in bytes
return o
def Int2Float(sound):
_sound = np.copy(sound) #
abs_max = np.abs(_sound).max()
_sound = _sound.astype("float32")
if abs_max > 0:
_sound *= 1 / abs_max
audio_float32 = torch.from_numpy(_sound.squeeze())
return audio_float32
class ASR:
def __init__(self) -> None:
#**BENCHMARK**#
# it was model_name
#self.model_name = "openai/whisper-tiny" #1
#self.model_name = "openai/whisper-base" #2
#self.model_name = "openai/whisper-small" #3
self.model_name = "openai/whisper-medium" #4
#self.model_name = "openai/whisper-large" #5
self.processor = AutoProcessor.from_pretrained(self.model_name)
self.model = WhisperForConditionalGeneration.from_pretrained(self.model_name)
self.model_sample_rate = 16000
self.device = torch.device("cpu")
def __call__(self, data, sample_rate=16000) -> str:
"""
Args:
data: PCM float32 format
sample_rate: the sample rate of data
"""
is_valid = True
# first, resample the data to the model's sample_rate
if sample_rate != self.model_sample_rate:
number_of_samples = round(len(data) * float(self.model_sample_rate) / sample_rate)
data = sps.resample(data, number_of_samples)
# genearte text
inputs = self.processor(data, return_tensors="pt", sampling_rate=self.model_sample_rate)
input_features = inputs.input_features.to(self.device)
generated_ids = self.model.generate(inputs=input_features)
text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
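        # Heuristic validity check: reject transcripts that decode to fewer than
        # 4 tokens or contain non-ASCII characters (likely noise or non-English speech).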
if len(generated_ids[0]) < 4:
is_valid = False
if not isEnglish(text):
is_valid = False
return text, is_valid
def to(self, device):
self.model = self.model.to(device)
self.device = device
return self
class TTSModel:
def __init__(self) -> None:
self.model = TTS("tts_models/en/vctk/vits", gpu=True)
def __call__(self, text) -> np.float32:
wav = self.model.tts(text, speaker=self.model.speakers[33])
return wav
class AudioContext:
"""Streams raw audio from web microphone. Data is received in a separate thread, and stored in a buffer, to be read from.
"""
MIC_RATE = 16000
CHANNELS = 1
BLOCKS_PER_SECOND = 50
def __init__(self):
self.audio_buffer = queue.Queue()
def read(self):
"""Return a block of audio data, blocking if necessary."""
return self.audio_buffer.get()
def update(self, audioData):
"""Update the audio buffer."""
self.audio_buffer.put(audioData)
class GPT3Chatbot:
def __init__(self, model_name="text-davinci-003"):
self.model_name = model_name
self.instruct_prompt = \
"""Agent: Hello, How can I help you today?
"""
# """Background: Sony Life Insurance Co., Ltd. is a Japanese insurance company founded in 1979 as a joint venture between Sony Corporation and Prudential Financial and headquartered in Tokyo.
# Instruction: Be an insurance agent that works for Sony Insurance Company and chat with a customer. The response should be short and informative.
#Agent: Hello, I am an insurance agent from Sony Insurance Company. How can I help you today?
# """
self.bot_prompt = "Agent: "
self.user_prompt = "User: "
self.context = self.instruct_prompt
self.emotion_prompt = "(User is in neutral emotion)"
if "davinci-003" in model_name:
self.use_emotion_prompt = True
else:
self.use_emotion_prompt = False
def get_response(self, user_input):
if "restart" in user_input.lower() or "reset" in user_input.lower():
self.reset()
return "Hello, How can I help you today?" #"Hello, I am an insurance agent from Sony Insurance Company. How can I help you today?"
error_responses = ["Let me think...", "Give me some seconds...", "Wait a second"]
user_input = self.user_prompt + user_input + "\n"
if self.use_emotion_prompt:
user_input += self.emotion_prompt + "\n"
completion_prompt = self.context + user_input + self.bot_prompt
request_success = False
while not request_success:
try:
response = openai.Completion.create(
model=self.model_name, prompt=completion_prompt, temperature=0.95, max_tokens=128, top_p=0.95,
)
request_success = True
except Exception as e:
print(e)
error_response = random.choice(error_responses)
audio_speak(error_response)
print("Request failed, retrying...")
response = response["choices"][0]["text"].strip()
self.context += user_input + self.bot_prompt + response + "\n"
return response
def reset(self):
self.context = self.instruct_prompt
reset_audio_buffer()
# GPT models
# to be used for the BENCHMARK
chatbot = GPT3Chatbot("text-davinci-002")
#chatbot = GPT3Chatbot("text-davinci-003")
#chatbot = GPT3Chatbot("text-curie-001")
#chatbot = GPT3Chatbot("text-babbage-001")
#chatbot = GPT3Chatbot("text-ada-001")
asr_model = ASR()
tts_model = TTSModel()
# specify the running device
# device = torch.device("cuda")
# force cuda
device = torch.device('cuda')
asr_model = asr_model.to(device)
audio_buffer = queue.Queue()
audio_buffer_lock = False
def reset_audio_buffer():
global audio_buffer
audio_buffer.queue.clear()
@socketio.on("audio_listen")
def audio_listen(audioData):
global audio_context
global audio_buffer
global audio_buffer_lock
if not audio_buffer_lock:
audio_buffer.put(audioData)
@socketio.on("start_chat")
def start_chat(data):
global audio_buffer_lock
audio_buffer_lock = False
@socketio.on("stop_chat")
def stop_chat(data):
print("stopping...")
global audio_buffer_lock
audio_buffer_lock = True
session["name"] = None
sem.release()
socketio.emit('logout', "login");
# to be used for the BENCHMARK
f.close()
@socketio.on("system_init")
def system_init(audioData):
# speak
#audio_speak("Hello, I am an insurance agent from Sony Insurance Company. How can I help you today?")
audio_speak("Hello, How can I help you today?")
# # delay the next request
@app.route("/update-text", methods=["POST"])
def update_text():
text = chatbot.context
return text
@app.route("/", methods=["POST", "GET"])
def index():
print("intialized")
if not session.get("name"):
return redirect("/login")
if sem._value == 0:
return render_template("login.html", msg="Please wait, the agent is busy with another client...")
sem.acquire()
# reset the chatbot and buffer queue
chatbot.reset()
reset_audio_buffer()
return render_template("index.html")
@app.route("/login", methods=["POST", "GET"])
def login():
if request.method=='POST':
session["name"] = request.form.get('name')
return redirect("/")
return render_template("login.html")
class VADAudio:
"""Filter & segment audio with voice activity detection."""
def __init__(self, input_rate, audio_context):
self.input_rate = input_rate
self.audio_context = audio_context
self.RATE_PROCESS = 16000
self.block_size = 743
self.frame_duration_ms = 1000 * self.block_size // self.input_rate
self.sample_rate = 16000
self.silence_duration_ms = 500
self.vad = webrtcvad.Vad(mode=3)
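        # mode=3 is the most aggressive WebRTC VAD setting, i.e. the most likely to
        # classify borderline frames as non-speech.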
def vad_collector(self, padding_ms=300, ratio=0.75, frames=None):
"""Generator that yields series of consecutive audio frames comprising each utterence, separated by yielding a single None.
Determines voice activity by ratio of frames in padding_ms. Uses a buffer to include padding_ms prior to being triggered.
Example: (frame, ..., frame, None, frame, ..., frame, None, ...)
                      |---utterance---| |---utterance---|
"""
global vad_model
global vad_iterator
global audio_buffer
global audio_buffer_lock
num_padding_frames = padding_ms // self.frame_duration_ms
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
empty_frame_count = 0
max_empty_frame_count = self.silence_duration_ms // self.frame_duration_ms
while True:
if audio_buffer_lock:
continue
frame = audio_buffer.get()
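            # WebRTC VAD accepts 10/20/30 ms frames; the trailing 960 bytes are
            # 30 ms of 16 kHz 16-bit mono audio (480 samples * 2 bytes).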
is_speech = self.vad.is_speech(frame[-960:], self.sample_rate)
if not triggered:
ring_buffer.append((frame, is_speech))
# if speaking
num_voiced = len([f for f, is_speech in ring_buffer if is_speech])
if num_voiced > ratio * ring_buffer.maxlen:
triggered = True
for frame, is_speech in ring_buffer:
yield frame
ring_buffer.clear()
else:
yield frame
ring_buffer.append((frame, is_speech))
                # if not speaking
num_unvoiced = len([f for f, is_speech in ring_buffer if not is_speech])
if num_unvoiced > ratio * ring_buffer.maxlen:
                    # untrigger only after max_empty_frame_count consecutive silent frames
if empty_frame_count > max_empty_frame_count:
triggered = False
yield None
ring_buffer.clear()
empty_frame_count = 0
else:
empty_frame_count += 1
else:
# reset empty_frame_count if detects speech
empty_frame_count = 0
# to be used for the BENCHMARK
f = open(str(chatbot.model_name)+"_"+str(asr_model.model_name[asr_model.model_name.index("/")+1:])+".txt", "a")
class EngagementDetector(Thread):
def __init__(self, audio_context):
Thread.__init__(self)
self.audio_context = audio_context
self.vad_audio = VADAudio(input_rate=16000, audio_context=self.audio_context)
self.vad_model, vad_utils = torch.hub.load(repo_or_dir="snakers4/silero-vad", model="silero_vad")
(self.get_speech_ts, save_audio, read_audio, VADIterator, collect_chunks) = vad_utils
self.count = 0
def run(self):
frames = self.vad_audio.vad_collector()
wav_data = bytearray()
vad_model, vad_utils = torch.hub.load(repo_or_dir="snakers4/silero-vad", model="silero_vad")
(get_speech_ts, save_audio, read_audio, VADIterator, collect_chunks) = vad_utils
print("Listening...")
for frame in frames:
if frame is not None:
wav_data.extend(frame)
else:
data = np.frombuffer(wav_data, np.int16)
data = Int2Float(data)
# two-stage VAD
time_stamps = get_speech_ts(data, vad_model)
if len(time_stamps) > 0:
print("Speaking:", end="")
# to be used for the BENCHMARK
s_time = time.time()
text, is_asr_valid = asr_model(data, sample_rate=16000)
print(text)
if is_asr_valid:
chatbot_response = chatbot.get_response(text)
# to be used for the BENCHMARK
f.write(str(time.time()-s_time))
f.write("\n")
# speak
audio_speak(chatbot_response)
# clear buffer if speech detected
wav_data = bytearray()
def audio_speak(text):
global audio_buffer_lock
print(text)
audio_buffer_lock = True
# reset audio buffer to avoid interference
reset_audio_buffer()
audio_float32 = tts_model(text)
audio_int16 = (np.array(audio_float32, dtype=np.float32) * 32768).astype(np.int16)
wav_header = genWaveHeader(sampleRate=22050, bitsPerSample=16, channels=1, samples=len(audio_int16))
speak_data = wav_header + audio_int16.tobytes()
now = len(text.split(" "))
audio_io = io.BytesIO(speak_data)
# Create AudioSegment object from the in-memory file object
# Get duration of audio in milliseconds
duration_ms = AudioSegment.from_file(audio_io, format="wav")
duration_ms = len(duration_ms)
# Convert duration to seconds
now = duration_ms/1000.0
    # 'now' holds the spoken audio duration in seconds, sent to the client with the audio
print(f"TTS Duration: {now}")
socketio.emit("audio_speak", {"voice": speak_data, "words": now});
print(f"sending data! {text}")
time.sleep(len(audio_int16) / 22050)
audio_buffer_lock = False
def read_image_b64(base64_string):
"decode base64"
idx = base64_string.find("base64,")
base64_string = base64_string[idx + 7 :]
sbuf = io.BytesIO()
sbuf.write(base64.b64decode(base64_string, " /"))
pimg = Image.open(sbuf)
return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
def moving_average(x):
return np.mean(x)
# given 20 fps, control the image buffer
image_buffer = queue.Queue(maxsize=5)
@socketio.on("image_observe")
def image_observe(data_image):
global image_buffer
frame = read_image_b64(data_image)
image_buffer.put(frame)
class VideoProcessor(Thread):
def __init__(self, image_buffer):
Thread.__init__(self)
self.image_buffer = image_buffer
self._fps_array = [0]
def frame_generator(self):
while True:
frame = self.image_buffer.get()
yield frame
def run(self):
frames = self.frame_generator()
prev_recv_time = time.time()
fps = 0
cnt = 0
prev_box_pos = np.array([0, 0, 0, 0])
prev_scores = np.zeros(7)
emotion_beta = 0.99
box_beta = 0.2
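        # Exponential moving averages: emotion scores are smoothed heavily (beta=0.99)
        # to avoid flicker, while the face box is smoothed lightly (beta=0.2) so it can
        # still track head movement.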
for frame in frames:
try:
obj = DeepFace.analyze(
frame, actions=["emotion"], enforce_detection=False, silent=True, detector_backend="ssd"
)
if isinstance(obj, list):
obj = obj[0]
except Exception as e:
print(e)
continue
emotions, scores = zip(*obj["emotion"].items())
scores = list(scores)
# emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
# mask out disgust
scores[1] = scores[1] - 1000
# mask out fear
scores[2] = scores[2] - 1000
# mask out surprise
scores[5] = scores[5] - 1000
# give more weight to happy
# scores[3] = scores[3] * 2.0
scores = prev_scores * emotion_beta + np.array(scores) * (1 - emotion_beta)
# apply softmax
scores = np.exp(scores) / np.sum(np.exp(scores))
prev_scores = scores
index = np.argmax(scores)
pred_emotion = emotions[index]
# x, y, w, h
box_pos = np.array(list(obj["region"].values()))
if (
pred_emotion in emotions
and (box_pos[0] > 0 and box_pos[1] > 0)
and (box_pos[0] < 400 and box_pos[1] < 300)
):
box_pos = prev_box_pos * box_beta + box_pos * (1 - box_beta)
box_pos = np.rint(box_pos).astype(int)
x, y, w, h = box_pos
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.putText(
frame,
pred_emotion,
(x - 10, y - 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.9,
color=(255, 0, 0),
thickness=2,
)
# old_pred_emotion = pred_emotion
prev_box_pos = box_pos
if pred_emotion == "happy":
chatbot.emotion_prompt = "(User is in happy emotion)"
elif pred_emotion == "sad":
chatbot.emotion_prompt = "(User is in sad emotion)"
elif pred_emotion == "angry":
chatbot.emotion_prompt = "(User is in angry emotion)"
elif pred_emotion == "neutral":
chatbot.emotion_prompt = "(User is in neutral emotion)"
else:
pred_emotion = "neutral"
chatbot.emotion_prompt = "(User is in neutral emotion)"
recv_time = time.time()
cv2.putText(
frame,
"fps " + str(fps),
(10, 20),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(255, 0, 0),
thickness=1,
)
# encode it into jpeg
imgencode = cv2.imencode(".jpeg", frame, [cv2.IMWRITE_JPEG_QUALITY, 40])[1]
# base64 encode
stringData = base64.b64encode(imgencode).decode("utf-8")
b64_src = "data:image/jpeg;base64,"
stringData = b64_src + stringData
# emit the frame back
socketio.emit("image_show", stringData)
fps = 1 / (recv_time - prev_recv_time)
self._fps_array.append(fps)
fps = round(moving_average(np.array(self._fps_array)), 1)
prev_recv_time = recv_time
# print(fps_array)
cnt += 1
if cnt == 30:
self._fps_array = [fps]
cnt = 0
# Globals
audio_context = AudioContext()
engagement_detector = EngagementDetector(audio_context)
engagement_detector.start()
video_process = VideoProcessor(image_buffer)
video_process.start()
if __name__ == "__main__":
socketio.run(app, host="0.0.0.0", port=55009, debug=False, keyfile="key.pem", certfile="cert.pem")
# app.run(host='0.0.0.0', debug=True, threaded=True, port=9900, ssl_context=("cert.pem", "key.pem"))
| 21,580 | 31.748103 | 194 | py |
GraphCAD | GraphCAD-main/gin_conv_weight.py | from typing import Callable, Optional, Union
import torch
from torch import Tensor
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size
from ..inits import reset
class GINConv_w(MessagePassing):
r"""The graph isomorphism operator from the `"How Powerful are
Graph Neural Networks?" <https://arxiv.org/abs/1810.00826>`_ paper
.. math::
\mathbf{x}^{\prime}_i = h_{\mathbf{\Theta}} \left( (1 + \epsilon) \cdot
\mathbf{x}_i + \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \right)
or
.. math::
\mathbf{X}^{\prime} = h_{\mathbf{\Theta}} \left( \left( \mathbf{A} +
(1 + \epsilon) \cdot \mathbf{I} \right) \cdot \mathbf{X} \right),
    here :math:`h_{\mathbf{\Theta}}` denotes a neural network, *i.e.* an MLP.
Args:
nn (torch.nn.Module): A neural network :math:`h_{\mathbf{\Theta}}` that
maps node features :obj:`x` of shape :obj:`[-1, in_channels]` to
shape :obj:`[-1, out_channels]`, *e.g.*, defined by
:class:`torch.nn.Sequential`.
eps (float, optional): (Initial) :math:`\epsilon`-value.
(default: :obj:`0.`)
train_eps (bool, optional): If set to :obj:`True`, :math:`\epsilon`
will be a trainable parameter. (default: :obj:`False`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
Shapes:
- **input:**
node features :math:`(|\mathcal{V}|, F_{in})` or
:math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))`
if bipartite,
edge indices :math:`(2, |\mathcal{E}|)`
- **output:** node features :math:`(|\mathcal{V}|, F_{out})` or
:math:`(|\mathcal{V}_t|, F_{out})` if bipartite
"""
def __init__(self, nn: Callable, eps: float = 0., train_eps: bool = False,
**kwargs):
kwargs.setdefault('aggr', 'add')
super().__init__(**kwargs)
self.nn = nn
self.initial_eps = eps
if train_eps:
self.eps = torch.nn.Parameter(torch.Tensor([eps]))
else:
self.register_buffer('eps', torch.Tensor([eps]))
self.reset_parameters()
def reset_parameters(self):
reset(self.nn)
self.eps.data.fill_(self.initial_eps)
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_weight: OptTensor = None,
size: Size = None) -> Tensor:
""""""
if isinstance(x, Tensor):
x: OptPairTensor = (x, x)
# propagate_type: (x: OptPairTensor)
out = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=size)
x_r = x[1]
if x_r is not None:
out += (1 + self.eps) * x_r
return self.nn(out)
# def message(self, x_j: Tensor) -> Tensor:
# return x_j
def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j
def message_and_aggregate(self, adj_t: SparseTensor,
x: OptPairTensor) -> Tensor:
adj_t = adj_t.set_value(None, layout=None)
return matmul(adj_t, x[0], reduce=self.aggr)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(nn={self.nn})'
| 3,471 | 35.166667 | 102 | py |
GraphCAD | GraphCAD-main/MAG/main.py | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import random
import json
import pickle
from collections import defaultdict
from operator import itemgetter
import logging
from torch_geometric.data import Data, DataLoader
from torch.optim.lr_scheduler import _LRScheduler
from models import GraphCAD, outlierLoss
from utils import *
torch.backends.cudnn.benchmark = True
torch.autograd.set_detect_anomaly(True)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUDA_VISIBLE_DEVICES=1 python main.py --train_dir '/raid/chenbo/outlier_detection/release_data/mag_train.pkl' --test_dir '/raid/chenbo/outlier_detection/release_data/mag_test.pkl'
def add_arguments(args):
# essential paras
args.add_argument('--train_dir', type=str, help="train_dir", required = True)
args.add_argument('--test_dir', type=str, help="test_dir", required = True)
args.add_argument('--saved_dir', type=str, help="log_name", default= "saved_model")
args.add_argument('--log_name', type=str, help="log_name", default = "log")
# training paras.
args.add_argument('--epochs', type=int, help="training #epochs", default=100)
args.add_argument('--seed', type=int, help="seed", default=1)
args.add_argument('--lr', type=float, help="learning rate", default=1e-3)
args.add_argument('--min_lr', type=float, help="min lr", default=5e-4)
args.add_argument('--bs', type=int, help="batch size", default=4)
args.add_argument('--input_dim', type=int, help="input dimension", default=768)
args.add_argument('--out_dim', type=int, help="output dimension", default=768)
args.add_argument('--verbose', type=int, help="eval", default=1)
# model paras.
args.add_argument('--outer_layer', type=int, help="#layers of GraphCAD", default = 2)
args.add_argument('--inner_layer', type=int, help="#layers of node_update", default = 1)
args.add_argument('--is_global', help="whether to add global information", action = "store_false")
args.add_argument('--is_edge', help="whether to use edge update", action = "store_false")
    args.add_argument('--pooling', type=str, help="pooling type", choices=['memory', 'avg', 'sum', 'max'], default = "memory")
args.add_argument('--is_lp', help="whether to use link prediction loss", action = "store_false")
args.add_argument("--lp_weight", type = float, help="the weight of link prediction loss", default=0.1)
args = args.parse_args()
return args
def logging_builder(args):
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join(os.getcwd(), args.log_name), mode='w')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
return logger
class WarmupLinearLR(_LRScheduler):
def __init__(self, optimizer, step_size, min_lr, peak_percentage=0.1, last_epoch=-1):
self.step_size = step_size
self.peak_step = peak_percentage * step_size
self.min_lr = min_lr
super(WarmupLinearLR, self).__init__(optimizer, last_epoch)
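    # Schedule: linear warm-up from min_lr to the base lr over the first
    # peak_percentage of steps, then linear decay back towards min_lr.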
def get_lr(self):
ret = []
for tmp_min_lr, tmp_base_lr in zip(self.min_lr, self.base_lrs):
if self._step_count <= self.peak_step:
ret.append(tmp_min_lr + (tmp_base_lr - tmp_min_lr) * self._step_count / self.peak_step)
else:
ret.append(tmp_min_lr + max(0, (tmp_base_lr - tmp_min_lr) * (self.step_size - self._step_count) / (self.step_size - self.peak_step)))
return ret
if __name__ == "__main__":
args = argparse.ArgumentParser()
args = add_arguments(args)
setup_seed(args.seed)
logger = logging_builder(args)
print(args)
os.makedirs(os.path.join(os.getcwd(), args.saved_dir), exist_ok = True)
encoder = GraphCAD(logger, args, args.input_dim, args.out_dim, args.outer_layer, args.inner_layer, is_global = args.is_global, is_edge = args.is_edge, pooling= args.pooling).cuda()
criterion = outlierLoss(args, logger, is_lp = args.is_lp, lp_weight = args.lp_weight).cuda()
with open(args.train_dir, 'rb') as files:
train_data = pickle.load(files)
with open(args.test_dir, 'rb') as files:
test_data = pickle.load(files)
logger.info("# Batch: {} - {}".format(len(train_data), len(train_data) / args.bs))
optimizer = torch.optim.Adam([{'params': encoder.parameters(), 'lr': args.lr}])
optimizer.zero_grad()
max_step = int(len(train_data) / args.bs * 10)
logger.info("max_step: %d, %d, %d, %d"%(max_step, len(train_data), args.bs, args.epochs))
scheduler = WarmupLinearLR(optimizer, max_step, min_lr=[args.min_lr])
encoder.train()
epoch_num = 0
max_map = -1
max_auc = -1
max_epoch = -1
for epoch_num in range(args.epochs):
batch_loss = []
batch_contras_loss = []
batch_lp_loss = []
batch_edge_score = []
batch_labels = []
batch_index = 0
random.shuffle(train_data)
for tmp_train in tqdm(train_data):
batch_index += 1
batch_data, edge_labels = tmp_train
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_data.x, batch_data.edge_index, batch_data.edge_attr.squeeze(-1), batch_data.y, batch_data.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
overall_loss, _, contras_loss, lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
# overall_loss.backward()
overall_loss = overall_loss / args.bs
overall_loss.backward()
batch_loss.append(overall_loss.item())
batch_contras_loss.append(contras_loss.item())
batch_lp_loss.append(lp_loss.item())
if (batch_index + 1) % args.bs == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
avg_batch_loss = np.mean(np.array(batch_loss))
avg_batch_contras_loss = np.mean(np.array(batch_contras_loss))
avg_batch_lp_loss = np.mean(np.array(batch_lp_loss))
logger.info("Epoch:{} Overall loss: {:.6f} Contrastive loss: {:.6f} LP_loss: {:.6f}".format(epoch_num, avg_batch_loss, avg_batch_contras_loss, avg_batch_lp_loss))
if (epoch_num + 1) % args.verbose == 0:
encoder.eval()
test_loss = []
test_contras_loss = []
test_lp_loss = []
test_gt = []
labels_list = []
scores_list = []
with torch.no_grad():
for tmp_test in tqdm(test_data):
each_sub, edge_labels = tmp_test
node_outputs, adj_matrix, adj_weight, labels, batch_item = each_sub.x, each_sub.edge_index, each_sub.edge_attr.squeeze(-1), each_sub.y, each_sub.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
centroid = centroid.squeeze(0)
centroid_loss = centroid_loss.squeeze(0)
test_each_overall_loss, scores, test_each_contras_loss, test_each_lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
scores = scores.detach().cpu().numpy()
scores_list.append(scores)
labels = labels.detach().cpu().numpy()
test_gt.append(labels)
test_loss.append(test_each_overall_loss.item())
test_contras_loss.append(test_each_contras_loss.item())
test_lp_loss.append(test_each_lp_loss.item())
avg_test_loss = np.mean(np.array(test_loss))
avg_test_contras_loss = np.mean(np.array(test_contras_loss))
avg_test_lp_loss = np.mean(np.array(test_lp_loss))
auc, maps = MAPs(test_gt, scores_list)
logger.info("Epoch: {} Auc: {:.6f} Maps: {:.6f} Max-Auc: {:.6f} Max-Maps: {:.6f}".format(epoch_num, auc, maps, max_auc, max_map))
if maps > max_map or auc > max_auc:
max_epoch = epoch_num
max_map = maps if maps > max_map else max_map
max_auc = auc if auc > max_auc else max_auc
# state = {'encoder': encoder.state_dict()}
# torch.save(state, saved_file + "model_" + str(epoch_num))
logger.info("***************** Epoch: {} Max Auc: {:.6f} Maps: {:.6f} *******************".format(epoch_num, max_auc, max_map))
encoder.train()
optimizer.zero_grad()
logger.info("***************** Max_Epoch: {} Max Auc: {:.6f} Maps: {:.6f}*******************".format(max_epoch, max_auc, max_map)) | 9,336 | 46.637755 | 213 | py |
GraphCAD | GraphCAD-main/MAG/utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import multiprocessing
from sklearn.metrics import roc_auc_score, auc, roc_curve
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from operator import itemgetter
from scipy import sparse
import random
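# MAPs: given per-graph label and score lists, ranks each graph's nodes by ascending
# score and returns the mean ROC-AUC together with a weighted average-precision-style
# score, with per-graph weights inversely proportional to graph size.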
def MAPs(label_lists, score_lists):
assert len(label_lists) == len(score_lists)
maps = []
mean_auc = []
total_count = 0
# print(np.array(score_lists).shape)
total_nan = 0
for sub_labels, sub_scores in zip(label_lists, score_lists):
assert len(sub_labels) == len(sub_scores)
combine = [each for each in zip(sub_scores, sub_labels)]
sorted_combine = sorted(combine, key=itemgetter(0))
# print(sorted_combine)
rights = 0
ps = []
tmp_scores = []
tmp_labels = []
for index in range(len(sorted_combine)):
ins_scores, ins_labels = sorted_combine[index]
tmp_scores.append(ins_scores)
tmp_labels.append(ins_labels)
if(ins_labels == 0):
rights += 1
ps.append(rights/(index+1))
tmp_scores = np.array(tmp_scores)
nan_num = len(tmp_scores[np.isnan(tmp_scores)])
total_nan += nan_num
tmp_scores = np.nan_to_num(tmp_scores)
tmp_labels = np.array(tmp_labels)
auc = roc_auc_score(1-tmp_labels, -1 * tmp_scores)
ap = np.mean(np.array(ps))
maps.append((ap, len(sub_labels)))
mean_auc.append(auc)
total_count += len(sub_labels)
assert len(maps) == len(mean_auc) == len(label_lists)
maps_scores = 0
maps_weight = 0
for each in maps:
ap, count = each
each_w = total_count / count
maps_scores += ap * each_w
maps_weight += each_w
norm_maps = maps_scores/maps_weight
mean_auc = np.mean(np.array(mean_auc))
return mean_auc, norm_maps
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 2,237 | 27.329114 | 96 | py |
GraphCAD | GraphCAD-main/MAG/models.py | from random import sample
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
import pickle
from torch_geometric.nn import GCNConv, MessagePassing, GINConv, GATConv
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from torch_scatter import scatter_add
import math
import numpy as np
import pyro
class outlierLoss(nn.Module):
def __init__(self, args, logger, is_lp = True, lp_weight = 0.1):
super(outlierLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss().cuda()
self.dim = args.out_dim
self.temp = 0.1
self.margin_1 = 0.5
self.lamb = 0.5
self.thres = torch.tensor(0.0).cuda()
self.lp_weight = lp_weight
self.is_lp = is_lp
logger.info("is_lp: %s, lp_weight: %f"%(self.is_lp, self.lp_weight))
def cal_lp_loss(self, edge_prob, edge_labels):
# feat_all_trans: [(b x N) x C]
non_zero = torch.nonzero(edge_labels)
zero = torch.nonzero(edge_labels == 0)
pos_prob = edge_prob[non_zero][:8192]
neg_prob = edge_prob[zero]
        logits = torch.cat((pos_prob, neg_prob.view(1, neg_prob.size(0)).repeat(pos_prob.size(0), 1)), dim = 1)
logits_labels = torch.zeros([pos_prob.size(0)]).cuda().long()
lp_loss = self.cross_entropy(logits/self.temp, logits_labels)
return lp_loss
def forward(self, feat_all, q, edge_prob, edge_labels, adj_mat, batch_item, labels, raw_feat_all, raw_centroid):
# q2all: [N, 1]
# q = F.normalize(q.view(C, 1), p=2, dim=0)
# feat_all = F.normalize(feat_all, p=2, dim=1)
q2all = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
pos_len = torch.sum(labels, dim = 0)
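        # InfoNCE-style objective: each positive (label-1) node's similarity to the graph
        # centroid should exceed every negative (label-0) node's similarity; the split below
        # assumes the nodes are ordered with the positive ones first.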
neg_len = q2all.size(0) - pos_len
# pos [P]; neg [Neg]
q2all_pos, q2all_neg = torch.split(q2all, [pos_len, neg_len], dim = 0)
q2all_each_logits = torch.cat([q2all_pos.unsqueeze(-1), q2all_neg.view(1, neg_len).repeat(pos_len, 1)], dim = -1)
q2all_each_logits = q2all_each_logits.view(pos_len, neg_len + 1)
# pos: [b x P, 1]
# neg: [b x p, Neg + (b - 1) * N]
# pos_score, neg_score = torch.split(q2all_each_logits, [1, Neg + (b-1) * N], dim = -1)
logits_labels = torch.zeros([pos_len]).cuda().long()
contras_loss = self.cross_entropy(q2all_each_logits/self.temp, logits_labels)
if self.is_lp:
lp_loss = self.cal_lp_loss(edge_prob, edge_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
# outlier_loss = contras_loss + consist_loss
outlier_loss = contras_loss + self.lp_weight * lp_loss
raw_feat_all = F.normalize(raw_feat_all, p=2, dim=1)
raw_centroid = F.normalize(raw_centroid.view(self.dim, 1), p=2, dim=0)
scores = torch.mm(raw_feat_all, raw_centroid.view(self.dim, 1)).squeeze(-1)
return outlier_loss, scores, contras_loss, lp_loss
class GraphCAD(nn.Module):
def __init__(self, logger, args,in_dim, out_dim, total_layer_num, ins_layer_num, is_norm = True, is_edge = True, is_node = True, is_system = True, is_global = True, pooling = "memory"):
super(GraphCAD, self).__init__()
self.total_layer_num = total_layer_num
self.is_edge = is_edge
self.is_node = is_node
self.is_system = is_system
self.in_dim = in_dim
# edge_model
# self.edgemodel = None
if is_edge:
logger.info("EdgeUpdate")
self.edgemodel = EdgeUpdate(is_global, out_dim, 1)
# conv_model
if is_node:
logger.info("NodeUpdate")
self.node_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.node_updates.append(NodeUpdate(out_dim, out_dim, is_norm, ins_layer_num))
# sys_model
if is_system:
logger.info("SystemUpdate")
self.sys_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.sys_updates.append(SystemUpdate(out_dim, out_dim, pooling))
self.mlp_head = nn.Sequential(
nn.Linear(out_dim, out_dim),
nn.ReLU(),
nn.Linear(out_dim, out_dim)
)
self.drop_layer = nn.Dropout(0.5)
self.relu = nn.ReLU()
logger.info("is_edge: %r, is_global: %r pooling: %r"%(is_edge, is_global, pooling))
def forward(self, x, edge_index, edge_weight, batch_item, bs):
init_lens = edge_index
# x_pos, x_neg = torch.split(x.view(b, N, C), [P, Neg], dim = 1)
centroid = torch.mean(x.view(bs, -1, self.in_dim), dim = 1)
edge_prob = edge_index
x_trans_loss = x
for index in range(self.total_layer_num):
# edge update
if self.is_edge:
edge_index, edge_weight, edge_prob, x_trans_loss = self.edgemodel(x, edge_index, edge_weight, centroid, batch_item, bs)
# node update
if self.is_node:
x, saved_x = self.node_updates[index](x, edge_index, edge_weight)
# system update
if self.is_system:
centroid = self.sys_updates[index](saved_x, centroid, bs)
x_loss = self.mlp_head(x)
centroid_loss = self.mlp_head(centroid)
final_len = edge_index
return x, edge_weight, centroid, x_loss, centroid_loss, edge_prob
class edgePredictor(nn.Module):
def __init__(self, dim, is_global):
super(edgePredictor, self).__init__()
self.is_global = is_global
self.dim = dim
if is_global:
self.l2r = nn.Sequential(
nn.Linear(3 * dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
else:
self.l2r = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
def forward(self, node_features, edge_index, centroid, bs):
node_features = node_features.view(-1, self.dim)
node_j = node_features[edge_index[0]]
node_i = node_features[edge_index[1]]
if self.is_global:
residual_node_features = (node_features.view(bs, -1, self.dim) - centroid.view(bs, 1, self.dim)).view(-1, self.dim)
residual_node_j = residual_node_features[edge_index[0]]
residual_node_i = residual_node_features[edge_index[1]]
sim_vec = torch.cat((torch.abs(node_i - node_j), residual_node_i, residual_node_j), dim = 1)
else:
sim_vec = torch.abs(node_i - node_j)
prob_score = self.l2r(sim_vec)
return prob_score
class EdgeUpdate(nn.Module):
def __init__(self, is_global, feature_dim, edge_dim, load_dir = None):
super(EdgeUpdate, self).__init__()
self.feature_dim = feature_dim
self.edge_dim = edge_dim
self.temp = 0.6
self.thres_1 = torch.nn.Threshold(0.5, 0)
self.thres_2 = torch.nn.Threshold(-0.49, 1)
self.mins = torch.tensor(1e-10).cuda()
self.relu_fuc = nn.ReLU()
self.edge_skip_alpha = nn.Parameter(torch.rand(1))
self.ep_net = edgePredictor(feature_dim, is_global)
def forward(self, x, edge_index, edge_weight, centroid, batch_item, bs):
pre_prob = self.ep_net(x, edge_index, centroid, bs).squeeze(-1)
pre_adj = torch.sigmoid(pre_prob)
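        # Sample a soft edge mask with the straight-through relaxed Bernoulli so the
        # discrete edge selection stays differentiable; the final edge weight mixes the
        # original weight and the predicted probability through a learned alpha.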
sampled_edge = torch.ones([pre_adj.size(0)]).cuda()
sampled_edge = pyro.distributions.RelaxedBernoulliStraightThrough(temperature=self.temp, probs = pre_adj).rsample()
combine_weight = self.edge_skip_alpha * (sampled_edge * edge_weight) + (1-self.edge_skip_alpha) * (sampled_edge * pre_adj)
return edge_index, combine_weight, pre_adj, x
class NodeUpdate(torch.nn.Module):
def __init__(self, in_channel, out_channel, is_norm, layer_num):
super(NodeUpdate, self).__init__()
self.conv1 = GCNConv(in_channel, out_channel)
self.drop_layer = nn.Dropout(0.5)
def forward(self, x, edge_index, edge_weight = None):
his_x = []
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = self.drop_layer(x)
his_x.append(x)
return x, his_x
class SystemUpdate(nn.Module):
def __init__(self, in_channel, out_channel, pooling):
super(SystemUpdate, self).__init__()
self.in_dim = in_channel
self.out_dim = out_channel
self.pooling = pooling
def forward(self, his_x, init_c, bs):
mem_lens = len(his_x)
if self.pooling == "memory":
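            # Memory pooling: treat the stored node states as a memory bank, use the
            # current centroid as an attention query over them, and read out an updated
            # centroid layer by layer.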
for index in range(mem_lens):
tmp_mem = his_x[index].view(bs, -1, self.in_dim)
tmp_score = torch.bmm(tmp_mem, init_c.view(bs, self.in_dim, 1)).view(bs, -1)
tmp_att = F.softmax(tmp_score, dim = 1)
tmp_read = torch.sum(tmp_att.view(bs, -1, 1) * tmp_mem, dim = 1)
init_c = tmp_read
elif self.pooling == "avg":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.mean(x, dim = 1)
elif self.pooling == "sum":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.sum(x, dim = 1)
elif self.pooling == "max":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.max(x, dim = 1).values
else:
raise ValueError("No such pooling type!")
return init_c
| 9,780 | 37.507874 | 189 | py |
GraphCAD | GraphCAD-main/AMiner/main.py | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import random
import json
import pickle
from collections import defaultdict
from operator import itemgetter
import logging
from torch_geometric.data import Data, DataLoader
from torch.optim.lr_scheduler import _LRScheduler
from models import GraphCAD, outlierLoss
from utils import *
torch.backends.cudnn.benchmark = True
torch.autograd.set_detect_anomaly(True)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUDA_VISIBLE_DEVICES=1 python main.py --train_dir '/raid/chenbo/outlier_detection/release_data/aminer_train.pkl' --test_dir '/raid/chenbo/outlier_detection/release_data/aminer_test.pkl'
def add_arguments(args):
# essential paras
args.add_argument('--train_dir', type=str, help="train_dir", required = True)
args.add_argument('--test_dir', type=str, help="test_dir", required = True)
args.add_argument('--saved_dir', type=str, help="log_name", default= "saved_model")
args.add_argument('--log_name', type=str, help="log_name", default = "log")
# training paras.
args.add_argument('--epochs', type=int, help="training #epochs", default=100)
args.add_argument('--seed', type=int, help="seed", default=1)
args.add_argument('--lr', type=float, help="learning rate", default=5e-4)
args.add_argument('--min_lr', type=float, help="min lr", default=1e-4)
args.add_argument('--bs', type=int, help="batch size", default=2)
args.add_argument('--input_dim', type=int, help="input dimension", default=768)
args.add_argument('--out_dim', type=int, help="output dimension", default=768)
args.add_argument('--verbose', type=int, help="eval", default=1)
# model paras.
args.add_argument('--outer_layer', type=int, help="#layers of GraphCAD", default = 2)
args.add_argument('--inner_layer', type=int, help="#layers of node_update", default = 1)
args.add_argument('--is_global', help="whether to add global information", action = "store_false")
args.add_argument('--is_edge', help="whether to use edge update", action = "store_false")
    args.add_argument('--pooling', type=str, help="pooling type", choices=['memory', 'avg', 'sum', 'max'], default = "memory")
args.add_argument('--is_lp', help="whether to use link prediction loss", action = "store_false")
args.add_argument("--lp_weight", type = float, help="the weight of link prediction loss", default=0.1)
args = args.parse_args()
return args
def logging_builder(args):
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join(os.getcwd(), args.log_name), mode='w')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
return logger
class WarmupLinearLR(_LRScheduler):
def __init__(self, optimizer, step_size, min_lr, peak_percentage=0.1, last_epoch=-1):
self.step_size = step_size
self.peak_step = peak_percentage * step_size
self.min_lr = min_lr
super(WarmupLinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
ret = []
for tmp_min_lr, tmp_base_lr in zip(self.min_lr, self.base_lrs):
if self._step_count <= self.peak_step:
ret.append(tmp_min_lr + (tmp_base_lr - tmp_min_lr) * self._step_count / self.peak_step)
else:
ret.append(tmp_min_lr + max(0, (tmp_base_lr - tmp_min_lr) * (self.step_size - self._step_count) / (self.step_size - self.peak_step)))
return ret
if __name__ == "__main__":
args = argparse.ArgumentParser()
args = add_arguments(args)
setup_seed(args.seed)
logger = logging_builder(args)
print(args)
os.makedirs(os.path.join(os.getcwd(), args.saved_dir), exist_ok = True)
encoder = GraphCAD(logger, args, args.input_dim, args.out_dim, args.outer_layer, args.inner_layer, is_global = args.is_global, is_edge = args.is_edge, pooling= args.pooling).cuda()
criterion = outlierLoss(args, logger, is_lp = args.is_lp, lp_weight = args.lp_weight).cuda()
with open(args.train_dir, 'rb') as files:
train_data = pickle.load(files)
with open(args.test_dir, 'rb') as files:
test_data = pickle.load(files)
logger.info("# Batch: {} - {}".format(len(train_data), len(train_data) / args.bs))
optimizer = torch.optim.Adam([{'params': encoder.parameters(), 'lr': args.lr}])
optimizer.zero_grad()
max_step = int(len(train_data) / args.bs * 10)
logger.info("max_step: %d, %d, %d, %d"%(max_step, len(train_data), args.bs, args.epochs))
scheduler = WarmupLinearLR(optimizer, max_step, min_lr=[args.min_lr])
encoder.train()
epoch_num = 0
max_map = -1
max_auc = -1
max_epoch = -1
for epoch_num in range(args.epochs):
batch_loss = []
batch_contras_loss = []
batch_lp_loss = []
batch_edge_score = []
batch_labels = []
batch_index = 0
random.shuffle(train_data)
for tmp_train in tqdm(train_data):
batch_index += 1
batch_data, edge_labels = tmp_train
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_data.x, batch_data.edge_index, batch_data.edge_attr.squeeze(-1), batch_data.y, batch_data.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
overall_loss, _, contras_loss, lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
# overall_loss.backward()
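            # Gradient accumulation: each graph's loss is scaled by 1/bs and backpropagated
            # immediately; optimizer.step() below only fires once every `bs` graphs, so the
            # effective batch size is bs even though graphs are processed one at a time.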
overall_loss = overall_loss / args.bs
overall_loss.backward()
batch_loss.append(overall_loss.item())
batch_contras_loss.append(contras_loss.item())
batch_lp_loss.append(lp_loss.item())
if (batch_index + 1) % args.bs == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
avg_batch_loss = np.mean(np.array(batch_loss))
avg_batch_contras_loss = np.mean(np.array(batch_contras_loss))
avg_batch_lp_loss = np.mean(np.array(batch_lp_loss))
logger.info("Epoch:{} Overall loss: {:.6f} Contrastive loss: {:.6f} LP_loss: {:.6f}".format(epoch_num, avg_batch_loss, avg_batch_contras_loss, avg_batch_lp_loss))
if (epoch_num + 1) % args.verbose == 0:
encoder.eval()
test_loss = []
test_contras_loss = []
test_lp_loss = []
test_gt = []
labels_list = []
scores_list = []
with torch.no_grad():
for tmp_test in tqdm(test_data):
each_sub, edge_labels = tmp_test
node_outputs, adj_matrix, adj_weight, labels, batch_item = each_sub.x, each_sub.edge_index, each_sub.edge_attr.squeeze(-1), each_sub.y, each_sub.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
centroid = centroid.squeeze(0)
centroid_loss = centroid_loss.squeeze(0)
test_each_overall_loss, scores, test_each_contras_loss, test_each_lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
scores = scores.detach().cpu().numpy()
scores_list.append(scores)
labels = labels.detach().cpu().numpy()
test_gt.append(labels)
test_loss.append(test_each_overall_loss.item())
test_contras_loss.append(test_each_contras_loss.item())
test_lp_loss.append(test_each_lp_loss.item())
avg_test_loss = np.mean(np.array(test_loss))
avg_test_contras_loss = np.mean(np.array(test_contras_loss))
avg_test_lp_loss = np.mean(np.array(test_lp_loss))
auc, maps = MAPs(test_gt, scores_list)
logger.info("Epoch: {} Auc: {:.6f} Maps: {:.6f} Max-Auc: {:.6f} Max-Maps: {:.6f}".format(epoch_num, auc, maps, max_auc, max_map))
if maps > max_map or auc > max_auc:
max_epoch = epoch_num
max_map = maps if maps > max_map else max_map
max_auc = auc if auc > max_auc else max_auc
# state = {'encoder': encoder.state_dict()}
# torch.save(state, saved_file + "model_" + str(epoch_num))
logger.info("***************** Epoch: {} Max Auc: {:.6f} Maps: {:.6f} *******************".format(epoch_num, max_auc, max_map))
encoder.train()
optimizer.zero_grad()
logger.info("***************** Max_Epoch: {} Max Auc: {:.6f} Maps: {:.6f}*******************".format(max_epoch, max_auc, max_map)) | 9,342 | 46.668367 | 213 | py |
GraphCAD | GraphCAD-main/AMiner/utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import multiprocessing
from sklearn.metrics import roc_auc_score, auc, roc_curve
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from operator import itemgetter
from scipy import sparse
import random
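# MAPs computes two ranking metrics per graph from (score, label) pairs, where label-0 nodes
# (the anomalies in this setup) are expected to receive the lowest scores:
# - per-graph AUC via roc_auc_score(1 - labels, -scores), i.e. label 0 as the positive class;
# - per-graph average precision over nodes sorted by ascending score.
# The APs are combined with weight total_count / len(graph) per graph (smaller graphs weigh
# more), while the AUCs are averaged uniformly; NaN scores are zeroed before evaluation.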
def MAPs(label_lists, score_lists):
assert len(label_lists) == len(score_lists)
maps = []
mean_auc = []
total_count = 0
# print(np.array(score_lists).shape)
total_nan = 0
for sub_labels, sub_scores in zip(label_lists, score_lists):
assert len(sub_labels) == len(sub_scores)
combine = [each for each in zip(sub_scores, sub_labels)]
sorted_combine = sorted(combine, key=itemgetter(0))
# print(sorted_combine)
rights = 0
ps = []
tmp_scores = []
tmp_labels = []
for index in range(len(sorted_combine)):
ins_scores, ins_labels = sorted_combine[index]
tmp_scores.append(ins_scores)
tmp_labels.append(ins_labels)
if(ins_labels == 0):
rights += 1
ps.append(rights/(index+1))
tmp_scores = np.array(tmp_scores)
nan_num = len(tmp_scores[np.isnan(tmp_scores)])
total_nan += nan_num
tmp_scores = np.nan_to_num(tmp_scores)
tmp_labels = np.array(tmp_labels)
auc = roc_auc_score(1-tmp_labels, -1 * tmp_scores)
ap = np.mean(np.array(ps))
maps.append((ap, len(sub_labels)))
mean_auc.append(auc)
total_count += len(sub_labels)
assert len(maps) == len(mean_auc) == len(label_lists)
maps_scores = 0
maps_weight = 0
for each in maps:
ap, count = each
each_w = total_count / count
maps_scores += ap * each_w
maps_weight += each_w
norm_maps = maps_scores/maps_weight
mean_auc = np.mean(np.array(mean_auc))
return mean_auc, norm_maps
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 2,237 | 27.329114 | 96 | py |
GraphCAD | GraphCAD-main/AMiner/models.py | from random import sample
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
import pickle
from torch_geometric.nn import GCNConv, MessagePassing, GINConv, GATConv
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from torch_scatter import scatter_add
import math
import numpy as np
import pyro
class outlierLoss(nn.Module):
def __init__(self, args, logger, is_lp = True, lp_weight = 0.1):
super(outlierLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss().cuda()
self.dim = args.out_dim
self.temp = 0.1
self.margin_1 = 0.5
self.lamb = 0.5
self.thres = torch.tensor(0.0).cuda()
self.lp_weight = lp_weight
self.is_lp = is_lp
logger.info("is_lp: %s, lp_weight: %f"%(self.is_lp, self.lp_weight))
def cal_lp_loss(self, edge_prob, edge_labels):
# feat_all_trans: [(b x N) x C]
non_zero = torch.nonzero(edge_labels)
zero = torch.nonzero(edge_labels == 0)
pos_prob = edge_prob[non_zero][:8192]
neg_prob = edge_prob[zero]
        logits = torch.cat((pos_prob, neg_prob.view(1, neg_prob.size(0)).repeat(pos_prob.size(0), 1)), dim = 1)
logits_labels = torch.zeros([pos_prob.size(0)]).cuda().long()
lp_loss = self.cross_entropy(logits/self.temp, logits_labels)
return lp_loss
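    # cal_lp_loss above is an InfoNCE-style link-prediction loss: each positive edge's
    # predicted probability (capped at 8192 positives) is contrasted against the full set of
    # negative-edge probabilities, and cross-entropy at temperature self.temp pushes the
    # positive logit (index 0) above all negatives.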
def forward(self, feat_all, q, edge_prob, edge_labels, adj_mat, batch_item, labels, raw_feat_all, raw_centroid):
# q2all: [N, 1]
# q = F.normalize(q.view(C, 1), p=2, dim=0)
# feat_all = F.normalize(feat_all, p=2, dim=1)
q2all = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
pos_len = torch.sum(labels, dim = 0)
neg_len = q2all.size(0) - pos_len
# pos [P]; neg [Neg]
q2all_pos, q2all_neg = torch.split(q2all, [pos_len, neg_len], dim = 0)
q2all_each_logits = torch.cat([q2all_pos.unsqueeze(-1), q2all_neg.view(1, neg_len).repeat(pos_len, 1)], dim = -1)
q2all_each_logits = q2all_each_logits.view(pos_len, neg_len + 1)
# pos: [b x P, 1]
# neg: [b x p, Neg + (b - 1) * N]
# pos_score, neg_score = torch.split(q2all_each_logits, [1, Neg + (b-1) * N], dim = -1)
logits_labels = torch.zeros([pos_len]).cuda().long()
contras_loss = self.cross_entropy(q2all_each_logits/self.temp, logits_labels)
if self.is_lp:
lp_loss = self.cal_lp_loss(edge_prob, edge_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
# outlier_loss = contras_loss + consist_loss
outlier_loss = contras_loss + self.lp_weight * lp_loss
raw_feat_all = F.normalize(raw_feat_all, p=2, dim=1)
raw_centroid = F.normalize(raw_centroid.view(self.dim, 1), p=2, dim=0)
scores = torch.mm(raw_feat_all, raw_centroid.view(self.dim, 1)).squeeze(-1)
return outlier_loss, scores, contras_loss, lp_loss
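# GraphCAD stacks `total_layer_num` rounds of three updates over the input graph:
# (1) EdgeUpdate re-estimates edge weights with a learned edge predictor,
# (2) NodeUpdate propagates node features over the re-weighted graph with a GCN layer,
# (3) SystemUpdate refreshes the graph-level centroid (the "system" representation).
# The MLP head projects both node features and the centroid into the space used by
# outlierLoss; at evaluation time nodes are scored by their similarity to the centroid.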
class GraphCAD(nn.Module):
def __init__(self, logger, args,in_dim, out_dim, total_layer_num, ins_layer_num, is_norm = True, is_edge = True, is_node = True, is_system = True, is_global = True, pooling = "memory"):
super(GraphCAD, self).__init__()
self.total_layer_num = total_layer_num
self.is_edge = is_edge
self.is_node = is_node
self.is_system = is_system
self.in_dim = in_dim
# edge_model
# self.edgemodel = None
if is_edge:
logger.info("EdgeUpdate")
self.edgemodel = EdgeUpdate(is_global, out_dim, 1)
# conv_model
if is_node:
logger.info("NodeUpdate")
self.node_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.node_updates.append(NodeUpdate(out_dim, out_dim, is_norm, ins_layer_num))
# sys_model
if is_system:
logger.info("SystemUpdate")
self.sys_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.sys_updates.append(SystemUpdate(out_dim, out_dim, pooling))
self.mlp_head = nn.Sequential(
nn.Linear(out_dim, out_dim),
nn.ReLU(),
nn.Linear(out_dim, out_dim)
)
self.drop_layer = nn.Dropout(0.5)
self.relu = nn.ReLU()
logger.info("is_edge: %r, is_global: %r pooling: %r"%(is_edge, is_global, pooling))
def forward(self, x, edge_index, edge_weight, batch_item, bs):
init_lens = edge_index
# x_pos, x_neg = torch.split(x.view(b, N, C), [P, Neg], dim = 1)
centroid = torch.mean(x.view(bs, -1, self.in_dim), dim = 1)
edge_prob = edge_index
x_trans_loss = x
for index in range(self.total_layer_num):
# edge update
if self.is_edge:
edge_index, edge_weight, edge_prob, x_trans_loss = self.edgemodel(x, edge_index, edge_weight, centroid, batch_item, bs)
# node update
if self.is_node:
x, saved_x = self.node_updates[index](x, edge_index, edge_weight)
# system update
if self.is_system:
centroid = self.sys_updates[index](saved_x, centroid, bs)
x_loss = self.mlp_head(x)
centroid_loss = self.mlp_head(centroid)
final_len = edge_index
return x, edge_weight, centroid, x_loss, centroid_loss, edge_prob
class edgePredictor(nn.Module):
def __init__(self, dim, is_global):
super(edgePredictor, self).__init__()
self.is_global = is_global
self.dim = dim
if is_global:
self.l2r = nn.Sequential(
nn.Linear(3 * dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
else:
self.l2r = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
def forward(self, node_features, edge_index, centroid, bs):
node_features = node_features.view(-1, self.dim)
node_j = node_features[edge_index[0]]
node_i = node_features[edge_index[1]]
if self.is_global:
residual_node_features = (node_features.view(bs, -1, self.dim) - centroid.view(bs, 1, self.dim)).view(-1, self.dim)
residual_node_j = residual_node_features[edge_index[0]]
residual_node_i = residual_node_features[edge_index[1]]
sim_vec = torch.cat((torch.abs(node_i - node_j), residual_node_i, residual_node_j), dim = 1)
else:
sim_vec = torch.abs(node_i - node_j)
prob_score = self.l2r(sim_vec)
return prob_score
class EdgeUpdate(nn.Module):
def __init__(self, is_global, feature_dim, edge_dim, load_dir = None):
super(EdgeUpdate, self).__init__()
self.feature_dim = feature_dim
self.edge_dim = edge_dim
self.temp = 0.6
self.thres_1 = torch.nn.Threshold(0.5, 0)
self.thres_2 = torch.nn.Threshold(-0.49, 1)
self.mins = torch.tensor(1e-10).cuda()
self.relu_fuc = nn.ReLU()
self.edge_skip_alpha = nn.Parameter(torch.rand(1))
self.ep_net = edgePredictor(feature_dim, is_global)
def forward(self, x, edge_index, edge_weight, centroid, batch_item, bs):
pre_prob = self.ep_net(x, edge_index, centroid, bs).squeeze(-1)
pre_adj = torch.sigmoid(pre_prob)
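        # Differentiable edge sampling: draw a relaxed straight-through Bernoulli mask from the
        # predicted edge probabilities, then mix the sampled original edge weights with the
        # sampled predicted probabilities using the learned gate edge_skip_alpha.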
sampled_edge = torch.ones([pre_adj.size(0)]).cuda()
sampled_edge = pyro.distributions.RelaxedBernoulliStraightThrough(temperature=self.temp, probs = pre_adj).rsample()
combine_weight = self.edge_skip_alpha * (sampled_edge * edge_weight) + (1-self.edge_skip_alpha) * (sampled_edge * pre_adj)
return edge_index, combine_weight, pre_adj, x
class NodeUpdate(torch.nn.Module):
def __init__(self, in_channel, out_channel, is_norm, layer_num):
super(NodeUpdate, self).__init__()
self.conv1 = GCNConv(in_channel, out_channel)
self.drop_layer = nn.Dropout(0.5)
def forward(self, x, edge_index, edge_weight = None):
his_x = []
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = self.drop_layer(x)
his_x.append(x)
return x, his_x
class SystemUpdate(nn.Module):
def __init__(self, in_channel, out_channel, pooling):
super(SystemUpdate, self).__init__()
self.in_dim = in_channel
self.out_dim = out_channel
self.pooling = pooling
def forward(self, his_x, init_c, bs):
mem_lens = len(his_x)
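        # "memory" pooling treats each layer's node states as a memory bank and performs an
        # attention read with the current centroid as query, updating the centroid after each
        # read; the other branches are plain poolings over the last layer's states. Note that
        # the accompanying CLI exposes a 'min' choice while this module implements 'sum' instead.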
if self.pooling == "memory":
for index in range(mem_lens):
tmp_mem = his_x[index].view(bs, -1, self.in_dim)
tmp_score = torch.bmm(tmp_mem, init_c.view(bs, self.in_dim, 1)).view(bs, -1)
tmp_att = F.softmax(tmp_score, dim = 1)
tmp_read = torch.sum(tmp_att.view(bs, -1, 1) * tmp_mem, dim = 1)
init_c = tmp_read
elif self.pooling == "avg":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.mean(x, dim = 1)
elif self.pooling == "sum":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.sum(x, dim = 1)
elif self.pooling == "max":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.max(x, dim = 1).values
else:
raise ValueError("No such pooling type!")
return init_c
| 9,780 | 37.507874 | 189 | py |
GraphCAD | GraphCAD-main/Yelp/main.py | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import random
import json
import pickle
from collections import defaultdict
from operator import itemgetter
import logging
from torch_geometric.data import Data, DataLoader
from torch.optim.lr_scheduler import _LRScheduler
from models import GraphCAD, outlierLoss
from utils import *
torch.backends.cudnn.benchmark = True
torch.autograd.set_detect_anomaly(True)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUDA_VISIBLE_DEVICES=1 python main.py --data_dir '/raid/chenbo/outlier_detection/release_data/yelp_data.pkl'
def add_arguments(args):
# essential paras
args.add_argument('--data_dir', type=str, help="data_dir", required = True)
    args.add_argument('--saved_dir', type=str, help="directory for saved models", default="saved_model")
args.add_argument('--log_name', type=str, help="log_name", default = "log")
# training paras.
args.add_argument('--epochs', type=int, help="training #epochs", default=2000)
args.add_argument('--seed', type=int, help="seed", default=1)
args.add_argument('--lr', type=float, help="learning rate", default=5e-4)
args.add_argument('--min_lr', type=float, help="min lr", default=1e-4)
args.add_argument('--bs', type=int, help="batch size", default=1)
args.add_argument('--input_dim', type=int, help="input dimension", default=100)
args.add_argument('--out_dim', type=int, help="output dimension", default=100)
args.add_argument('--verbose', type=int, help="eval", default=1)
# model paras.
args.add_argument('--outer_layer', type=int, help="#layers of GraphCAD", default = 2)
args.add_argument('--inner_layer', type=int, help="#layers of node_update", default = 1)
args.add_argument('--is_global', help="whether to add global information", action = "store_false")
args.add_argument('--is_edge', help="whether to use edge update", action = "store_false")
    args.add_argument('--pooling', type=str, help="pooling type", choices=['memory', 'avg', 'min', 'max'], default = "memory")
args.add_argument('--is_lp', help="whether to use link prediction loss", action = "store_false")
args.add_argument("--lp_weight", type = float, help="the weight of link prediction loss", default=0.1)
args = args.parse_args()
return args
def logging_builder(args):
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join(os.getcwd(), args.log_name), mode='w')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
return logger
class WarmupLinearLR(_LRScheduler):
def __init__(self, optimizer, step_size, min_lr, peak_percentage=0.1, last_epoch=-1):
self.step_size = step_size
self.peak_step = peak_percentage * step_size
self.min_lr = min_lr
super(WarmupLinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
ret = []
for tmp_min_lr, tmp_base_lr in zip(self.min_lr, self.base_lrs):
if self._step_count <= self.peak_step:
ret.append(tmp_min_lr + (tmp_base_lr - tmp_min_lr) * self._step_count / self.peak_step)
else:
ret.append(tmp_min_lr + max(0, (tmp_base_lr - tmp_min_lr) * (self.step_size - self._step_count) / (self.step_size - self.peak_step)))
return ret
if __name__ == "__main__":
args = argparse.ArgumentParser()
args = add_arguments(args)
setup_seed(args.seed)
logger = logging_builder(args)
print(args)
os.makedirs(os.path.join(os.getcwd(), args.saved_dir), exist_ok = True)
encoder = GraphCAD(logger, args, args.input_dim, args.out_dim, args.outer_layer, args.inner_layer, is_global = args.is_global, is_edge = args.is_edge, pooling= args.pooling).cuda()
criterion = outlierLoss(args, logger, is_lp = args.is_lp, lp_weight = args.lp_weight).cuda()
with open(args.data_dir, 'rb') as files:
data_collection = pickle.load(files)
data, y, train_mask, train_label_index, train_edge_ids, train_edge_labels, test_mask, test_label_index,test_edge_ids, test_edge_labels = data_collection
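    # The pickle holds a single transductive graph: node features/edges plus train/test node
    # masks, label indices restricting the loss to labelled nodes, and edge-id/edge-label
    # splits used by the link-prediction loss on each side.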
# for older version of pyg
# data = Data(**data.__dict__)
edges_attrs = torch.ones(data.edge_index.size(0))
data_set = DataLoader([Data(x = data.x.cuda(), edge_index = data.edge_index.cuda().t(), y = y.cuda(), edge_attr = edges_attrs.cuda().unsqueeze(-1))], batch_size=1, shuffle = True)
optimizer = torch.optim.Adam([{'params': encoder.parameters(), 'lr': args.lr}])
optimizer.zero_grad()
logger.info(f"Warm up schedular: {args.epochs}")
scheduler = WarmupLinearLR(optimizer, args.epochs, min_lr=[args.min_lr])
encoder.train()
epoch_num = 0
max_map = -1
max_auc = -1
max_epoch = -1
for epoch_num in range(args.epochs):
batch_loss = []
batch_contras_loss = []
batch_lp_loss = []
batch_edge_score = []
batch_labels = []
batch_index = 0
for batch_data in tqdm(data_set):
batch_index += 1
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_data.x, batch_data.edge_index, batch_data.edge_attr.squeeze(-1), batch_data.y, batch_data.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
# training index
node_outputs = node_outputs[train_mask][train_label_index]
output_loss = output_loss[train_mask][train_label_index]
edge_prob = edge_prob[train_edge_ids]
edge_labels = train_edge_labels.cuda()
labels = labels[train_mask][train_label_index]
overall_loss, _, contras_loss, lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
# overall_loss.backward()
overall_loss = overall_loss / args.bs
overall_loss.backward()
batch_loss.append(overall_loss.item())
batch_contras_loss.append(contras_loss.item())
batch_lp_loss.append(lp_loss.item())
if (batch_index + 1) % args.bs == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
avg_batch_loss = np.mean(np.array(batch_loss))
avg_batch_contras_loss = np.mean(np.array(batch_contras_loss))
avg_batch_lp_loss = np.mean(np.array(batch_lp_loss))
logger.info("Epoch:{} Overall loss: {:.6f} Contrastive loss: {:.6f} LP_loss: {:.6f}".format(epoch_num, avg_batch_loss, avg_batch_contras_loss, avg_batch_lp_loss))
if (epoch_num + 1) % args.verbose == 0:
encoder.eval()
test_loss = []
test_contras_loss = []
test_lp_loss = []
test_gt = []
labels_list = []
scores_list = []
with torch.no_grad():
for batch_test in tqdm(data_set):
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_test.x, batch_test.edge_index, batch_test.edge_attr.squeeze(-1), batch_test.y, batch_test.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
centroid = centroid.squeeze(0)
centroid_loss = centroid_loss.squeeze(0)
# test index
node_outputs = node_outputs[test_mask][test_label_index]
output_loss = output_loss[test_mask][test_label_index]
edge_prob = edge_prob[test_edge_ids]
edge_labels = test_edge_labels.cuda()
labels = labels[test_mask][test_label_index]
test_each_overall_loss, scores, test_each_contras_loss, test_each_lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
scores = scores.detach().cpu().numpy()
scores_list.append(scores)
labels = labels.detach().cpu().numpy()
test_gt.append(labels)
test_loss.append(test_each_overall_loss.item())
test_contras_loss.append(test_each_contras_loss.item())
test_lp_loss.append(test_each_lp_loss.item())
avg_test_loss = np.mean(np.array(test_loss))
avg_test_contras_loss = np.mean(np.array(test_contras_loss))
avg_test_lp_loss = np.mean(np.array(test_lp_loss))
auc, maps = MAPs(test_gt, scores_list)
logger.info("Epoch: {} Auc: {:.6f} Maps: {:.6f} Max-Auc: {:.6f} Max-Maps: {:.6f}".format(epoch_num, auc, maps, max_auc, max_map))
if maps > max_map or auc > max_auc:
max_epoch = epoch_num
max_map = maps if maps > max_map else max_map
max_auc = auc if auc > max_auc else max_auc
# state = {'encoder': encoder.state_dict()}
# torch.save(state, saved_file + "model_" + str(epoch_num))
logger.info("***************** Epoch: {} Max Auc: {:.6f} Maps: {:.6f} *******************".format(epoch_num, max_auc, max_map))
encoder.train()
optimizer.zero_grad()
logger.info("***************** Max_Epoch: {} Max Auc: {:.6f} Maps: {:.6f}*******************".format(max_epoch, max_auc, max_map)) | 9,996 | 46.379147 | 213 | py |
GraphCAD | GraphCAD-main/Yelp/utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import multiprocessing
from sklearn.metrics import roc_auc_score, auc, roc_curve
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from operator import itemgetter
from scipy import sparse
import random
def MAPs(label_lists, score_lists):
assert len(label_lists) == len(score_lists)
maps = []
mean_auc = []
total_count = 0
# print(np.array(score_lists).shape)
total_nan = 0
for sub_labels, sub_scores in zip(label_lists, score_lists):
assert len(sub_labels) == len(sub_scores)
combine = [each for each in zip(sub_scores, sub_labels)]
sorted_combine = sorted(combine, key=itemgetter(0))
# print(sorted_combine)
rights = 0
ps = []
tmp_scores = []
tmp_labels = []
for index in range(len(sorted_combine)):
ins_scores, ins_labels = sorted_combine[index]
tmp_scores.append(ins_scores)
tmp_labels.append(ins_labels)
if(ins_labels == 0):
rights += 1
ps.append(rights/(index+1))
tmp_scores = np.array(tmp_scores)
nan_num = len(tmp_scores[np.isnan(tmp_scores)])
total_nan += nan_num
tmp_scores = np.nan_to_num(tmp_scores)
tmp_labels = np.array(tmp_labels)
auc = roc_auc_score(1-tmp_labels, -1 * tmp_scores)
ap = np.mean(np.array(ps))
maps.append((ap, len(sub_labels)))
mean_auc.append(auc)
total_count += len(sub_labels)
assert len(maps) == len(mean_auc) == len(label_lists)
maps_scores = 0
maps_weight = 0
for each in maps:
ap, count = each
each_w = total_count / count
maps_scores += ap * each_w
maps_weight += each_w
norm_maps = maps_scores/maps_weight
mean_auc = np.mean(np.array(mean_auc))
return mean_auc, norm_maps
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 2,237 | 27.329114 | 96 | py |
GraphCAD | GraphCAD-main/Yelp/models.py | from random import sample
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
import pickle
from torch_geometric.nn import GINConv_w as GINConv
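# Note: GINConv_w is not part of the stock torch_geometric API; it is assumed to be a locally
# patched GINConv that accepts per-edge weights (see the edge_weight argument in NodeUpdate).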
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from torch_scatter import scatter_add
import math
import numpy as np
import pyro
class outlierLoss(nn.Module):
def __init__(self, args, logger, is_lp = True, lp_weight = 0.1):
super(outlierLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss().cuda()
self.dim = args.out_dim
self.temp = 0.1
self.margin_1 = 0.5
self.lamb = 0.5
self.thres = torch.tensor(0.0).cuda()
self.lp_weight = lp_weight
self.is_lp = is_lp
logger.info("is_lp: %s, lp_weight: %f"%(self.is_lp, self.lp_weight))
def cal_lp_loss(self, edge_prob, edge_labels):
# feat_all_trans: [(b x N) x C]
non_zero = torch.nonzero(edge_labels)
zero = torch.nonzero(edge_labels == 0)
if non_zero.size(0) != 0 and zero.size(0) != 0:
pos_prob = edge_prob[non_zero][:8192]
neg_prob = edge_prob[zero][:8192]
            logits = torch.cat((pos_prob, neg_prob.view(1, neg_prob.size(0)).repeat(pos_prob.size(0), 1)), dim = 1)
logits_labels = torch.zeros([pos_prob.size(0)]).cuda().long()
lp_loss = self.cross_entropy(logits/self.temp, logits_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
return lp_loss
def forward(self, feat_all, q, edge_prob, edge_labels, adj_mat, batch_item, labels, raw_feat_all, raw_centroid):
# q2all: [N, 1]
q = F.normalize(q.view(self.dim, 1), p=2, dim=0)
feat_all = F.normalize(feat_all, p=2, dim=1)
q2all = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
pos_len = torch.sum(labels, dim = 0)
neg_len = q2all.size(0) - pos_len
# pos [P]; neg [Neg]
q2all_pos, q2all_neg = torch.split(q2all, [pos_len, neg_len], dim = 0)
q2all_each_logits = torch.cat([q2all_pos.unsqueeze(-1), q2all_neg.view(1, neg_len).repeat(pos_len, 1)], dim = -1)
# print(q2all_each_logits.size())
q2all_each_logits = q2all_each_logits.view(pos_len, neg_len + 1)
# pos: [b x P, 1]
        # neg: [b x p, Neg + (b - 1) * N]
logits_labels = torch.zeros([pos_len]).cuda().long()
contras_loss = self.cross_entropy(q2all_each_logits/self.temp, logits_labels)
if self.is_lp:
lp_loss = self.cal_lp_loss(edge_prob, edge_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
outlier_loss = contras_loss + self.lp_weight * lp_loss
scores = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
return outlier_loss, scores, contras_loss, lp_loss
class GraphCAD(nn.Module):
def __init__(self, logger, args,in_dim, out_dim, total_layer_num, ins_layer_num, is_norm = True, is_edge = True, is_node = True, is_system = True, is_global = True, pooling = "memory"):
super(GraphCAD, self).__init__()
self.total_layer_num = total_layer_num
self.is_edge = is_edge
self.is_node = is_node
self.is_system = is_system
self.in_dim = in_dim
# edge_model
# self.edgemodel = None
if is_edge:
logger.info("EdgeUpdate")
self.edgemodel = EdgeUpdate(is_global, out_dim, 1)
# conv_model
if is_node:
logger.info("NodeUpdate")
self.node_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.node_updates.append(NodeUpdate(out_dim, out_dim, is_norm, ins_layer_num))
# sys_model
if is_system:
logger.info("SystemUpdate")
self.sys_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.sys_updates.append(SystemUpdate(out_dim, out_dim, pooling))
self.mlp_head = nn.Sequential(
nn.Linear(out_dim, out_dim),
nn.ReLU(),
nn.Linear(out_dim, out_dim)
)
self.drop_layer = nn.Dropout(0.5)
self.relu = nn.ReLU()
logger.info("is_edge: %r, is_global: %r pooling: %r"%(is_edge, is_global, pooling))
def forward(self, x, edge_index, edge_weight, batch_item, bs):
init_lens = edge_index
# x_pos, x_neg = torch.split(x.view(b, N, C), [P, Neg], dim = 1)
centroid = torch.mean(x.view(bs, -1, self.in_dim), dim = 1)
edge_prob = edge_index
x_trans_loss = x
for index in range(self.total_layer_num):
# edge update
if self.is_edge:
edge_index, edge_weight, edge_prob, x_trans_loss = self.edgemodel(x, edge_index, edge_weight, centroid, batch_item, bs)
# node update
if self.is_node:
x, saved_x = self.node_updates[index](x, edge_index, edge_weight)
# system update
if self.is_system:
centroid = self.sys_updates[index](saved_x, centroid, bs)
x_loss = self.mlp_head(x)
centroid_loss = self.mlp_head(centroid)
final_len = edge_index
return x, edge_weight, centroid, x_loss, centroid_loss, edge_prob
class edgePredictor(nn.Module):
def __init__(self, dim, is_global):
super(edgePredictor, self).__init__()
self.is_global = is_global
self.dim = dim
if is_global:
self.l2r = nn.Sequential(
nn.Linear(3 * dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
else:
self.l2r = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
def forward(self, node_features, edge_index, centroid, bs):
node_features = node_features.view(-1, self.dim)
node_j = node_features[edge_index[0]]
node_i = node_features[edge_index[1]]
if self.is_global:
residual_node_features = (node_features.view(bs, -1, self.dim) - centroid.view(bs, 1, self.dim)).view(-1, self.dim)
residual_node_j = residual_node_features[edge_index[0]]
residual_node_i = residual_node_features[edge_index[1]]
sim_vec = torch.cat((torch.abs(node_i - node_j), residual_node_i, residual_node_j), dim = 1)
else:
sim_vec = torch.abs(node_i - node_j)
prob_score = self.l2r(sim_vec)
return prob_score
class EdgeUpdate(nn.Module):
def __init__(self, is_global, feature_dim, edge_dim, load_dir = None):
super(EdgeUpdate, self).__init__()
self.feature_dim = feature_dim
self.edge_dim = edge_dim
self.temp = 0.6
self.thres_1 = torch.nn.Threshold(0.5, 0)
self.thres_2 = torch.nn.Threshold(-0.49, 1)
self.mins = torch.tensor(1e-10).cuda()
self.relu_fuc = nn.ReLU()
self.edge_skip_alpha = nn.Parameter(torch.rand(1))
self.ep_net = edgePredictor(feature_dim, is_global)
def forward(self, x, edge_index, edge_weight, centroid, batch_item, bs):
pre_prob = self.ep_net(x, edge_index, centroid, bs).squeeze(-1)
pre_adj = torch.sigmoid(pre_prob)
sampled_edge = torch.ones([pre_adj.size(0)]).cuda()
sampled_edge = pyro.distributions.RelaxedBernoulliStraightThrough(temperature=self.temp, probs = pre_adj).rsample()
combine_weight = self.edge_skip_alpha * (sampled_edge * edge_weight) + (1-self.edge_skip_alpha) * (sampled_edge * pre_adj)
return edge_index, combine_weight, pre_adj, x
class NodeUpdate(torch.nn.Module):
def __init__(self, in_channel, out_channel, is_norm, layer_num):
super(NodeUpdate, self).__init__()
self.bn1 = torch.nn.BatchNorm1d(in_channel)
self.conv1 = GINConv(
nn.Sequential(nn.Linear(out_channel, out_channel), nn.ReLU()))
self.drop_layer = nn.Dropout(0.5)
def forward(self, x, edge_index, edge_weight = None):
his_x = []
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = self.bn1(x)
x = self.drop_layer(x)
his_x.append(x)
return x, his_x
class SystemUpdate(nn.Module):
def __init__(self, in_channel, out_channel, pooling):
super(SystemUpdate, self).__init__()
self.in_dim = in_channel
self.out_dim = out_channel
self.pooling = pooling
def forward(self, his_x, init_c, bs):
mem_lens = len(his_x)
if self.pooling == "memory":
for index in range(mem_lens):
tmp_mem = his_x[index].view(bs, -1, self.in_dim)
tmp_score = torch.bmm(tmp_mem, init_c.view(bs, self.in_dim, 1)).view(bs, -1)
tmp_att = F.softmax(tmp_score, dim = 1)
tmp_read = torch.sum(tmp_att.view(bs, -1, 1) * tmp_mem, dim = 1)
init_c = tmp_read
elif self.pooling == "avg":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.mean(x, dim = 1)
elif self.pooling == "sum":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.sum(x, dim = 1)
elif self.pooling == "max":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.max(x, dim = 1).values
else:
raise ValueError("No such pooling type!")
return init_c
| 9,770 | 37.317647 | 189 | py |
GraphCAD | GraphCAD-main/Alpha/main.py | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import random
import json
import pickle
from collections import defaultdict
from operator import itemgetter
import logging
from torch_geometric.data import Data, DataLoader
from torch.optim.lr_scheduler import _LRScheduler
from models import GraphCAD, outlierLoss
from utils import *
torch.backends.cudnn.benchmark = True
torch.autograd.set_detect_anomaly(True)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUDA_VISIBLE_DEVICES=1 python main.py --data_dir '/raid/chenbo/outlier_detection/release_data/alpha_data.pkl'
def add_arguments(args):
# essential paras
args.add_argument('--data_dir', type=str, help="data_dir", required = True)
    args.add_argument('--saved_dir', type=str, help="directory for saved models", default="saved_model")
args.add_argument('--log_name', type=str, help="log_name", default = "log")
# training paras.
args.add_argument('--epochs', type=int, help="training #epochs", default=1000)
args.add_argument('--seed', type=int, help="seed", default=1)
args.add_argument('--lr', type=float, help="learning rate", default=5e-4)
args.add_argument('--min_lr', type=float, help="min lr", default=1e-4)
args.add_argument('--bs', type=int, help="batch size", default=1)
args.add_argument('--input_dim', type=int, help="input dimension", default=256)
args.add_argument('--out_dim', type=int, help="output dimension", default=256)
args.add_argument('--verbose', type=int, help="eval", default=1)
# model paras.
args.add_argument('--outer_layer', type=int, help="#layers of GraphCAD", default = 2)
args.add_argument('--inner_layer', type=int, help="#layers of node_update", default = 1)
args.add_argument('--is_global', help="whether to add global information", action = "store_false")
args.add_argument('--is_edge', help="whether to use edge update", action = "store_false")
    args.add_argument('--pooling', type=str, help="pooling type", choices=['memory', 'avg', 'min', 'max'], default = "memory")
args.add_argument('--is_lp', help="whether to use link prediction loss", action = "store_false")
args.add_argument("--lp_weight", type = float, help="the weight of link prediction loss", default=0.1)
args = args.parse_args()
return args
def logging_builder(args):
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(os.path.join(os.getcwd(), args.log_name), mode='w')
fileHandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
return logger
class WarmupLinearLR(_LRScheduler):
def __init__(self, optimizer, step_size, min_lr, peak_percentage=0.1, last_epoch=-1):
self.step_size = step_size
self.peak_step = peak_percentage * step_size
self.min_lr = min_lr
super(WarmupLinearLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
ret = []
for tmp_min_lr, tmp_base_lr in zip(self.min_lr, self.base_lrs):
if self._step_count <= self.peak_step:
ret.append(tmp_min_lr + (tmp_base_lr - tmp_min_lr) * self._step_count / self.peak_step)
else:
ret.append(tmp_min_lr + max(0, (tmp_base_lr - tmp_min_lr) * (self.step_size - self._step_count) / (self.step_size - self.peak_step)))
# print(self._step_count, self.step_size, ret)
return ret
if __name__ == "__main__":
args = argparse.ArgumentParser()
args = add_arguments(args)
setup_seed(args.seed)
logger = logging_builder(args)
print(args)
os.makedirs(os.path.join(os.getcwd(), args.saved_dir), exist_ok = True)
encoder = GraphCAD(logger, args, args.input_dim, args.out_dim, args.outer_layer, args.inner_layer, is_global = args.is_global, is_edge = args.is_edge, pooling= args.pooling).cuda()
criterion = outlierLoss(args, logger, is_lp = args.is_lp, lp_weight = args.lp_weight).cuda()
with open(args.data_dir, 'rb') as files:
data_collection = pickle.load(files)
data, y, train_mask, train_label_index, train_edge_ids, train_edge_labels, test_mask, test_label_index,test_edge_ids, test_edge_labels = data_collection
# for older version of pyg
data = Data(**data.__dict__)
edges_attrs = torch.ones(data.edge_index.size(0))
data_set = DataLoader([Data(x = data.x.cuda(), edge_index = data.edge_index.cuda().t(), y = y.cuda(), edge_attr = edges_attrs.cuda().unsqueeze(-1))], batch_size=1, shuffle = True)
optimizer = torch.optim.Adam([{'params': encoder.parameters(), 'lr': args.lr}])
optimizer.zero_grad()
logger.info(f"Warm up schedular: {args.epochs}")
scheduler = WarmupLinearLR(optimizer, args.epochs, min_lr=[args.min_lr])
encoder.train()
epoch_num = 0
max_map = -1
max_auc = -1
max_epoch = -1
for epoch_num in range(args.epochs):
batch_loss = []
batch_contras_loss = []
batch_lp_loss = []
batch_edge_score = []
batch_labels = []
batch_index = 0
for batch_data in tqdm(data_set):
batch_index += 1
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_data.x, batch_data.edge_index, batch_data.edge_attr.squeeze(-1), batch_data.y, batch_data.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
# training index
node_outputs = node_outputs[train_mask][train_label_index]
output_loss = output_loss[train_mask][train_label_index]
edge_prob = edge_prob[train_edge_ids]
edge_labels = train_edge_labels.cuda()
labels = labels[train_mask][train_label_index]
overall_loss, _, contras_loss, lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
# overall_loss.backward()
overall_loss = overall_loss / args.bs
overall_loss.backward()
batch_loss.append(overall_loss.item())
batch_contras_loss.append(contras_loss.item())
batch_lp_loss.append(lp_loss.item())
if (batch_index + 1) % args.bs == 0:
optimizer.step()
# scheduler.step()
optimizer.zero_grad()
avg_batch_loss = np.mean(np.array(batch_loss))
avg_batch_contras_loss = np.mean(np.array(batch_contras_loss))
avg_batch_lp_loss = np.mean(np.array(batch_lp_loss))
logger.info("Epoch:{} Overall loss: {:.6f} Contrastive loss: {:.6f} LP_loss: {:.6f}".format(epoch_num, avg_batch_loss, avg_batch_contras_loss, avg_batch_lp_loss))
if (epoch_num + 1) % args.verbose == 0:
encoder.eval()
test_loss = []
test_contras_loss = []
test_lp_loss = []
test_gt = []
labels_list = []
scores_list = []
with torch.no_grad():
for batch_test in tqdm(data_set):
node_outputs, adj_matrix, adj_weight, labels, batch_item = batch_test.x, batch_test.edge_index, batch_test.edge_attr.squeeze(-1), batch_test.y, batch_test.batch
node_outputs, adj_weight, centroid, output_loss, centroid_loss, edge_prob = encoder(node_outputs, adj_matrix, adj_weight, batch_item, 1)
centroid = centroid.squeeze(0)
centroid_loss = centroid_loss.squeeze(0)
# test index
node_outputs = node_outputs[test_mask][test_label_index]
output_loss = output_loss[test_mask][test_label_index]
edge_prob = edge_prob[test_edge_ids]
edge_labels = test_edge_labels.cuda()
labels = labels[test_mask][test_label_index]
test_each_overall_loss, scores, test_each_contras_loss, test_each_lp_loss = criterion(output_loss, centroid_loss, edge_prob, edge_labels, adj_matrix, batch_item, labels, node_outputs, centroid)
scores = scores.detach().cpu().numpy()
scores_list.append(scores)
labels = labels.detach().cpu().numpy()
test_gt.append(labels)
test_loss.append(test_each_overall_loss.item())
test_contras_loss.append(test_each_contras_loss.item())
test_lp_loss.append(test_each_lp_loss.item())
avg_test_loss = np.mean(np.array(test_loss))
avg_test_contras_loss = np.mean(np.array(test_contras_loss))
avg_test_lp_loss = np.mean(np.array(test_lp_loss))
auc, maps = MAPs(test_gt, scores_list)
logger.info("Epoch: {} Auc: {:.6f} Maps: {:.6f} Max-Auc: {:.6f} Max-Maps: {:.6f}".format(epoch_num, auc, maps, max_auc, max_map))
if maps > max_map or auc > max_auc:
max_epoch = epoch_num
max_map = maps if maps > max_map else max_map
max_auc = auc if auc > max_auc else max_auc
# state = {'encoder': encoder.state_dict()}
# torch.save(state, saved_file + "model_" + str(epoch_num))
logger.info("***************** Epoch: {} Max Auc: {:.6f} Maps: {:.6f} *******************".format(epoch_num, max_auc, max_map))
encoder.train()
optimizer.zero_grad()
logger.info("***************** Max_Epoch: {} Max Auc: {:.6f} Maps: {:.6f}*******************".format(max_epoch, max_auc, max_map)) | 10,052 | 46.419811 | 213 | py |
GraphCAD | GraphCAD-main/Alpha/utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import multiprocessing
from sklearn.metrics import roc_auc_score, auc, roc_curve
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from operator import itemgetter
from scipy import sparse
import random
def MAPs(label_lists, score_lists):
assert len(label_lists) == len(score_lists)
maps = []
mean_auc = []
total_count = 0
# print(np.array(score_lists).shape)
total_nan = 0
for sub_labels, sub_scores in zip(label_lists, score_lists):
assert len(sub_labels) == len(sub_scores)
combine = [each for each in zip(sub_scores, sub_labels)]
sorted_combine = sorted(combine, key=itemgetter(0))
# print(sorted_combine)
rights = 0
ps = []
tmp_scores = []
tmp_labels = []
for index in range(len(sorted_combine)):
ins_scores, ins_labels = sorted_combine[index]
tmp_scores.append(ins_scores)
tmp_labels.append(ins_labels)
if(ins_labels == 0):
rights += 1
ps.append(rights/(index+1))
tmp_scores = np.array(tmp_scores)
nan_num = len(tmp_scores[np.isnan(tmp_scores)])
total_nan += nan_num
tmp_scores = np.nan_to_num(tmp_scores)
tmp_labels = np.array(tmp_labels)
auc = roc_auc_score(1-tmp_labels, -1 * tmp_scores)
ap = np.mean(np.array(ps))
maps.append((ap, len(sub_labels)))
mean_auc.append(auc)
total_count += len(sub_labels)
assert len(maps) == len(mean_auc) == len(label_lists)
maps_scores = 0
maps_weight = 0
for each in maps:
ap, count = each
each_w = total_count / count
maps_scores += ap * each_w
maps_weight += each_w
norm_maps = maps_scores/maps_weight
mean_auc = np.mean(np.array(mean_auc))
return mean_auc, norm_maps
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
| 2,237 | 27.329114 | 96 | py |
GraphCAD | GraphCAD-main/Alpha/models.py | from random import sample
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
import pickle
from torch_geometric.nn import GINConv_w as GINConv
from torch_geometric.utils import add_self_loops, degree, softmax, to_dense_adj, dense_to_sparse
from torch_scatter import scatter_add
import math
import numpy as np
import pyro
class outlierLoss(nn.Module):
def __init__(self, args, logger, is_lp = True, lp_weight = 0.1):
super(outlierLoss, self).__init__()
self.cross_entropy = nn.CrossEntropyLoss().cuda()
self.dim = args.out_dim
self.temp = 0.1
self.margin_1 = 0.5
self.lamb = 0.5
self.thres = torch.tensor(0.0).cuda()
self.lp_weight = lp_weight
self.is_lp = is_lp
logger.info("is_lp: %s, lp_weight: %f"%(self.is_lp, self.lp_weight))
def cal_lp_loss(self, edge_prob, edge_labels):
# feat_all_trans: [(b x N) x C]
non_zero = torch.nonzero(edge_labels)
zero = torch.nonzero(edge_labels == 0)
if non_zero.size(0) != 0 and zero.size(0) != 0:
pos_prob = edge_prob[non_zero][:8192]
neg_prob = edge_prob[zero]
            logits = torch.cat((pos_prob, neg_prob.view(1, neg_prob.size(0)).repeat(pos_prob.size(0), 1)), dim = 1)
logits_labels = torch.zeros([pos_prob.size(0)]).cuda().long()
lp_loss = self.cross_entropy(logits/self.temp, logits_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
return lp_loss
def forward(self, feat_all, q, edge_prob, edge_labels, adj_mat, batch_item, labels, raw_feat_all, raw_centroid):
# q2all: [N, 1]
q = F.normalize(q.view(self.dim, 1), p=2, dim=0)
feat_all = F.normalize(feat_all, p=2, dim=1)
q2all = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
pos_len = torch.sum(labels, dim = 0)
neg_len = q2all.size(0) - pos_len
# pos [P]; neg [Neg]
q2all_pos, q2all_neg = torch.split(q2all, [pos_len, neg_len], dim = 0)
q2all_each_logits = torch.cat([q2all_pos.unsqueeze(-1), q2all_neg.view(1, neg_len).repeat(pos_len, 1)], dim = -1)
# print(q2all_each_logits.size())
q2all_each_logits = q2all_each_logits.view(pos_len, neg_len + 1)
# pos: [b x P, 1]
        # neg: [b x p, Neg + (b - 1) * N]
logits_labels = torch.zeros([pos_len]).cuda().long()
contras_loss = self.cross_entropy(q2all_each_logits/self.temp, logits_labels)
if self.is_lp:
lp_loss = self.cal_lp_loss(edge_prob, edge_labels)
else:
lp_loss = torch.tensor(0.0).cuda()
outlier_loss = contras_loss + self.lp_weight * lp_loss
scores = torch.mm(feat_all, q.view(self.dim, 1)).squeeze(-1)
return outlier_loss, scores, contras_loss, lp_loss
class GraphCAD(nn.Module):
def __init__(self, logger, args,in_dim, out_dim, total_layer_num, ins_layer_num, is_norm = True, is_edge = True, is_node = True, is_system = True, is_global = True, pooling = "memory"):
super(GraphCAD, self).__init__()
self.total_layer_num = total_layer_num
self.is_edge = is_edge
self.is_node = is_node
self.is_system = is_system
self.in_dim = in_dim
# edge_model
# self.edgemodel = None
if is_edge:
logger.info("EdgeUpdate")
self.edgemodel = EdgeUpdate(is_global, out_dim, 1)
# conv_model
if is_node:
logger.info("NodeUpdate")
self.node_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.node_updates.append(NodeUpdate(out_dim, out_dim, is_norm, ins_layer_num))
# sys_model
if is_system:
logger.info("SystemUpdate")
self.sys_updates = nn.ModuleList()
for _ in range(self.total_layer_num):
self.sys_updates.append(SystemUpdate(out_dim, out_dim, pooling))
self.mlp_head = nn.Sequential(
nn.Linear(out_dim, out_dim),
nn.ReLU(),
nn.Linear(out_dim, out_dim)
)
self.drop_layer = nn.Dropout(0.5)
self.relu = nn.ReLU()
logger.info("is_edge: %r, is_global: %r pooling: %r"%(is_edge, is_global, pooling))
def forward(self, x, edge_index, edge_weight, batch_item, bs):
init_lens = edge_index
# x_pos, x_neg = torch.split(x.view(b, N, C), [P, Neg], dim = 1)
centroid = torch.mean(x.view(bs, -1, self.in_dim), dim = 1)
edge_prob = edge_index
x_trans_loss = x
for index in range(self.total_layer_num):
# edge update
if self.is_edge:
edge_index, edge_weight, edge_prob, x_trans_loss = self.edgemodel(x, edge_index, edge_weight, centroid, batch_item, bs)
# node update
if self.is_node:
x, saved_x = self.node_updates[index](x, edge_index, edge_weight)
# system update
if self.is_system:
centroid = self.sys_updates[index](saved_x, centroid, bs)
x_loss = self.mlp_head(x)
centroid_loss = self.mlp_head(centroid)
final_len = edge_index
return x, edge_weight, centroid, x_loss, centroid_loss, edge_prob
class edgePredictor(nn.Module):
def __init__(self, dim, is_global):
super(edgePredictor, self).__init__()
self.is_global = is_global
self.dim = dim
if is_global:
self.l2r = nn.Sequential(
nn.Linear(3 * dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
else:
self.l2r = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(dim, 1)
# nn.Sigmoid()
)
def forward(self, node_features, edge_index, centroid, bs):
node_features = node_features.view(-1, self.dim)
node_j = node_features[edge_index[0]]
node_i = node_features[edge_index[1]]
if self.is_global:
residual_node_features = (node_features.view(bs, -1, self.dim) - centroid.view(bs, 1, self.dim)).view(-1, self.dim)
residual_node_j = residual_node_features[edge_index[0]]
residual_node_i = residual_node_features[edge_index[1]]
sim_vec = torch.cat((torch.abs(node_i - node_j), residual_node_i, residual_node_j), dim = 1)
else:
sim_vec = torch.abs(node_i - node_j)
prob_score = self.l2r(sim_vec)
return prob_score
class EdgeUpdate(nn.Module):
def __init__(self, is_global, feature_dim, edge_dim, load_dir = None):
super(EdgeUpdate, self).__init__()
self.feature_dim = feature_dim
self.edge_dim = edge_dim
self.temp = 0.6
self.thres_1 = torch.nn.Threshold(0.5, 0)
self.thres_2 = torch.nn.Threshold(-0.49, 1)
self.mins = torch.tensor(1e-10).cuda()
self.relu_fuc = nn.ReLU()
self.edge_skip_alpha = nn.Parameter(torch.rand(1))
self.ep_net = edgePredictor(feature_dim, is_global)
def forward(self, x, edge_index, edge_weight, centroid, batch_item, bs):
pre_prob = self.ep_net(x, edge_index, centroid, bs).squeeze(-1)
pre_adj = torch.sigmoid(pre_prob)
sampled_edge = torch.ones([pre_adj.size(0)]).cuda()
sampled_edge = pyro.distributions.RelaxedBernoulliStraightThrough(temperature=self.temp, probs = pre_adj).rsample()
combine_weight = self.edge_skip_alpha * (sampled_edge * edge_weight) + (1-self.edge_skip_alpha) * (sampled_edge * pre_adj)
return edge_index, combine_weight, pre_adj, x
class NodeUpdate(torch.nn.Module):
def __init__(self, in_channel, out_channel, is_norm, layer_num):
super(NodeUpdate, self).__init__()
self.bn1 = torch.nn.BatchNorm1d(in_channel)
self.conv1 = GINConv(
nn.Sequential(nn.Linear(out_channel, out_channel), nn.ReLU()))
self.drop_layer = nn.Dropout(0.5)
def forward(self, x, edge_index, edge_weight = None):
his_x = []
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = self.bn1(x)
x = self.drop_layer(x)
his_x.append(x)
return x, his_x
class SystemUpdate(nn.Module):
def __init__(self, in_channel, out_channel, pooling):
super(SystemUpdate, self).__init__()
self.in_dim = in_channel
self.out_dim = out_channel
self.pooling = pooling
def forward(self, his_x, init_c, bs):
mem_lens = len(his_x)
if self.pooling == "memory":
for index in range(mem_lens):
tmp_mem = his_x[index].view(bs, -1, self.in_dim)
tmp_score = torch.bmm(tmp_mem, init_c.view(bs, self.in_dim, 1)).view(bs, -1)
tmp_att = F.softmax(tmp_score, dim = 1)
tmp_read = torch.sum(tmp_att.view(bs, -1, 1) * tmp_mem, dim = 1)
init_c = tmp_read
elif self.pooling == "avg":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.mean(x, dim = 1)
elif self.pooling == "sum":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.sum(x, dim = 1)
elif self.pooling == "max":
x = his_x[-1].view(bs, -1, self.in_dim)
init_c = torch.max(x, dim = 1).values
else:
raise ValueError("No such pooling type!")
return init_c
| 9,763 | 37.290196 | 189 | py |
CoordFill | CoordFill-master/test.py | import argparse
import os
import math
from functools import partial
import yaml
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import datasets
import models
import utils
from PIL import Image
from torchvision import transforms
from torchsummary import summary
import numpy as np
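# batched_predict queries the decoder in chunks of `bsize` coordinates at a time so that a
# full-resolution coordinate grid fits in GPU memory; the per-chunk predictions are
# concatenated back into a single tensor along the coordinate dimension.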
def batched_predict(model, inp, coord, bsize):
with torch.no_grad():
model.gen_feat(inp)
n = coord.shape[1]
ql = 0
preds = []
while ql < n:
qr = min(ql + bsize, n)
pred = model.query_rgb(coord[:, ql: qr, :])
preds.append(pred)
ql = qr
pred = torch.cat(preds, dim=1)
return pred, preds
def tensor2PIL(tensor):
# img = tensor.cpu().clone()
# img = img.squeeze(0)
# img = unloader(img)
toPIL = transforms.ToPILImage()
return toPIL(tensor)
def eval_psnr(loader, model, data_norm=None, eval_type=None, eval_bsize=None,
verbose=False):
model.eval()
if data_norm is None:
data_norm = {
'inp': {'sub': [0], 'div': [1]},
'gt': {'sub': [0], 'div': [1]}
}
t = data_norm['inp']
inp_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
inp_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
t = data_norm['gt_rgb']
gt_rgb_sub = torch.FloatTensor(t['sub']).view(1, 1, -1).cuda()
gt_rgb_div = torch.FloatTensor(t['div']).view(1, 1, -1).cuda()
if eval_type is None:
metric_fn = utils.calc_psnr
elif eval_type.startswith('div2k'):
scale = int(eval_type.split('-')[1])
metric_fn = partial(utils.calc_psnr, dataset='div2k', scale=scale)
elif eval_type.startswith('benchmark'):
scale = int(eval_type.split('-')[1])
metric_fn = partial(utils.calc_psnr, dataset='benchmark', scale=scale)
else:
raise NotImplementedError
# val_res = utils.Averager()
val_psnr = utils.Averager()
val_ssim = utils.Averager()
val_l1 = utils.Averager()
pbar = tqdm(loader, leave=False, desc='val')
for batch in pbar:
for k, v in batch.items():
batch[k] = v.cuda()
inp = (batch['inp'] - inp_sub) / inp_div
gt = (batch['gt_rgb'] - gt_rgb_sub) / gt_rgb_div
if eval_bsize is None:
with torch.no_grad():
# pred = model.encoder.mask_predict([inp, batch['mask']])
pred = model.encoder([inp, batch['mask']])
else:
pred = batched_predict(model, inp, batch['coord'], eval_bsize)
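        # Compose the output: network predictions are kept only where mask == 0 (the hole),
        # known pixels are copied from the normalized ground truth, and the result is mapped
        # back to [0, 1] and clamped before computing metrics.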
pred = (pred * (1 - batch['mask']) + gt * batch['mask']) * gt_rgb_div + gt_rgb_sub
pred.clamp_(0, 1)
if eval_type is not None: # reshape for shaving-eval
ih, iw = batch['inp'].shape[-2:]
s = math.sqrt(batch['coord'].shape[1] / (ih * iw))
shape = [batch['inp'].shape[0], round(ih * s), round(iw * s), 3]
pred = pred.view(*shape) \
.permute(0, 3, 1, 2).contiguous()
batch['gt'] = batch['gt'].view(*shape) \
.permute(0, 3, 1, 2).contiguous()
psnr, ssim, l1 = metric_fn(model, pred, batch['gt_rgb'])
val_psnr.add(psnr.item(), inp.shape[0])
val_ssim.add(ssim.item(), inp.shape[0])
val_l1.add(l1.item(), inp.shape[0])
if verbose:
            pbar.set_description('val psnr {:.4f} ssim {:.4f} lpips {:.4f}'.format(
                val_psnr.item(), val_ssim.item(), val_l1.item()))
return val_psnr.item(), val_ssim.item(), val_l1.item()
from collections import OrderedDict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config')
parser.add_argument('--model')
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
spec = config['test_dataset']
dataset = datasets.make(spec['dataset'])
dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})
loader = DataLoader(dataset, batch_size=spec['batch_size'],
num_workers=8, pin_memory=True)
model = models.make(config['model']).cuda()
model.encoder.load_state_dict(torch.load(args.model, map_location='cuda:0'))
res = eval_psnr(loader, model,
data_norm=config.get('data_norm'),
eval_type=config.get('eval_type'),
eval_bsize=config.get('eval_bsize'),
verbose=True)
print('result psnr: {:.6f}'.format(res[0]))
print('result ssim: {:.6f}'.format(res[1]))
print('result lpips: {:.6f}'.format(res[2]))
| 4,752 | 31.333333 | 90 | py |
CoordFill | CoordFill-master/utils.py | import os
import time
import shutil
import math
import torch
import numpy as np
from torch.optim import SGD, Adam
from tensorboardX import SummaryWriter
from skimage.measure import compare_ssim
from skimage.measure import compare_psnr
class Averager():
def __init__(self):
self.n = 0.0
self.v = 0.0
def add(self, v, n=1.0):
self.v = (self.v * self.n + v * n) / (self.n + n)
self.n = self.n + n
def item(self):
return self.v
class Timer():
def __init__(self):
self.v = time.time()
def s(self):
self.v = time.time()
def t(self):
return time.time() - self.v
def time_text(t):
if t >= 3600:
return '{:.1f}h'.format(t / 3600)
elif t >= 60:
return '{:.1f}m'.format(t / 60)
else:
return '{:.1f}s'.format(t)
_log_path = None
def set_log_path(path):
global _log_path
_log_path = path
def log(obj, filename='log.txt'):
print(obj)
if _log_path is not None:
with open(os.path.join(_log_path, filename), 'a') as f:
print(obj, file=f)
def ensure_path(path, remove=True):
basename = os.path.basename(path.rstrip('/'))
if os.path.exists(path):
if remove:
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def set_save_path(save_path, remove=True):
ensure_path(save_path, remove=remove)
set_log_path(save_path)
writer = SummaryWriter(os.path.join(save_path, 'tensorboard'))
return log, writer
def compute_num_params(model, text=False):
tot = int(sum([np.prod(p.shape) for p in model.parameters()]))
if text:
if tot >= 1e6:
return '{:.1f}M'.format(tot / 1e6)
else:
return '{:.1f}K'.format(tot / 1e3)
else:
return tot
def make_optimizer(param_list, optimizer_spec, load_sd=False):
Optimizer = {
'sgd': SGD,
'adam': Adam
}[optimizer_spec['name']]
optimizer = Optimizer(param_list, **optimizer_spec['args'])
if load_sd:
optimizer.load_state_dict(optimizer_spec['sd'])
return optimizer
def make_coord(shape, ranges=None, flatten=True):
""" Make coordinates at grid centers.
"""
coord_seqs = []
for i, n in enumerate(shape):
if ranges is None:
v0, v1 = -1, 1
else:
v0, v1 = ranges[i]
r = (v1 - v0) / (2 * n)
seq = v0 + r + (2 * r) * torch.arange(n).float()
coord_seqs.append(seq)
ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
# if flatten:
# ret = ret.view(-1, ret.shape[-1])
return ret
def to_pixel_samples(img):
""" Convert the image to coord-RGB pairs.
img: Tensor, (3, H, W)
"""
coord = make_coord(img.shape[-2:])
rgb = img.view(3, -1).permute(1, 0)
return coord, rgb
def calc_psnr(model, sr, hr, dataset=None, scale=1, rgb_range=1):
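    # Returns batch-averaged (PSNR, SSIM, LPIPS). The l1 accumulator is computed but never
    # returned; callers in test.py unpack the third value as `l1` even though it holds the
    # LPIPS score from model.model_LPIPS.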
pred_batch = (sr.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
gt_batch = (hr.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
psnr = 0
ssim = 0
l1 = 0
lpips = 0
batch_size = sr.size(0)
for i in range(batch_size):
gt, pred = gt_batch[i], pred_batch[i]
psnr += compare_psnr(pred, gt, data_range=255)
ssim += compare_ssim(pred, gt, data_range=255, multichannel=True, win_size=11)
l1 += np.mean(np.abs((np.mean(pred, 2) - np.mean(gt, 2))/255)) * 100
lpips += model.model_LPIPS.forward(im2tensor(pred), im2tensor(gt))
return psnr/batch_size, ssim/batch_size, lpips/batch_size
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
return torch.Tensor((image / factor - cent)
[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
| 3,801 | 24.346667 | 87 | py |
CoordFill | CoordFill-master/train_parallel.py | import argparse
import os
import yaml
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.optim.lr_scheduler import MultiStepLR, LambdaLR
from torchvision import transforms
import random
import datasets
import models
import utils
from test import eval_psnr, batched_predict
import numpy as np
from collections import OrderedDict
from PIL import Image
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def make_data_loader(spec, tag=''):
if spec is None:
return None
dataset = datasets.make(spec['dataset'])
dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})
log('{} dataset: size={}'.format(tag, len(dataset)))
for k, v in dataset[0].items():
log(' {}: shape={}'.format(k, tuple(v.shape)))
# loader = DataLoader(dataset, batch_size=spec['batch_size'],
# shuffle=(tag == 'train'), num_workers=8, pin_memory=True)
sampler = DistributedSampler(dataset, shuffle=(tag == 'train'))
loader = DataLoader(dataset, batch_size=spec['batch_size'], sampler=sampler, num_workers=8, pin_memory=True)
return loader
def make_data_loaders():
train_loader = make_data_loader(config.get('train_dataset'), tag='train')
val_loader = make_data_loader(config.get('val_dataset'), tag='val')
return train_loader, val_loader
def prepare_training():
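    """Build the model and optimizer, either freshly from the config or restored from a resume checkpoint."""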
if config.get('resume') is not None:
sv_file = torch.load(config['resume'])
model = models.make(sv_file['model'], load_sd=True).cuda()
optimizer = utils.make_optimizer(
model.parameters(), sv_file['optimizer'], load_sd=True)
epoch_start = sv_file['epoch'] + 1
else:
model = models.make(config['model']).cuda()
optimizer = utils.make_optimizer(
model.parameters(), config['optimizer'])
epoch_start = 1
max_epoch = config.get('epoch_max')
# lr_scheduler = LambdaLR(optimizer, lr_lambda= lambda epoch: (1-(epoch/max_epoch))**0.9)
lr_scheduler = None
# log('model: #params={}'.format(utils.compute_num_params(model.encoder, text=True)))
log('model: #params={}'.format(utils.compute_num_params(model, text=True)))
return model, optimizer, epoch_start, lr_scheduler
def train(train_loader, model, optimizer):
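    """Run one adversarial training epoch; returns the averaged generator and discriminator losses."""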
model.train()
train_loss_G = utils.Averager()
train_loss_D = utils.Averager()
data_norm = config['data_norm']
t = data_norm['inp']
inp_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
inp_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
t = data_norm['gt_rgb']
gt_rgb_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
gt_rgb_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
for batch in tqdm(train_loader, leave=False, desc='train'):
for k, v in batch.items():
batch[k] = v.cuda()
inp = (batch['inp'] - inp_sub) / inp_div
gt_rgb = (batch['gt_rgb'] - gt_rgb_sub) / gt_rgb_div
model.set_input(inp, gt_rgb, batch['mask'])
model.optimize_parameters()
train_loss_G.add(model.loss_G.item())
# if model.discriminator != None:
train_loss_D.add(model.loss_D.item())
return train_loss_G.item(), train_loss_D.item()
def main(config_, save_path):
global config, log, writer
config = config_
log, writer = utils.set_save_path(save_path, remove=False)
with open(os.path.join(save_path, 'config.yaml'), 'w') as f:
yaml.dump(config, f, sort_keys=False)
train_loader, val_loader = make_data_loaders()
if config.get('data_norm') is None:
config['data_norm'] = {
'inp': {'sub': [0], 'div': [1]},
'gt': {'sub': [0], 'div': [1]}
}
model, optimizer, epoch_start, lr_scheduler = prepare_training()
n_gpus = 8
if n_gpus > 1:
# model = nn.parallel.DataParallel(model)
model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # device_ids will include all GPU devices by default
epoch_max = config['epoch_max']
epoch_val = config.get('epoch_val')
epoch_save = config.get('epoch_save')
max_val_v = -1e18
timer = utils.Timer()
for epoch in range(epoch_start, epoch_max + 1):
t_epoch_start = timer.t()
log_info = ['epoch {}/{}'.format(epoch, epoch_max)]
writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
# train_loss_G, train_loss_D = train(train_loader, model, optimizer)
if n_gpus > 1:
train_loss_G, train_loss_D = train(train_loader, model.module, optimizer)
else:
train_loss_G, train_loss_D = train(train_loader, model, optimizer)
if lr_scheduler is not None:
lr_scheduler.step()
log_info.append('train G: loss={:.4f}'.format(train_loss_G))
writer.add_scalars('loss', {'train G': train_loss_G}, epoch)
log_info.append('train D: loss={:.4f}'.format(train_loss_D))
writer.add_scalars('loss', {'train D': train_loss_D}, epoch)
if n_gpus > 1:
model_ = model.module
else:
model_ = model
model_spec = config['model']
model_spec['sd'] = model_.state_dict()
optimizer_spec = config['optimizer']
optimizer_spec['sd'] = optimizer.state_dict()
torch.save(model_.encoder.state_dict(), os.path.join(save_path, 'encoder-epoch-last.pth'))
if (epoch_val is not None) and (epoch % epoch_val == 0):
# if n_gpus > 1 and (config.get('eval_bsize') is not None):
if n_gpus > 1:
model_ = model.module
else:
model_ = model
val_psnr, val_ssim, val_lpips = eval_psnr(val_loader, model_,
data_norm=config['data_norm'],
eval_type=config.get('eval_type'),
eval_bsize=config.get('eval_bsize'))
log_info.append('val: psnr={:.4f}'.format(val_psnr))
writer.add_scalars('psnr', {'val': val_psnr}, epoch)
log_info.append('val: ssim={:.4f}'.format(val_ssim))
writer.add_scalars('ssim', {'val': val_ssim}, epoch)
log_info.append('val: lpips={:.4f}'.format(val_lpips))
writer.add_scalars('lpips', {'val': val_lpips}, epoch)
if val_psnr > max_val_v:
max_val_v = val_psnr
torch.save(model_.encoder.state_dict(), os.path.join(save_path, 'encoder-epoch-best.pth'))
t = timer.t()
prog = (epoch - epoch_start + 1) / (epoch_max - epoch_start + 1)
t_epoch = utils.time_text(t - t_epoch_start)
t_elapsed, t_all = utils.time_text(t), utils.time_text(t / prog)
log_info.append('{} {}/{}'.format(t_epoch, t_elapsed, t_all))
log(', '.join(log_info))
writer.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config')
parser.add_argument('--name', default=None)
parser.add_argument('--tag', default=None)
parser.add_argument('--local_rank', default=0, type=int,
help='node rank for distributed training')
args = parser.parse_args()
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl")
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
print('config loaded.')
save_name = args.name
if save_name is None:
save_name = '_' + args.config.split('/')[-1][:-len('.yaml')]
if args.tag is not None:
save_name = save_name + '_' + args.tag
save_path = os.path.join('./save', save_name)
main(config, save_path)
| 7,851 | 35.52093 | 202 | py |
CoordFill | CoordFill-master/demo.py | import argparse
import os
from PIL import Image
import torch
from torchvision import transforms
import models
def resize_fn(img, size):
return transforms.ToTensor()(
transforms.Resize(size)(
transforms.ToPILImage()(img)))
def to_mask(mask):
return transforms.ToTensor()(
transforms.Grayscale(num_output_channels=1)(
transforms.ToPILImage()(mask)))
import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input')
parser.add_argument('--mask')
parser.add_argument('--config')
parser.add_argument('--model')
parser.add_argument('--resolution')
parser.add_argument('--output', default='output.png')
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
img = transforms.ToTensor()(Image.open(args.input).convert('RGB'))
model = models.make(config['model']).cuda()
model.encoder.load_state_dict(torch.load(args.model, map_location='cuda:0'))
h, w = list(map(int, args.resolution.split(',')))
mask = transforms.ToTensor()(Image.open(args.mask).convert('RGB'))
img = resize_fn(img, (h, w))
img = (img - 0.5) / 0.5
mask = resize_fn(mask, (h, w))
mask = to_mask(mask)
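    # binarize the mask and invert it, so that the pixels marked in the mask image become zeros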
mask[mask > 0] = 1
mask = 1 - mask
with torch.no_grad():
pred = model.encoder.mask_predict([img.unsqueeze(0).cuda(), mask.unsqueeze(0).cuda()])
pred = (pred * 0.5 + 0.5).clamp(0, 1).view(3, h, w).cpu()
transforms.ToPILImage()(pred).save(args.output) | 1,668 | 28.280702 | 94 | py |
CoordFill | CoordFill-master/train.py | import argparse
import os
import yaml
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
import datasets
import models
import utils
from test import eval_psnr, batched_predict
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def make_data_loader(spec, tag=''):
if spec is None:
return None
dataset = datasets.make(spec['dataset'])
dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})
log('{} dataset: size={}'.format(tag, len(dataset)))
for k, v in dataset[0].items():
log(' {}: shape={}'.format(k, tuple(v.shape)))
loader = DataLoader(dataset, batch_size=spec['batch_size'],
shuffle=(tag == 'train'), num_workers=8, pin_memory=True)
return loader
def make_data_loaders():
train_loader = make_data_loader(config.get('train_dataset'), tag='train')
val_loader = make_data_loader(config.get('val_dataset'), tag='val')
return train_loader, val_loader
def prepare_training():
if config.get('resume') is not None:
sv_file = torch.load(config['resume'])
model = models.make(sv_file['model'], load_sd=True).cuda()
optimizer = utils.make_optimizer(
model.parameters(), sv_file['optimizer'], load_sd=True)
epoch_start = sv_file['epoch'] + 1
else:
model = models.make(config['model']).cuda()
optimizer = utils.make_optimizer(
model.parameters(), config['optimizer'])
epoch_start = 1
max_epoch = config.get('epoch_max')
lr_scheduler = None
log('model: #params={}'.format(utils.compute_num_params(model, text=True)))
return model, optimizer, epoch_start, lr_scheduler
def train(train_loader, model, optimizer):
model.train()
train_loss_G = utils.Averager()
train_loss_D = utils.Averager()
data_norm = config['data_norm']
t = data_norm['inp']
inp_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
inp_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
t = data_norm['gt_rgb']
gt_rgb_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()
gt_rgb_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()
for batch in tqdm(train_loader, leave=False, desc='train'):
for k, v in batch.items():
batch[k] = v.cuda()
inp = (batch['inp'] - inp_sub) / inp_div
gt_rgb = (batch['gt_rgb'] - gt_rgb_sub) / gt_rgb_div
model.set_input(inp, gt_rgb, batch['mask'])
model.optimize_parameters()
train_loss_G.add(model.loss_G.item())
train_loss_D.add(model.loss_D.item())
return train_loss_G.item(), train_loss_D.item()
def main(config_, save_path):
global config, log, writer
config = config_
log, writer = utils.set_save_path(save_path)
with open(os.path.join(save_path, 'config.yaml'), 'w') as f:
yaml.dump(config, f, sort_keys=False)
train_loader, val_loader = make_data_loaders()
if config.get('data_norm') is None:
config['data_norm'] = {
'inp': {'sub': [0], 'div': [1]},
'gt': {'sub': [0], 'div': [1]}
}
model, optimizer, epoch_start, lr_scheduler = prepare_training()
model.optimizer_G = optimizer
model.optimizer_D = optimizer
epoch_max = config['epoch_max']
epoch_val = config.get('epoch_val')
epoch_save = config.get('epoch_save')
max_val_v = -1e18
timer = utils.Timer()
for epoch in range(epoch_start, epoch_max + 1):
t_epoch_start = timer.t()
log_info = ['epoch {}/{}'.format(epoch, epoch_max)]
writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
train_loss_G, train_loss_D = train(train_loader, model, optimizer)
if lr_scheduler is not None:
lr_scheduler.step()
log_info.append('train G: loss={:.4f}'.format(train_loss_G))
writer.add_scalars('loss', {'train G': train_loss_G}, epoch)
log_info.append('train D: loss={:.4f}'.format(train_loss_D))
writer.add_scalars('loss', {'train D': train_loss_D}, epoch)
model_ = model
model_spec = config['model']
model_spec['sd'] = model_.state_dict()
optimizer_spec = config['optimizer']
optimizer_spec['sd'] = optimizer.state_dict()
torch.save(model.encoder.state_dict(), os.path.join(save_path, 'encoder-epoch-last.pth'))
if (epoch_val is not None) and (epoch % epoch_val == 0):
val_psnr, val_ssim, val_lpips = eval_psnr(val_loader, model_,
data_norm=config['data_norm'],
eval_type=config.get('eval_type'),
eval_bsize=config.get('eval_bsize'))
log_info.append('val: psnr={:.4f}'.format(val_psnr))
writer.add_scalars('psnr', {'val': val_psnr}, epoch)
log_info.append('val: ssim={:.4f}'.format(val_ssim))
writer.add_scalars('ssim', {'val': val_ssim}, epoch)
log_info.append('val: lpips={:.4f}'.format(val_lpips))
writer.add_scalars('lpips', {'val': val_lpips}, epoch)
if val_psnr > max_val_v:
max_val_v = val_psnr
torch.save(model.encoder.state_dict(), os.path.join(save_path, 'encoder-epoch-best.pth'))
t = timer.t()
prog = (epoch - epoch_start + 1) / (epoch_max - epoch_start + 1)
t_epoch = utils.time_text(t - t_epoch_start)
t_elapsed, t_all = utils.time_text(t), utils.time_text(t / prog)
log_info.append('{} {}/{}'.format(t_epoch, t_elapsed, t_all))
log(', '.join(log_info))
writer.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config')
parser.add_argument('--name', default=None)
parser.add_argument('--tag', default=None)
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
print('config loaded.')
save_name = args.name
if save_name is None:
save_name = '_' + args.config.split('/')[-1][:-len('.yaml')]
if args.tag is not None:
save_name = save_name + '_' + args.tag
save_path = os.path.join('./save', save_name)
main(config, save_path)
| 6,360 | 33.570652 | 105 | py |
CoordFill | CoordFill-master/models/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
    Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
    Note that, as all modules are isomorphic, we assign each sub-module with a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
    A replication callback `__data_parallel_replicate__` of each module will be invoked after being created by the
    original `replicate` function.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate | 3,218 | 35.579545 | 115 | py |
CoordFill | CoordFill-master/models/modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .networks import BaseNetwork
from .networks import get_nonspade_norm_layer
from .networks import MySeparableBilinearDownsample as BilinearDownsample
import torch.nn.utils.spectral_norm as spectral_norm
import torch as th
from math import pi
from math import log2
import time
import math
class CoordFillGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G='instanceaffine')
parser.set_defaults(lr_instance=True)
parser.set_defaults(no_instance_dist=True)
parser.set_defaults(hr_coor="cosine")
return parser
def __init__(self, opt, hr_stream=None, lr_stream=None, fast=False):
super(CoordFillGenerator, self).__init__()
if lr_stream is None or hr_stream is None:
lr_stream = dict()
hr_stream = dict()
self.num_inputs = opt.label_nc + (1 if opt.contain_dontcare_label else 0) + (0 if (opt.no_instance_edge & opt.no_instance_dist) else 1)
self.lr_instance = opt.lr_instance
self.learned_ds_factor = opt.learned_ds_factor #(S2 in sec. 3.2)
self.gpu_ids = opt.gpu_ids
self.downsampling = opt.crop_size // opt.ds_scale
self.highres_stream = PixelQueryNet(self.downsampling, num_inputs=self.num_inputs,
num_outputs=opt.output_nc, width=opt.hr_width,
depth=opt.hr_depth,
no_one_hot=opt.no_one_hot, lr_instance=opt.lr_instance,
**hr_stream)
num_params = self.highres_stream.num_params
self.lowres_stream = ParaGenNet(num_params, scale_injection=opt.scale_injection)
def use_gpu(self):
return len(self.gpu_ids) > 0
def get_lowres(self, im):
"""Creates a lowres version of the input."""
device = self.use_gpu()
if(self.learned_ds_factor != self.downsampling):
myds = BilinearDownsample(int(self.downsampling//self.learned_ds_factor), self.num_inputs,device)
return myds(im)
else:
return im
def forward(self, highres):
lowres = self.get_lowres(highres)
lr_features = self.lowres_stream(lowres)
output = self.highres_stream(highres, lr_features)
return output, lr_features#, lowres
def _get_coords(bs, h, w, device, ds):
"""Creates the position encoding for the pixel-wise MLPs"""
x = th.arange(0, w).float()
y = th.arange(0, h).float()
scale = 7 / 8
x_cos = th.remainder(x, ds).float() / ds
x_sin = th.remainder(x, ds).float() / ds
y_cos = th.remainder(y, ds).float() / ds
y_sin = th.remainder(y, ds).float() / ds
x_cos = x_cos / (max(x_cos) / scale)
x_sin = x_sin / (max(x_sin) / scale)
    y_cos = y_cos / (max(y_cos) / scale)
    y_sin = y_sin / (max(y_sin) / scale)
xcos = th.cos((2 * pi * x_cos).float())
xsin = th.sin((2 * pi * x_sin).float())
ycos = th.cos((2 * pi * y_cos).float())
ysin = th.sin((2 * pi * y_sin).float())
xcos = xcos.view(1, 1, 1, w).repeat(bs, 1, h, 1)
xsin = xsin.view(1, 1, 1, w).repeat(bs, 1, h, 1)
ycos = ycos.view(1, 1, h, 1).repeat(bs, 1, 1, w)
ysin = ysin.view(1, 1, h, 1).repeat(bs, 1, 1, w)
coords = th.cat([xcos, xsin, ycos, ysin], 1).to(device)
return coords.to(device)
def spectral_norm(module, mode=True):
if mode:
return nn.utils.spectral_norm(module)
return module
class ParaGenNet(th.nn.Module):
"""Convolutional LR stream to estimate the pixel-wise MLPs parameters"""
def __init__(self, num_out, scale_injection=False):
super(ParaGenNet, self).__init__()
self.num_out = num_out
self.scale_injection = scale_injection
ngf = 64
if self.scale_injection:
self.out_para = nn.Sequential(
th.nn.Linear(ngf * 8 + 1, self.num_out)
)
else:
self.out_para = nn.Sequential(
th.nn.Linear(ngf * 8, self.num_out)
)
def forward(self, model, x, x_hr):
structure = model(x)
if self.scale_injection:
scale = (torch.ones(x_hr.size(0), 1, 1, 1) * (structure.size(3) / x_hr.size(3))) \
.to(structure.device)
scale = scale.repeat(1, structure.size(2), structure.size(3), 1)
structure = torch.cat([structure.permute(0, 2, 3, 1), scale], dim=-1)
para = self.out_para(structure).permute(0, 3, 1, 2)
else:
para = self.out_para(structure.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return para
def mask_predict(self, model, x, x_hr, mask):
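        """Like forward(), but predicts MLP parameters only for the low-res cells whose mask is not all ones (cells touching the hole)."""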
structure = model(x)
if self.scale_injection:
scale = (torch.ones(x_hr.size(0), 1, 1, 1) * (structure.size(3) / x_hr.size(3))) \
.to(structure.device)
scale = scale.repeat(1, structure.size(2), structure.size(3), 1)
structure = torch.cat([structure.permute(0, 2, 3, 1), scale], dim=-1)
else:
structure = structure.permute(0, 2, 3, 1)
bs, h, w, c = structure.size()
k = mask.size(2) // h
mask = mask.unfold(2, k, k).unfold(3, k, k)
mask = mask.permute(0, 2, 3, 4, 5, 1).contiguous().view(
bs, h, w, int(k * k))
lr_mask = torch.mean(mask, dim=-1).view(h * w)
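        # note: the flatten to (h * w) above assumes a batch size of 1 at inference time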
structure = structure.view(bs, h * w, c)
index = torch.nonzero(1 - lr_mask).squeeze(1)
structure = structure[:, index, :]
para = self.out_para(structure).permute(0, 2, 1)
return para, mask
class PixelQueryNet(th.nn.Module):
"""Addaptive pixel-wise MLPs"""
def __init__(self, downsampling,
num_inputs=13, num_outputs=3, width=64, depth=5, coordinates="cosine",
no_one_hot=False, lr_instance=False):
super(PixelQueryNet, self).__init__()
self.lr_instance = lr_instance
self.downsampling = downsampling
self.num_inputs = num_inputs - (1 if self.lr_instance else 0)
self.num_outputs = num_outputs
self.width = width
self.depth = depth
self.coordinates = coordinates
self.xy_coords = None
self.no_one_hot = no_one_hot
self.channels = []
self._set_channels()
self.num_params = 0
self.splits = {}
self._set_num_params()
@property # for backward compatibility
def ds(self):
return self.downsampling
def _set_channels(self):
"""Compute and store the hr-stream layer dimensions."""
in_ch = self.num_inputs
in_ch = in_ch + int(4)
self.channels = [in_ch]
        for _ in range(self.depth - 1): # intermediate layers -> constant size
self.channels.append(self.width)
# output layer
self.channels.append(self.num_outputs)
def _set_num_params(self):
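        """Count the per-pixel MLP parameters and record the (start, stop) slices of each layer's weights and biases."""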
nparams = 0
self.splits = {
"biases": [],
"weights": [],
}
# # go over input/output channels for each layer
idx = 0
for layer, nci in enumerate(self.channels[:-1]):
nco = self.channels[layer + 1]
nparams = nparams + nco # FC biases
self.splits["biases"].append((idx, idx + nco))
idx = idx + nco
nparams = nparams + nci * nco # FC weights
self.splits["weights"].append((idx, idx + nco * nci))
idx = idx + nco * nci
self.num_params = nparams
def _get_weight_indices(self, idx):
return self.splits["weights"][idx]
def _get_bias_indices(self, idx):
return self.splits["biases"][idx]
def forward(self, highres, lr_params):
assert lr_params.shape[1] == self.num_params, "incorrect input params"
if self.lr_instance:
highres = highres[:, :-1, :, :]
# Fetch sizes
bs, _, h, w = highres.shape
bs, _, h_lr, w_lr = lr_params.shape
k = h // h_lr
self.xy_coords = _get_coords(1, h, w, highres.device, h // h_lr)
highres = torch.repeat_interleave(self.xy_coords, repeats=bs, dim=0)
# Split input in tiles of size kxk according to the NN interp factor (the total downsampling factor),
# with channels last (for matmul)
# all pixels within a tile of kxk are processed by the same MLPs parameters
nci = highres.shape[1]
tiles = highres.unfold(2, k, k).unfold(3, k, k)
tiles = tiles.permute(0, 2, 3, 4, 5, 1).contiguous().view(
bs, h_lr, w_lr, int(k * k), nci)
out = tiles
num_layers = len(self.channels) - 1
for idx, nci in enumerate(self.channels[:-1]):
nco = self.channels[idx + 1]
# Select params in lowres buffer
bstart, bstop = self._get_bias_indices(idx)
wstart, wstop = self._get_weight_indices(idx)
w_ = lr_params[:, wstart:wstop]
b_ = lr_params[:, bstart:bstop]
w_ = w_.permute(0, 2, 3, 1).view(bs, h_lr, w_lr, nci, nco)
b_ = b_.permute(0, 2, 3, 1).view(bs, h_lr, w_lr, 1, nco)
out = th.matmul(out, w_) + b_
            # Apply a leaky ReLU non-linearity in all but the last layer, and tanh in the last
# out = th.nn.functional.leaky_relu(out, 0.01)
if idx < num_layers - 1:
out = th.nn.functional.leaky_relu(out, 0.01)
else:
out = torch.tanh(out)
#
# reorder the tiles in their correct position, and put channels first
out = out.view(bs, h_lr, w_lr, k, k, self.num_outputs).permute(
0, 5, 1, 3, 2, 4)
out = out.contiguous().view(bs, self.num_outputs, h, w)
return out
def mask_predict(self, highres, lr_params, hr_mask, lr_mask):
assert lr_params.shape[1] == self.num_params, "incorrect input params"
if self.lr_instance:
highres = highres[:, :-1, :, :]
bs, _, h, w = highres.shape
bs, h_lr, w_lr, _ = lr_mask.shape
k = h // h_lr
self.xy_coords = _get_coords(1, h, w, highres.device, h // h_lr)
pe = torch.repeat_interleave(self.xy_coords, repeats=bs, dim=0)
# Split input in tiles of size kxk according to the NN interp factor (the total downsampling factor),
# with channels last (for matmul)
# all pixels within a tile of kxk are processed by the same MLPs parameters
nci = pe.shape[1]
# bs, 5 rgbxy, h//k=h_lr, w//k=w_lr, k, k
tiles = pe.unfold(2, k, k).unfold(3, k, k)
tiles = tiles.permute(0, 2, 3, 4, 5, 1).contiguous().view(
bs, h_lr, w_lr, int(k * k), nci)
mask = torch.mean(lr_mask, dim=-1).view(h_lr * w_lr)
index = torch.nonzero(1 - mask).squeeze(1)
out = tiles
num_layers = len(self.channels) - 1
out = out.view(bs, h_lr * w_lr, int(k * k), nci)[:, index, :, :]
num = out.size(1)
for idx, nci in enumerate(self.channels[:-1]):
nco = self.channels[idx + 1]
# Select params in lowres buffer
bstart, bstop = self._get_bias_indices(idx)
wstart, wstop = self._get_weight_indices(idx)
w_ = lr_params[:, wstart:wstop]
b_ = lr_params[:, bstart:bstop]
w_ = w_.permute(0, 2, 1).view(bs, num, nci, nco)
b_ = b_.permute(0, 2, 1).view(bs, num, 1, nco)
out = th.matmul(out, w_) + b_
            # Apply a leaky ReLU non-linearity in all but the last layer, and tanh in the last
if idx < num_layers - 1:
out = th.nn.functional.leaky_relu(out, 0.01)
else:
out = torch.tanh(out)
highres = highres.unfold(2, k, k).unfold(3, k, k)
highres = highres.permute(0, 2, 3, 4, 5, 1).contiguous().view(
bs, h_lr, w_lr, int(k * k), 3).view(bs, h_lr * w_lr, int(k * k), 3)
highres[:, index, :, :] = out
out = highres.view(bs, h_lr, w_lr, k, k, self.num_outputs).permute(
0, 5, 1, 3, 2, 4)
out = out.contiguous().view(bs, self.num_outputs, h, w)
return out | 12,294 | 36.257576 | 143 | py |
CoordFill | CoordFill-master/models/misc.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import models
from models import register
from utils import make_coord
@register('metasr')
class MetaSR(nn.Module):
def __init__(self, encoder_spec):
super().__init__()
self.encoder = models.make(encoder_spec)
imnet_spec = {
'name': 'mlp',
'args': {
'in_dim': 3,
'out_dim': self.encoder.out_dim * 9 * 3,
'hidden_list': [256]
}
}
self.imnet = models.make(imnet_spec)
def gen_feat(self, inp):
self.feat = self.encoder(inp)
return self.feat
def query_rgb(self, coord, cell=None):
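        """Meta-SR style query: for each target coordinate, an MLP maps the relative offset and cell size to a (C x 3) filter, which is applied to the nearest unfolded feature to produce RGB."""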
feat = self.feat
feat = F.unfold(feat, 3, padding=1).view(
feat.shape[0], feat.shape[1] * 9, feat.shape[2], feat.shape[3])
feat_coord = make_coord(feat.shape[-2:], flatten=False).cuda()
feat_coord[:, :, 0] -= (2 / feat.shape[-2]) / 2
feat_coord[:, :, 1] -= (2 / feat.shape[-1]) / 2
feat_coord = feat_coord.permute(2, 0, 1) \
.unsqueeze(0).expand(feat.shape[0], 2, *feat.shape[-2:])
coord_ = coord.clone()
coord_[:, :, 0] -= cell[:, :, 0] / 2
coord_[:, :, 1] -= cell[:, :, 1] / 2
coord_q = (coord_ + 1e-6).clamp(-1 + 1e-6, 1 - 1e-6)
q_feat = F.grid_sample(
feat, coord_q.flip(-1).unsqueeze(1),
mode='nearest', align_corners=False)[:, :, 0, :] \
.permute(0, 2, 1)
q_coord = F.grid_sample(
feat_coord, coord_q.flip(-1).unsqueeze(1),
mode='nearest', align_corners=False)[:, :, 0, :] \
.permute(0, 2, 1)
rel_coord = coord_ - q_coord
rel_coord[:, :, 0] *= feat.shape[-2] / 2
rel_coord[:, :, 1] *= feat.shape[-1] / 2
r_rev = cell[:, :, 0] * (feat.shape[-2] / 2)
inp = torch.cat([rel_coord, r_rev.unsqueeze(-1)], dim=-1)
bs, q = coord.shape[:2]
pred = self.imnet(inp.view(bs * q, -1)).view(bs * q, feat.shape[1], 3)
pred = torch.bmm(q_feat.contiguous().view(bs * q, 1, -1), pred)
pred = pred.view(bs, q, 3)
return pred
def forward(self, inp, coord, cell):
self.gen_feat(inp)
return self.query_rgb(coord, cell)
| 2,303 | 31.450704 | 78 | py |
CoordFill | CoordFill-master/models/gan.py | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import models
from models import register
import math
import numpy as np
from torch.autograd import Variable
import os
import logging
logger = logging.getLogger(__name__)
from .coordfill import CoordFill
from .ffc_baseline import FFC
from .adv_loss import AdversarialLoss
from collections import OrderedDict
from .LPIPS.models import dist_model as dm
import random
class D_Net(nn.Module):
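    """PatchGAN-style convolutional discriminator with spectral norm; returns per-patch scores and intermediate features for feature matching."""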
def __init__(self, in_channels=3, use_sigmoid=True, use_spectral_norm=True):
super(D_Net, self).__init__()
self.use_sigmoid = use_sigmoid
self.conv1 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2),
)
self.conv2 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2),
)
self.conv3 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2),
)
self.conv4 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=1, padding=1, bias=not use_spectral_norm), use_spectral_norm),
nn.LeakyReLU(0.2),
)
self.conv5 = nn.Sequential(
spectral_norm(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4, stride=1, padding=1, bias=not use_spectral_norm), use_spectral_norm),
)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
outputs = conv5
if self.use_sigmoid:
outputs = torch.sigmoid(conv5)
return outputs, [conv1, conv2, conv3, conv4]
def spectral_norm(module, mode=True):
if mode:
return nn.utils.spectral_norm(module)
return module
@register('gan')
class GAN(nn.Module):
def __init__(self, encoder_spec=None):
super().__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from argparse import Namespace
args = Namespace()
args.n_channels = 3
args.n_classes = 3
args.no_upsampling = True
self.mode = encoder_spec['name']
if encoder_spec['name'] == 'baseline':
self.encoder = Baseline(args)
elif encoder_spec['name'] == 'ffc' or encoder_spec['name'] == 'mlp':
self.encoder = FFC(args, encoder_spec['name'], encoder_spec['mask_prediction'])
else:
self.encoder = CoordFill(args, encoder_spec['name'],
encoder_spec['mask_prediction'], encoder_spec['attffc'],
encoder_spec['scale_injection'])
self.model_LPIPS = dm.DistModel()
self.model_LPIPS.initialize(model='net-lin', net='alex', use_gpu=True)
self.fm_loss = torch.nn.L1Loss()
self.discriminator = D_Net(use_sigmoid=True)
self.criterionGAN = AdversarialLoss('nsgan')
self.lambda_D = 1
self.lambda_perceptual = 10
self.lambda_fm = 100
self.multi_res_training = encoder_spec['multi_res_training']
self.optimizer_G = torch.optim.Adam(self.encoder.parameters(), lr=1e-4)
self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=1e-4)
def set_input(self, inp, gt, input_mask):
self.input = inp.to(self.device)
self.gt = gt.to(self.device)
self.input_mask = input_mask.to(self.device)
if self.multi_res_training:
ratio = random.randint(0, 8)
size = 256 + 32 * ratio
self.input = F.interpolate(self.input, size=(size, size), mode='bilinear')
self.gt = F.interpolate(self.gt, size=(size, size), mode='bilinear')
self.input_mask = F.interpolate(self.input_mask, size=(size, size), mode='nearest')
def forward(self):
self.pred = self.encoder([self.input, self.input_mask])
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
pred_fake, _ = self.discriminator(self.pred.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False, True)
pred_real, _ = self.discriminator(self.gt)
self.loss_D_real = self.criterionGAN(pred_real, True, True)
self.loss_D = self.loss_D_fake + self.loss_D_real
# combine loss and calculate gradients
self.loss_D.backward()
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
pred_fake, feat_fake = self.discriminator(self.pred)
self.loss_GAN = self.criterionGAN(pred_fake, True, False) * self.lambda_D
self.feat_match_loss = 0
pred_real_hr, feat_real = self.discriminator(self.gt)
for i in range(len(feat_fake)):
self.feat_match_loss += self.fm_loss(feat_fake[i], feat_real[i].detach())
self.feat_match_loss = self.feat_match_loss * self.lambda_fm
self.loss_LPIPS, _ = self.model_LPIPS.forward_pair(self.pred, self.gt)
self.loss_perceptual = torch.mean(self.loss_LPIPS) * self.lambda_perceptual
self.loss_G = self.loss_perceptual + self.loss_GAN + self.feat_match_loss
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
self.set_requires_grad(self.discriminator, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
self.set_requires_grad(self.discriminator, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
| 6,804 | 36.185792 | 162 | py |
CoordFill | CoordFill-master/models/networks.py | import torch.nn as nn
from torch.nn import init
import torch.nn.utils.spectral_norm as spectral_norm
import torch
import torch.nn.functional as F
import functools
import numpy as np
class MySeparableBilinearDownsample(torch.nn.Module):
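    """Depthwise-separable bilinear (tent-kernel) downsampling, applied horizontally and then vertically."""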
def __init__(self, stride, channels, use_gpu):
super().__init__()
self.stride = stride
self.channels = channels
# create tent kernel
kernel = np.arange(1,2*stride+1,2) # ramp up
kernel = np.concatenate((kernel,kernel[::-1])) # reflect it and concatenate
if use_gpu:
kernel = torch.Tensor(kernel/np.sum(kernel)).to(device='cuda') # normalize
else:
kernel = torch.Tensor(kernel / np.sum(kernel))
self.register_buffer('kernel_horz', kernel[None,None,None,:].repeat((self.channels,1,1,1)))
self.register_buffer('kernel_vert', kernel[None,None,:,None].repeat((self.channels,1,1,1)))
self.refl = nn.ReflectionPad2d(int(stride/2))#nn.ReflectionPad2d(int(stride/2))
def forward(self, input):
return F.conv2d(F.conv2d(self.refl(input), self.kernel_horz, stride=(1,self.stride), groups=self.channels),
self.kernel_vert, stride=(self.stride,1), groups=self.channels)
class ASAPNetsBlock(nn.Module):
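    """Conv + norm + activation block with selectable padding mode (reflection / replication / zero)."""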
def __init__(self, dim, norm_layer, activation=nn.ReLU(), kernel_size=3, reflection_pad=False, replicate_pad=False):
super().__init__()
padw = 1
if reflection_pad:
self.conv_block = nn.Sequential(nn.ReflectionPad2d(padw),
norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=0)),
activation
)
elif replicate_pad:
self.conv_block = nn.Sequential(nn.ReplicationPad2d(padw),
norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=0)),
activation
)
else:
self.conv_block = nn.Sequential(norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=padw)),
activation
)
def forward(self, x):
out = self.conv_block(x)
return out
def get_nonspade_norm_layer(opt, norm_type='instance'):
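    """Return a helper that wraps a given layer with the requested normalization (optionally applying spectral norm first)."""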
# helper function to get # output channels of the previous layer
def get_out_channel(layer):
if hasattr(layer, 'out_channels'):
return getattr(layer, 'out_channels')
return layer.weight.size(0)
# this function will be returned
def add_norm_layer(layer):
nonlocal norm_type
if norm_type.startswith('spectral'):
layer = spectral_norm(layer)
subnorm_type = norm_type[len('spectral'):]
else:
subnorm_type = norm_type
if subnorm_type == 'none' or len(subnorm_type) == 0:
return layer
# remove bias in the previous layer, which is meaningless
# since it has no effect after normalization
if getattr(layer, 'bias', None) is not None:
delattr(layer, 'bias')
layer.register_parameter('bias', None)
if subnorm_type == 'batch':
norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'spectral':
norm_layer = torch.nn.utils.spectral_norm(get_out_channel(layer))
# elif subnorm_type == 'sync_batch':
# norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'instance':
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
elif subnorm_type == 'instanceaffine':
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=True)
else:
raise ValueError('normalization layer %s is not recognized' % subnorm_type)
return nn.Sequential(layer, norm_layer)
return add_norm_layer
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def print_network(self):
if isinstance(self, list):
self = self[0]
num_params = 0
for param in self.parameters():
num_params = num_params + param.numel()
print('Network [%s] was created. Total number of parameters: %.1f million. '
'To see the architecture, do print(network).'
% (type(self).__name__, num_params / 1000000))
def init_weights(self, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if classname.find('BatchNorm2d') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init.normal_(m.weight.data, 1.0, gain)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'xavier_uniform':
init.xavier_uniform_(m.weight.data, gain=1.0)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
elif init_type == 'none': # uses pytorch's default init method
m.reset_parameters()
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
'''
for name, param in m.named_parameters():
if (name == "lowres_stream.params_pred.weight"):
print("%s_init" % name)
init.zeros_(param.data[0:13])
init.normal_(param.data[13:13 + 64 * 64], 0.0, 0.02)
for i in range(1,6):
init.zeros_(param.data[13+i*64*64+(i-1)*64:13+64*64+i*64])
init.normal_(param.data[13+i*64*64+i*64:13+i*64+(i+1)*64*64], 0.0, 0.02)
init.zeros_(param.data[13 + i * 64 * 64 + (i - 1) * 64:13 + 64 * 64 + i * 64 + 3])
init.normal_(param.data[13 + i * 64 * 64 + i * 64 + 3 :13 + i * 64 + i * 64 * 64 +64*3], 0.0, 0.02)
if (name == "lowres_stream.params_pred.bias"):
print("%s_init" % name)
init.zeros_(param.data)
'''
self.apply(init_func)
# propagate to children
for m in self.children():
if hasattr(m, 'init_weights'):
m.init_weights(init_type, gain) | 7,259 | 43 | 120 | py |
CoordFill | CoordFill-master/models/ffc.py | # Fast Fourier Convolution NeurIPS 2020
# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py
# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.fft
# from saicinpainting.training.modules.base import get_activation, BaseDiscriminator
# from saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper
# from saicinpainting.training.modules.squeeze_excitation import SELayer
# from saicinpainting.utils import get_shape
class FFCSE_block(nn.Module):
def __init__(self, channels, ratio_g):
super(FFCSE_block, self).__init__()
in_cg = int(channels * ratio_g)
in_cl = channels - in_cg
r = 16
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.conv1 = nn.Conv2d(channels, channels // r,
kernel_size=1, bias=True)
self.relu1 = nn.ReLU()
self.conv_a2l = None if in_cl == 0 else nn.Conv2d(
channels // r, in_cl, kernel_size=1, bias=True)
self.conv_a2g = None if in_cg == 0 else nn.Conv2d(
channels // r, in_cg, kernel_size=1, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = x if type(x) is tuple else (x, 0)
id_l, id_g = x
x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1)
x = self.avgpool(x)
x = self.relu1(self.conv1(x))
x_l = 0 if self.conv_a2l is None else id_l * \
self.sigmoid(self.conv_a2l(x))
x_g = 0 if self.conv_a2g is None else id_g * \
self.sigmoid(self.conv_a2g(x))
return x_l, x_g
class FourierUnit(nn.Module):
def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear',
spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'):
# bn_layer not used
super(FourierUnit, self).__init__()
self.groups = groups
self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0),
out_channels=out_channels * 2,
kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False)
self.bn = torch.nn.BatchNorm2d(out_channels * 2)
self.relu = torch.nn.ReLU()
# squeeze and excitation block
self.use_se = use_se
if use_se:
if se_kwargs is None:
se_kwargs = {}
self.se = SELayer(self.conv_layer.in_channels, **se_kwargs)
self.spatial_scale_factor = spatial_scale_factor
self.spatial_scale_mode = spatial_scale_mode
self.spectral_pos_encoding = spectral_pos_encoding
self.ffc3d = ffc3d
self.fft_norm = fft_norm
def forward(self, x):
batch = x.shape[0]
if self.spatial_scale_factor is not None:
orig_size = x.shape[-2:]
x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False)
r_size = x.size()
# (batch, c, h, w/2+1, 2)
fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1)
ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm)
ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1)
ffted = ffted.view((batch, -1,) + ffted.size()[3:])
if self.spectral_pos_encoding:
height, width = ffted.shape[-2:]
coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted)
coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted)
ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1)
if self.use_se:
ffted = self.se(ffted)
ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1)
# ffted = self.relu(self.bn(ffted))
ffted = self.relu(ffted)
ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute(
0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2)
ffted = torch.complex(ffted[..., 0], ffted[..., 1])
ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:]
output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm)
if self.spatial_scale_factor is not None:
output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False)
return output
class SpectralTransform(nn.Module):
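    """Global branch of the FFC: a 1x1 conv, a FourierUnit acting in the frequency domain, and an optional local Fourier unit (LFU)."""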
def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, **fu_kwargs):
# bn_layer not used
super(SpectralTransform, self).__init__()
self.enable_lfu = enable_lfu
if stride == 2:
self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
else:
self.downsample = nn.Identity()
self.stride = stride
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels //
2, kernel_size=1, groups=groups, bias=False),
# nn.BatchNorm2d(out_channels // 2),
nn.ReLU()
)
self.fu = FourierUnit(
out_channels // 2, out_channels // 2, groups, **fu_kwargs)
if self.enable_lfu:
self.lfu = FourierUnit(
out_channels // 2, out_channels // 2, groups)
self.conv2 = torch.nn.Conv2d(
out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False)
def forward(self, x):
x = self.downsample(x)
x = self.conv1(x)
output = self.fu(x)
if self.enable_lfu:
n, c, h, w = x.shape
split_no = 2
split_s = h // split_no
xs = torch.cat(torch.split(
x[:, :c // 4], split_s, dim=-2), dim=1).contiguous()
xs = torch.cat(torch.split(xs, split_s, dim=-1),
dim=1).contiguous()
xs = self.lfu(xs)
xs = xs.repeat(1, 1, split_no, split_no).contiguous()
else:
xs = 0
output = self.conv2(x + output + xs)
return output
class FFC(nn.Module):
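    """Fast Fourier Convolution: channels are split into a local branch (regular convs) and a global branch (SpectralTransform), with cross-branch connections."""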
def __init__(self, in_channels, out_channels, kernel_size,
ratio_gin, ratio_gout, stride=1, padding=0,
dilation=1, groups=1, bias=False, enable_lfu=True,
padding_type='reflect', gated=False, **spectral_kwargs):
super(FFC, self).__init__()
assert stride == 1 or stride == 2, "Stride should be 1 or 2."
self.stride = stride
in_cg = int(in_channels * ratio_gin)
in_cl = in_channels - in_cg
out_cg = int(out_channels * ratio_gout)
out_cl = out_channels - out_cg
#groups_g = 1 if groups == 1 else int(groups * ratio_gout)
#groups_l = 1 if groups == 1 else groups - groups_g
self.ratio_gin = ratio_gin
self.ratio_gout = ratio_gout
self.global_in_num = in_cg
module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d
self.convl2l = module(in_cl, out_cl, kernel_size,
stride, padding, dilation, groups, bias, padding_mode=padding_type)
module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d
self.convl2g = module(in_cl, out_cg, kernel_size,
stride, padding, dilation, groups, bias, padding_mode=padding_type)
module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d
self.convg2l = module(in_cg, out_cl, kernel_size,
stride, padding, dilation, groups, bias, padding_mode=padding_type)
module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform
self.convg2g = module(
in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs)
self.gated = gated
module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d
self.gate = module(in_channels, 2, 1)
def forward(self, x):
x_l, x_g = x if type(x) is tuple else (x, 0)
out_xl, out_xg = 0, 0
if self.gated:
total_input_parts = [x_l]
if torch.is_tensor(x_g):
total_input_parts.append(x_g)
total_input = torch.cat(total_input_parts, dim=1)
gates = torch.sigmoid(self.gate(total_input))
g2l_gate, l2g_gate = gates.chunk(2, dim=1)
else:
g2l_gate, l2g_gate = 1, 1
if self.ratio_gout != 1:
out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate
if self.ratio_gout != 0:
out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g)
return out_xl, out_xg
class FFC_BN_ACT(nn.Module):
def __init__(self, in_channels, out_channels,
kernel_size, ratio_gin, ratio_gout,
stride=1, padding=0, dilation=1, groups=1, bias=False,
norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity,
padding_type='reflect',
enable_lfu=True, **kwargs):
super(FFC_BN_ACT, self).__init__()
self.ffc = FFC(in_channels, out_channels, kernel_size,
ratio_gin, ratio_gout, stride, padding, dilation,
groups, bias, enable_lfu, padding_type=padding_type, **kwargs)
lnorm = nn.Identity if ratio_gout == 1 else norm_layer
gnorm = nn.Identity if ratio_gout == 0 else norm_layer
global_channels = int(out_channels * ratio_gout)
self.bn_l = lnorm(out_channels - global_channels)
self.bn_g = gnorm(global_channels)
lact = nn.Identity if ratio_gout == 1 else activation_layer
gact = nn.Identity if ratio_gout == 0 else activation_layer
self.act_l = lact()
self.act_g = gact()
def forward(self, x):
x_l, x_g = self.ffc(x)
# x_l = self.act_l(self.bn_l(x_l))
# x_g = self.act_g(self.bn_g(x_g))
x_l = self.act_l(x_l)
x_g = self.act_g(x_g)
return x_l, x_g
class FFCResnetBlock(nn.Module):
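    """Residual block built from two FFC_BN_ACT layers operating on the (local, global) feature pair."""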
def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1,
spatial_transform_kwargs=None, inline=False, **conv_kwargs):
super().__init__()
self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation,
norm_layer=norm_layer,
activation_layer=activation_layer,
padding_type=padding_type,
**conv_kwargs)
self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=1, dilation=1,
norm_layer=norm_layer,
activation_layer=nn.Identity,
padding_type=padding_type,
**conv_kwargs)
if spatial_transform_kwargs is not None:
self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs)
self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs)
self.inline = inline
def forward(self, x):
if self.inline:
x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:]
else:
x_l, x_g = x if type(x) is tuple else (x, 0)
id_l, id_g = x_l, x_g
x_l, x_g = self.conv1((x_l, x_g))
x_l, x_g = self.conv2((x_l, x_g))
x_l, x_g = id_l + x_l, id_g + x_g
out = x_l, x_g
if self.inline:
out = torch.cat(out, dim=1)
return out
class ConcatTupleLayer(nn.Module):
def forward(self, x):
assert isinstance(x, tuple)
x_l, x_g = x
assert torch.is_tensor(x_l) or torch.is_tensor(x_g)
if not torch.is_tensor(x_g):
return x_l
return torch.cat(x, dim=1)
class FFCResNetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, res_dilation=2, norm_layer=nn.BatchNorm2d,
padding_type='reflect', activation_layer=nn.ReLU,
up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True),
# init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={},
init_conv_kwargs={"ratio_gin": 0, "ratio_gout": 0, "enable_lfu": False},
downsample_conv_kwargs={"ratio_gin": 0, "ratio_gout": 0, "enable_lfu": False},
resnet_conv_kwargs={"ratio_gin": 0.75, "ratio_gout": 0.75, "enable_lfu": False},
spatial_transform_layers=None, spatial_transform_kwargs={},
add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}, decode=True):
assert (n_blocks >= 0)
super().__init__()
model = [nn.ReflectionPad2d(3),
FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer,
activation_layer=activation_layer, **init_conv_kwargs)]
kw = 4
### downsample
for i in range(n_downsampling):
mult = 2 ** i
if i == n_downsampling - 1:
cur_conv_kwargs = dict(downsample_conv_kwargs)
cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0)
else:
cur_conv_kwargs = downsample_conv_kwargs
model = model + [FFC_BN_ACT(min(max_features, ngf * mult),
min(max_features, ngf * mult * 2),
kernel_size=kw, stride=2, padding=1,
norm_layer=norm_layer,
activation_layer=activation_layer,
**cur_conv_kwargs)]
mult = 2 ** n_downsampling
feats_num_bottleneck = min(max_features, ngf * mult)
### resnet blocks
for i in range(n_blocks):
cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer,
norm_layer=norm_layer, dilation=res_dilation, **resnet_conv_kwargs)
if spatial_transform_layers is not None and i in spatial_transform_layers:
cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs)
model = model + [cur_resblock]
model = model + [ConcatTupleLayer()]
self.model = nn.Sequential(*model)
self.encoder = self.model
self.decode = decode
model = []
### upsample
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model = model + [
# nn.ConvTranspose2d(min(max_features, ngf * mult),
# min(max_features, int(ngf * mult / 2)),
# kernel_size=3, stride=2, padding=1, output_padding=1),
# up_norm_layer(min(max_features, int(ngf * mult / 2))),
nn.ConvTranspose2d(min(max_features, ngf * mult),
min(max_features, int(ngf * mult / 2)),
kernel_size=kw, stride=2, padding=1),
up_activation]
if out_ffc:
model = model + [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer,
norm_layer=norm_layer, inline=True, **out_ffc_kwargs)]
model = model + [nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
if add_out_act:
model.append(get_activation('tanh' if add_out_act is True else add_out_act))
self.model = nn.Sequential(*model)
self.decoder = self.model
def forward(self, input):
output = self.encoder(input)
if self.decode:
output = self.decoder(output)
return output
import abc
from typing import Tuple, List
class BaseDiscriminator(nn.Module):
@abc.abstractmethod
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Predict scores and get intermediate activations. Useful for feature matching loss
:return tuple (scores, list of intermediate activations)
"""
        raise NotImplementedError()
class FFCNLayerDiscriminator(BaseDiscriminator):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512,
use_sigmoid=True,
# init_conv_kwargs={}, conv_kwargs={}
init_conv_kwargs = {"ratio_gin": 0, "ratio_gout": 0, "enable_lfu": False},
conv_kwargs={"ratio_gin": 0, "ratio_gout": 0, "enable_lfu": False}):
super().__init__()
self.n_layers = n_layers
self.use_sigmoid = use_sigmoid
def _act_ctor():
return nn.LeakyReLU(negative_slope=0.2)
kw = 4
padw = int(np.ceil((kw-1.0)/2))
sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, stride=2, padding=padw, norm_layer=norm_layer,
activation_layer=_act_ctor, **init_conv_kwargs)]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, max_features)
cur_model = [
FFC_BN_ACT(nf_prev, nf,
kernel_size=kw, stride=2, padding=padw,
norm_layer=norm_layer,
activation_layer=_act_ctor,
**conv_kwargs)
]
sequence.append(cur_model)
nf_prev = nf
nf = min(nf * 2, 512)
cur_model = [
FFC_BN_ACT(nf_prev, nf,
kernel_size=kw, stride=1, padding=padw,
norm_layer=norm_layer,
activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs),
**conv_kwargs),
ConcatTupleLayer()
]
sequence.append(cur_model)
sequence = sequence + [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
for n in range(len(sequence)):
setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
def get_all_activations(self, x):
res = [x]
for n in range(self.n_layers + 2):
model = getattr(self, 'model' + str(n))
res.append(model(res[-1]))
return res[1:]
def forward(self, x):
act = self.get_all_activations(x)
feats = []
for out in act[:-1]:
if isinstance(out, tuple):
if torch.is_tensor(out[1]):
out = torch.cat(out, dim=1)
else:
out = out[0]
feats.append(out)
outputs = act[-1]
if self.use_sigmoid:
outputs = torch.sigmoid(act[-1])
# return outputs
return outputs, feats
from kornia.geometry.transform import rotate
class LearnableSpatialTransformWrapper(nn.Module):
def __init__(self, impl, pad_coef=0.5, angle_init_range=80, train_angle=True):
super().__init__()
self.impl = impl
self.angle = torch.rand(1) * angle_init_range
if train_angle:
self.angle = nn.Parameter(self.angle, requires_grad=True)
self.pad_coef = pad_coef
def forward(self, x):
if torch.is_tensor(x):
return self.inverse_transform(self.impl(self.transform(x)), x)
elif isinstance(x, tuple):
x_trans = tuple(self.transform(elem) for elem in x)
y_trans = self.impl(x_trans)
return tuple(self.inverse_transform(elem, orig_x) for elem, orig_x in zip(y_trans, x))
else:
raise ValueError(f'Unexpected input type {type(x)}')
def transform(self, x):
height, width = x.shape[2:]
pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef)
x_padded = F.pad(x, [pad_w, pad_w, pad_h, pad_h], mode='reflect')
x_padded_rotated = rotate(x_padded, angle=self.angle.to(x_padded))
return x_padded_rotated
def inverse_transform(self, y_padded_rotated, orig_x):
height, width = orig_x.shape[2:]
pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef)
y_padded = rotate(y_padded_rotated, angle=-self.angle.to(y_padded_rotated))
y_height, y_width = y_padded.shape[2:]
y = y_padded[:, :, pad_h : y_height - pad_h, pad_w : y_width - pad_w]
return y
class SELayer(nn.Module):
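    """Squeeze-and-Excitation block: global average pooling, a two-layer bottleneck
    MLP with sigmoid gating, and channel-wise rescaling of the input."""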
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
res = x * y.expand_as(x)
return res
def get_activation(kind='tanh'):
if kind == 'tanh':
return nn.Tanh()
if kind == 'sigmoid':
return nn.Sigmoid()
if kind is False:
return nn.Identity()
raise ValueError(f'Unknown activation kind {kind}')
import numbers
def get_shape(t):
if torch.is_tensor(t):
return tuple(t.shape)
elif isinstance(t, dict):
return {n: get_shape(q) for n, q in t.items()}
elif isinstance(t, (list, tuple)):
return [get_shape(q) for q in t]
elif isinstance(t, numbers.Number):
return type(t)
else:
raise ValueError('unexpected type {}'.format(type(t))) | 22,247 | 39.014388 | 125 | py |
CoordFill | CoordFill-master/models/sync_batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import contextlib
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
try:
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
except ImportError:
ReduceAddCoalesced = Broadcast = None
try:
from jactorch.parallel.comm import SyncMaster
from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
except ImportError:
from .comm import SyncMaster
from .replicate import DataParallelWithCallback
__all__ = [
'set_sbn_eps_mode',
'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
'patch_sync_batchnorm', 'convert_model'
]
SBN_EPS_MODE = 'clamp'
def set_sbn_eps_mode(mode):
global SBN_EPS_MODE
assert mode in ('clamp', 'plus')
SBN_EPS_MODE = mode
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dimensions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine,
track_running_stats=track_running_stats)
if not self.track_running_stats:
import warnings
warnings.warn('track_running_stats=False is not supported by the SynchronizedBatchNorm.')
self._sync_master = SyncMaster(self._data_parallel_master)
self._is_parallel = False
self._parallel_id = None
self._slave_pipe = None
def forward(self, input):
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
if not (self._is_parallel and self.training):
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
# Resize the input to (B, C, -1).
input_shape = input.size()
assert input.size(1) == self.num_features, 'Channel size mismatch: got {}, expect {}.'.format(input.size(1), self.num_features)
input = input.view(input.size(0), self.num_features, -1)
# Compute the sum and square-sum.
sum_size = input.size(0) * input.size(2)
input_sum = _sum_ft(input)
input_ssum = _sum_ft(input ** 2)
# Reduce-and-broadcast the statistics.
if self._parallel_id == 0:
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
else:
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# Compute the output.
if self.affine:
# MJY:: Fuse the multiplication for speed.
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
else:
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
# Reshape it.
return output.view(input_shape)
def __data_parallel_replicate__(self, ctx, copy_id):
self._is_parallel = True
self._parallel_id = copy_id
# parallel_id == 0 means master device.
if self._parallel_id == 0:
ctx.sync_master = self._sync_master
else:
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
return outputs
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
if hasattr(torch, 'no_grad'):
with torch.no_grad():
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
else:
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
if SBN_EPS_MODE == 'clamp':
return mean, bias_var.clamp(self.eps) ** -0.5
elif SBN_EPS_MODE == 'plus':
return mean, (bias_var + self.eps) ** -0.5
else:
raise ValueError('Unknown EPS mode: {}.'.format(SBN_EPS_MODE))
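# Note on _compute_mean_std above: with s = sum(x) and ss = sum(x ** 2) gathered
# across devices, mean = s / n and the biased variance is (ss - s * mean) / n,
# i.e. ss / n - mean ** 2; the unbiased estimate divides by (n - 1) instead.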
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm1d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
training, PyTorch's implementation normalize the tensor on each device using
the statistics only on that device, which accelerated the computation and
is also easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for one-GPU or CPU-only case, this module behaves exactly same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples:
# >>> # With Learnable Parameters
# >>> m = SynchronizedBatchNorm1d(100)
# >>> # Without Learnable Parameters
# >>> m = SynchronizedBatchNorm1d(100, affine=False)
# >>> input = torch.autograd.Variable(torch.randn(20, 100))
# >>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
of 3d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm2d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
training, PyTorch's implementation normalize the tensor on each device using
the statistics only on that device, which accelerated the computation and
is also easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for one-GPU or CPU-only case, this module behaves exactly same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples:
# >>> # With Learnable Parameters
# >>> m = SynchronizedBatchNorm2d(100)
# >>> # Without Learnable Parameters
# >>> m = SynchronizedBatchNorm2d(100, affine=False)
# >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
# >>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
of 4d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm3d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
training, PyTorch's implementation normalize the tensor on each device using
the statistics only on that device, which accelerated the computation and
is also easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for one-GPU or CPU-only case, this module behaves exactly same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
or Spatio-temporal BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x depth x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples:
# >>> # With Learnable Parameters
# >>> m = SynchronizedBatchNorm3d(100)
# >>> # Without Learnable Parameters
# >>> m = SynchronizedBatchNorm3d(100, affine=False)
# >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
# >>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
@contextlib.contextmanager
def patch_sync_batchnorm():
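    """Context manager that temporarily replaces nn.BatchNorm1d/2d/3d with the
    synchronized versions defined above and restores the originals on exit.
    Example (illustrative model constructor):
    # >>> with patch_sync_batchnorm():
    # >>>     m = build_model_with_batchnorm()  # BatchNorm layers are now synchronized
    """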
import torch.nn as nn
backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d
nn.BatchNorm1d = SynchronizedBatchNorm1d
nn.BatchNorm2d = SynchronizedBatchNorm2d
nn.BatchNorm3d = SynchronizedBatchNorm3d
yield
nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
def convert_model(module):
"""Traverse the input module and its child recursively
and replace all instance of torch.nn.modules.batchnorm.BatchNorm*N*d
to SynchronizedBatchNorm*N*d
Args:
module: the input module needs to be convert to SyncBN model
Examples:
# >>> import torch.nn as nn
# >>> import torchvision
# >>> # m is a standard pytorch model
# >>> m = torchvision.models.resnet18(True)
# >>> m = nn.DataParallel(m)
# >>> # after convert, m is using SyncBN
# >>> m = convert_model(m)
"""
if isinstance(module, torch.nn.DataParallel):
mod = module.module
mod = convert_model(mod)
mod = DataParallelWithCallback(mod, device_ids=module.device_ids)
return mod
mod = module
for pth_module, sync_module in zip([torch.nn.modules.batchnorm.BatchNorm1d,
torch.nn.modules.batchnorm.BatchNorm2d,
torch.nn.modules.batchnorm.BatchNorm3d],
[SynchronizedBatchNorm1d,
SynchronizedBatchNorm2d,
SynchronizedBatchNorm3d]):
if isinstance(module, pth_module):
mod = sync_module(module.num_features, module.eps, module.momentum, module.affine)
mod.running_mean = module.running_mean
mod.running_var = module.running_var
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_model(child))
return mod | 16,476 | 43.05615 | 135 | py |
CoordFill | CoordFill-master/models/adv_loss.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch import autograd
import torchvision.models as models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class AdversarialLoss(nn.Module):
"""
Adversarial loss
https://arxiv.org/abs/1711.10337
"""
def __init__(self, type='nsgan', target_real_label=1.0, target_fake_label=0.0):
"""
        type = nsgan | relativegan | lsgan | hinge
"""
super(AdversarialLoss, self).__init__()
self.type = type
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
        if type == 'nsgan':
            self.criterion = nn.BCELoss()
        elif type == 'relativegan':
            self.criterion = nn.BCEWithLogitsLoss()
        elif type == 'lsgan':
            self.criterion = nn.MSELoss()
        elif type == 'hinge':
            self.criterion = nn.ReLU()
def __call__(self, outputs, is_real, is_disc=None):
if self.type == 'hinge':
if is_disc:
if is_real:
outputs = -outputs
return self.criterion(1 + outputs).mean()
else:
return (-outputs).mean()
else:
labels = (self.real_label if is_real else self.fake_label).expand_as(outputs)
loss = self.criterion(outputs, labels)
return loss | 1,484 | 28.7 | 89 | py |
CoordFill | CoordFill-master/models/coordfill.py | import torch.nn as nn
import torch.nn.functional as F
import torch
from scipy import ndimage
import numpy as np
from .ffc import FFCResNetGenerator, FFCResnetBlock, ConcatTupleLayer, FFC_BN_ACT
from .modules import CoordFillGenerator
class AttFFC(nn.Module):
"""Convolutional LR stream to estimate the pixel-wise MLPs parameters"""
def __init__(self, ngf):
super(AttFFC, self).__init__()
self.add = FFC_BN_ACT(ngf, ngf, kernel_size=3, stride=1, padding=1,
norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU,
**{"ratio_gin": 0.75, "ratio_gout": 0.75, "enable_lfu": False})
self.minus = FFC_BN_ACT(ngf+1, ngf, kernel_size=3, stride=1, padding=1,
norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU,
**{"ratio_gin": 0, "ratio_gout": 0.75, "enable_lfu": False})
self.mask = FFC_BN_ACT(ngf, 1, kernel_size=3, stride=1, padding=1,
norm_layer=nn.BatchNorm2d, activation_layer=nn.Sigmoid,
**{"ratio_gin": 0.75, "ratio_gout": 0, "enable_lfu": False})
def forward(self, x):
x_l, x_g = x if type(x) is tuple else (x, 0)
mask, _ = self.mask((x_l, x_g))
minus_l, minus_g = self.minus(torch.cat([x_l, x_g, mask], 1))
add_l, add_g = self.add((x_l - minus_l, x_g - minus_g))
x_l, x_g = x_l - minus_l + add_l, x_g - minus_g + add_g
return x_l, x_g
class AttFFCResNetGenerator(nn.Module):
"""Convolutional LR stream to estimate the pixel-wise MLPs parameters"""
def __init__(self, ngf):
super(AttFFCResNetGenerator, self).__init__()
self.dowm = nn.Sequential(
nn.ReflectionPad2d(3),
FFC_BN_ACT(4, 64, kernel_size=7, padding=0, norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU,
**{"ratio_gin": 0, "ratio_gout": 0, "enable_lfu": False}),
FFC_BN_ACT(64, 128, kernel_size=4, stride=2, padding=1,
norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU,
**{"ratio_gin": 0, "ratio_gout": 0, "enable_lfu": False}),
FFC_BN_ACT(128, 256, kernel_size=4, stride=2, padding=1,
norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU,
**{"ratio_gin": 0, "ratio_gout": 0, "enable_lfu": False}),
FFC_BN_ACT(256, 512, kernel_size=4, stride=2, padding=1,
norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU,
**{"ratio_gin": 0, "ratio_gout": 0.75, "enable_lfu": False}),
)
self.block1 = AttFFC(ngf)
self.block2 = AttFFC(ngf)
self.block3 = AttFFC(ngf)
self.block4 = AttFFC(ngf)
self.block5 = AttFFC(ngf)
self.block6 = AttFFC(ngf)
self.c = ConcatTupleLayer()
def forward(self, x):
x = self.dowm(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.c(x)
return x
from .ffc_baseline import MLPModel
class CoordFill(nn.Module):
def __init__(self, args, name, mask_prediction=False, attffc=False,
scale_injection=False):
super(CoordFill, self).__init__()
self.args = args
self.n_channels = args.n_channels
self.n_classes = args.n_classes
self.out_dim = args.n_classes
self.in_size = 256
self.name = name
self.mask_prediction = mask_prediction
self.attffc = attffc
self.scale_injection = scale_injection
self.opt = self.get_opt()
self.asap = CoordFillGenerator(self.opt)
if self.name == 'ffc':
self.refine = FFCResNetGenerator(4, 3, ngf=64, n_downsampling=3,
n_blocks=6, res_dilation=1, decode=True)
elif self.name == 'mlp':
self.refine = MLPModel()
elif self.name == 'coordfill':
if self.attffc:
self.refine = AttFFCResNetGenerator(512)
else:
self.refine = FFCResNetGenerator(4, 3, ngf=64, n_downsampling=3,
n_blocks=6, res_dilation=1, decode=False)
def get_opt(self):
from yacs.config import CfgNode as CN
opt = CN()
opt.label_nc = 0
# opt.label_nc = 1
opt.lr_instance = False
opt.crop_size = 512
opt.ds_scale = 32
opt.aspect_ratio = 1.0
opt.contain_dontcare_label = False
opt.no_instance_edge = True
opt.no_instance_dist = True
opt.gpu_ids = 0
opt.output_nc = 3
opt.hr_width = 64
opt.hr_depth = 5
opt.scale_injection = self.scale_injection
opt.no_one_hot = False
opt.lr_instance = False
opt.norm_G = 'batch'
opt.lr_width = 256
opt.lr_max_width = 256
opt.lr_depth = 5
opt.learned_ds_factor = 1
opt.reflection_pad = False
return opt
    def forward(self, inp):
        img, mask = inp
        hr_hole = img * mask  # masked high-resolution input (holes zeroed out)
        # Work at a fixed low resolution for the parameter-generating stream.
        lr_img = F.interpolate(img, size=(self.in_size, self.in_size), mode='bilinear')
        lr_mask = F.interpolate(mask, size=(self.in_size, self.in_size), mode='nearest')
        lr_hole = lr_img * lr_mask
        # The LR stream predicts per-pixel MLP parameters; the HR stream evaluates
        # those MLPs on the full-resolution input to synthesize the missing content.
        lr_features = self.asap.lowres_stream(self.refine, torch.cat([lr_hole, lr_mask], dim=1), hr_hole)
        output = self.asap.highres_stream(hr_hole, lr_features)
        if self.mask_prediction:
            output = output * (1 - mask) + hr_hole  # keep known pixels, fill only holes
        return output
def mask_predict(self, inp):
img, mask = inp
hr_hole = img * mask
lr_img = F.interpolate(img, size=(self.in_size, self.in_size), mode='bilinear')
lr_mask = F.interpolate(mask, size=(self.in_size, self.in_size), mode='nearest')
lr_hole = lr_img * lr_mask
lr_features, temp_mask = self.asap.lowres_stream.mask_predict(self.refine, torch.cat([lr_hole, lr_mask], dim=1), hr_hole, mask)
output = self.asap.highres_stream.mask_predict(hr_hole, lr_features, mask, temp_mask)
output = output * (1 - mask) + hr_hole
return output
def load_state_dict(self, state_dict, strict=True):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') == -1:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
device=torch.device('cuda')
# device=torch.device('cpu')
from models import register
from argparse import Namespace
@register('asap')
def make_unet(n_channels=3, n_classes=3, no_upsampling=False):
args = Namespace()
args.n_channels = n_channels
args.n_classes = n_classes
args.no_upsampling = no_upsampling
    return CoordFill(args, 'coordfill') | 7,669 | 36.598039 | 135 | py
CoordFill | CoordFill-master/models/bn_helper.py | import torch
import functools
if torch.__version__.startswith('0'):
from .sync_bn.inplace_abn.bn import InPlaceABNSync
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
BatchNorm2d_class = InPlaceABNSync
relu_inplace = False
else:
BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm
relu_inplace = True
# The version-dependent definitions above are overridden below; this repo falls
# back to plain (non-synchronized) torch.nn.BatchNorm2d.
BatchNorm2d = torch.nn.BatchNorm2d
BatchNorm2d_class = BatchNorm2d
relu_inplace = False | 451 | 27.25 | 70 | py |
CoordFill | CoordFill-master/models/ffc_baseline.py | import torch.nn as nn
import torch.nn.functional as F
import torch
from scipy import ndimage
import numpy as np
class ResnetBlock_remove_IN(nn.Module):
def __init__(self, dim, dilation=1, use_spectral_norm=True):
super(ResnetBlock_remove_IN, self).__init__()
self.conv_block = nn.Sequential(
nn.ReflectionPad2d(dilation),
spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=dilation, bias=not use_spectral_norm), use_spectral_norm),
nn.ReLU(),
nn.ReflectionPad2d(1),
spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=1, bias=not use_spectral_norm), use_spectral_norm),
)
def forward(self, x):
out = x + self.conv_block(x)
# Remove ReLU at the end of the residual block
# http://torch.ch/blog/2016/02/04/resnets.html
return out
def spectral_norm(module, mode=True):
if mode:
return nn.utils.spectral_norm(module)
return module
class MLPModel(nn.Module):
"""Convolutional LR stream to estimate the pixel-wise MLPs parameters"""
def __init__(self):
super(MLPModel, self).__init__()
self.refine = FFCResNetGenerator(4, 3, ngf=64,
n_downsampling=3, n_blocks=6, res_dilation=1, decode=False)
self.mapping = nn.Conv2d(64 * 8, 64, 1)
self.mlp = nn.Sequential(
nn.Conv2d(64, 64, 1),
nn.ReLU(),
nn.Conv2d(64, 64, 1),
nn.ReLU(),
nn.Conv2d(64, 64, 1),
nn.ReLU(),
nn.Conv2d(64, 64, 1),
nn.ReLU(),
nn.Conv2d(64, 3, 1),
)
def forward(self, x):
bs, _, h, w = x.size()
x = self.refine(x)
x = self.mapping(x)
x = F.interpolate(x, size=(h, w), mode='nearest')
x = self.mlp(x)
x = torch.tanh(x)
return x
from .ffc import FFCResNetGenerator, FFCResnetBlock, ConcatTupleLayer, FFC_BN_ACT
class FFC(nn.Module):
def __init__(self, args, name, mask_prediction=False):
super(FFC, self).__init__()
self.args = args
self.n_channels = args.n_channels
self.n_classes = args.n_classes
self.out_dim = args.n_classes
self.name = name
self.mask_prediction = mask_prediction
if self.name == 'ffc':
self.refine = FFCResNetGenerator(4, 3, ngf=64, n_downsampling=3, n_blocks=6, res_dilation=1, decode=True)
elif self.name == 'mlp':
self.refine = MLPModel()
def forward(self, inp):
img, mask = inp
hole = img * mask
output = self.refine(torch.cat([hole, mask], dim=1))
if self.mask_prediction:
output = output * (1 - mask) + hole
return output, output
def load_state_dict(self, state_dict, strict=True):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') == -1:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
device=torch.device('cuda')
# device=torch.device('cpu')
from models import register
from argparse import Namespace
@register('ffc')
def make_unet(n_channels=3, n_classes=3, no_upsampling=False):
args = Namespace()
args.n_channels = n_channels
args.n_classes = n_classes
args.no_upsampling = no_upsampling
    return FFC(args, 'ffc')
| 4,182 | 32.464 | 164 | py |
CoordFill | CoordFill-master/models/LPIPS/models/base_model.py | import os
import torch
import numpy as np
import sys
sys.path.insert(1, './LPIPS/')
# import util.util as util
from torch.autograd import Variable
from pdb import set_trace as st
from IPython import embed
class BaseModel():
def __init__(self):
        pass
def name(self):
return 'BaseModel'
def initialize(self, use_gpu=True):
self.use_gpu = use_gpu
self.Tensor = torch.cuda.FloatTensor if self.use_gpu else torch.Tensor
# self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
def forward(self):
pass
def get_image_paths(self):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
# helper saving function that can be used by subclasses
def save_network(self, network, path, network_label, epoch_label):
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(path, save_filename)
torch.save(network.state_dict(), save_path)
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label):
# embed()
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(self.save_dir, save_filename)
print('Loading network from %s'%save_path)
network.load_state_dict(torch.load(save_path))
    def update_learning_rate(self):
pass
def get_image_paths(self):
return self.image_paths
def save_done(self, flag=False):
np.save(os.path.join(self.save_dir, 'done_flag'),flag)
np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i')
| 1,794 | 26.19697 | 78 | py |
CoordFill | CoordFill-master/models/LPIPS/models/pretrained_networks.py | from collections import namedtuple
import torch
from torchvision import models
from IPython import embed
class squeezenet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(squeezenet, self).__init__()
pretrained_features = models.squeezenet1_1(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.slice6 = torch.nn.Sequential()
self.slice7 = torch.nn.Sequential()
self.N_slices = 7
for x in range(2):
self.slice1.add_module(str(x), pretrained_features[x])
for x in range(2,5):
self.slice2.add_module(str(x), pretrained_features[x])
for x in range(5, 8):
self.slice3.add_module(str(x), pretrained_features[x])
for x in range(8, 10):
self.slice4.add_module(str(x), pretrained_features[x])
for x in range(10, 11):
self.slice5.add_module(str(x), pretrained_features[x])
for x in range(11, 12):
self.slice6.add_module(str(x), pretrained_features[x])
for x in range(12, 13):
self.slice7.add_module(str(x), pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
h = self.slice6(h)
h_relu6 = h
h = self.slice7(h)
h_relu7 = h
vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7'])
out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7)
return out
class alexnet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(alexnet, self).__init__()
alexnet_pretrained_features = models.alexnet(pretrained=pretrained).features
# model = models.alexnet(pretrained=False)
# model.load_state_dict(torch.load('/apdcephfs/private_weihuangliu/pretrained_weights/alexnet-owt-7be5be79.pth'))
# alexnet_pretrained_features = model.features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(2):
self.slice1.add_module(str(x), alexnet_pretrained_features[x])
for x in range(2, 5):
self.slice2.add_module(str(x), alexnet_pretrained_features[x])
for x in range(5, 8):
self.slice3.add_module(str(x), alexnet_pretrained_features[x])
for x in range(8, 10):
self.slice4.add_module(str(x), alexnet_pretrained_features[x])
for x in range(10, 12):
self.slice5.add_module(str(x), alexnet_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
return out
class vgg16(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(23, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
h = self.slice5(h)
h_relu5_3 = h
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
return out
class resnet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True, num=18):
super(resnet, self).__init__()
if(num==18):
self.net = models.resnet18(pretrained=pretrained)
elif(num==34):
self.net = models.resnet34(pretrained=pretrained)
elif(num==50):
self.net = models.resnet50(pretrained=pretrained)
elif(num==101):
self.net = models.resnet101(pretrained=pretrained)
elif(num==152):
self.net = models.resnet152(pretrained=pretrained)
self.N_slices = 5
self.conv1 = self.net.conv1
self.bn1 = self.net.bn1
self.relu = self.net.relu
self.maxpool = self.net.maxpool
self.layer1 = self.net.layer1
self.layer2 = self.net.layer2
self.layer3 = self.net.layer3
self.layer4 = self.net.layer4
def forward(self, X):
h = self.conv1(X)
h = self.bn1(h)
h = self.relu(h)
h_relu1 = h
h = self.maxpool(h)
h = self.layer1(h)
h_conv2 = h
h = self.layer2(h)
h_conv3 = h
h = self.layer3(h)
h_conv4 = h
h = self.layer4(h)
h_conv5 = h
outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5'])
out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
return out
| 6,788 | 35.5 | 121 | py |
CoordFill | CoordFill-master/models/LPIPS/models/networks_basic.py |
from __future__ import absolute_import
import sys
sys.path.append('..')
sys.path.append('.')
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import numpy as np
from pdb import set_trace as st
from skimage import color
from IPython import embed
from . import pretrained_networks as pn
# from PerceptualSimilarity.util import util
from ..util import util
# Off-the-shelf deep network
class PNet(nn.Module):
'''Pre-trained network with all channels equally weighted by default'''
def __init__(self, pnet_type='vgg', pnet_rand=False, use_gpu=True):
super(PNet, self).__init__()
self.use_gpu = use_gpu
self.pnet_type = pnet_type
self.pnet_rand = pnet_rand
self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1,3,1,1))
self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1,3,1,1))
if(self.pnet_type in ['vgg','vgg16']):
self.net = pn.vgg16(pretrained=not self.pnet_rand,requires_grad=False)
elif(self.pnet_type=='alex'):
self.net = pn.alexnet(pretrained=not self.pnet_rand,requires_grad=False)
elif(self.pnet_type[:-2]=='resnet'):
self.net = pn.resnet(pretrained=not self.pnet_rand,requires_grad=False, num=int(self.pnet_type[-2:]))
elif(self.pnet_type=='squeeze'):
self.net = pn.squeezenet(pretrained=not self.pnet_rand,requires_grad=False)
self.L = self.net.N_slices
if(use_gpu):
self.net.cuda()
self.shift = self.shift.cuda()
self.scale = self.scale.cuda()
def forward(self, in0, in1, retPerLayer=False):
in0_sc = (in0 - self.shift.expand_as(in0))/self.scale.expand_as(in0)
in1_sc = (in1 - self.shift.expand_as(in0))/self.scale.expand_as(in0)
outs0 = self.net.forward(in0_sc)
outs1 = self.net.forward(in1_sc)
if(retPerLayer):
all_scores = []
for (kk,out0) in enumerate(outs0):
cur_score = (1.-util.cos_sim(outs0[kk],outs1[kk]))
if(kk==0):
val = 1.*cur_score
else:
# val = val + self.lambda_feat_layers[kk]*cur_score
val = val + cur_score
if(retPerLayer):
all_scores+=[cur_score]
if(retPerLayer):
return (val, all_scores)
else:
return val
# Learned perceptual metric
class PNetLin(nn.Module):
def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, use_gpu=True, spatial=False, version='0.1'):
super(PNetLin, self).__init__()
self.use_gpu = use_gpu
self.pnet_type = pnet_type
self.pnet_tune = pnet_tune
self.pnet_rand = pnet_rand
self.spatial = spatial
self.version = version
if(self.pnet_type in ['vgg','vgg16']):
net_type = pn.vgg16
self.chns = [64,128,256,512,512]
elif(self.pnet_type=='alex'):
net_type = pn.alexnet
self.chns = [64,192,384,256,256]
elif(self.pnet_type=='squeeze'):
net_type = pn.squeezenet
self.chns = [64,128,256,384,384,512,512]
if(self.pnet_tune):
self.net = net_type(pretrained=not self.pnet_rand,requires_grad=True)
else:
self.net = [net_type(pretrained=not self.pnet_rand,requires_grad=True),]
self.lin0 = NetLinLayer(self.chns[0],use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1],use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2],use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3],use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4],use_dropout=use_dropout)
self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4]
if(self.pnet_type=='squeeze'): # 7 layers for squeezenet
self.lin5 = NetLinLayer(self.chns[5],use_dropout=use_dropout)
self.lin6 = NetLinLayer(self.chns[6],use_dropout=use_dropout)
self.lins+=[self.lin5,self.lin6]
self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1,3,1,1))
self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1,3,1,1))
if(use_gpu):
if(self.pnet_tune):
self.net.cuda()
else:
self.net[0].cuda()
self.shift = self.shift.cuda()
self.scale = self.scale.cuda()
self.lin0.cuda()
self.lin1.cuda()
self.lin2.cuda()
self.lin3.cuda()
self.lin4.cuda()
if(self.pnet_type=='squeeze'):
self.lin5.cuda()
self.lin6.cuda()
def forward(self, in0, in1):
in0_sc = (in0 - self.shift.expand_as(in0))/self.scale.expand_as(in0)
in1_sc = (in1 - self.shift.expand_as(in0))/self.scale.expand_as(in0)
if(self.version=='0.0'):
# v0.0 - original release had a bug, where input was not scaled
in0_input = in0
in1_input = in1
else:
# v0.1
in0_input = in0_sc
in1_input = in1_sc
if(self.pnet_tune):
outs0 = self.net.forward(in0_input)
outs1 = self.net.forward(in1_input)
else:
outs0 = self.net[0].forward(in0_input)
outs1 = self.net[0].forward(in1_input)
feats0 = {}
feats1 = {}
diffs = [0]*len(outs0)
for (kk,out0) in enumerate(outs0):
feats0[kk] = util.normalize_tensor(outs0[kk])
feats1[kk] = util.normalize_tensor(outs1[kk])
diffs[kk] = (feats0[kk]-feats1[kk])**2
# diffs[kk] = (outs0[kk]-outs1[kk])**2
if self.spatial:
lin_models = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
if(self.pnet_type=='squeeze'):
lin_models.extend([self.lin5, self.lin6])
res = [lin_models[kk].model(diffs[kk]) for kk in range(len(diffs))]
return res
val1 = torch.mean(torch.mean(self.lin0.model(diffs[0]),dim=3),dim=2)
val2 = torch.mean(torch.mean(self.lin1.model(diffs[1]),dim=3),dim=2)
val3 = torch.mean(torch.mean(self.lin2.model(diffs[2]),dim=3),dim=2)
val4 = torch.mean(torch.mean(self.lin3.model(diffs[3]),dim=3),dim=2)
val5 = torch.mean(torch.mean(self.lin4.model(diffs[4]),dim=3),dim=2)
val = val1 + val2 + val3 + val4 + val5
val_out = val.view(val.size()[0],val.size()[1],1,1)
val_out2 = [val1, val2, val3, val4, val5]
if(self.pnet_type=='squeeze'):
val6 = val + torch.mean(torch.mean(self.lin5.model(diffs[5]),dim=3),dim=2)
val7 = val6 + torch.mean(torch.mean(self.lin6.model(diffs[6]),dim=3),dim=2)
val7 = val7.view(val7.size()[0],val7.size()[1],1,1)
return val7
return val_out, val_out2
# return [val1, val2, val3, val4, val5]
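# Conceptual summary of PNetLin.forward above (comment only): the learned perceptual
# distance sums, over layers l, the spatially averaged, 1x1-conv-weighted squared
# differences between unit-normalized activations:
#   d(x0, x1) = sum_l mean_{h,w} w_l . (phi_l(x0)_{hw} - phi_l(x1)_{hw})^2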
class Dist2LogitLayer(nn.Module):
''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''
def __init__(self, chn_mid=32,use_sigmoid=True):
super(Dist2LogitLayer, self).__init__()
layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),]
layers += [nn.LeakyReLU(0.2,True),]
layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),]
layers += [nn.LeakyReLU(0.2,True),]
layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),]
if(use_sigmoid):
layers += [nn.Sigmoid(),]
self.model = nn.Sequential(*layers)
def forward(self,d0,d1,eps=0.1):
return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1))
class BCERankingLoss(nn.Module):
def __init__(self, use_gpu=True, chn_mid=32):
super(BCERankingLoss, self).__init__()
self.use_gpu = use_gpu
self.net = Dist2LogitLayer(chn_mid=chn_mid)
self.parameters = list(self.net.parameters())
self.loss = torch.nn.BCELoss()
self.model = nn.Sequential(*[self.net])
if(self.use_gpu):
self.net.cuda()
def forward(self, d0, d1, judge):
per = (judge+1.)/2.
if(self.use_gpu):
per = per.cuda()
self.logit = self.net.forward(d0,d1)
return self.loss(self.logit, per)
class NetLinLayer(nn.Module):
''' A single linear layer which does a 1x1 conv '''
def __init__(self, chn_in, chn_out=1, use_dropout=False):
super(NetLinLayer, self).__init__()
layers = [nn.Dropout(),] if(use_dropout) else []
layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),]
self.model = nn.Sequential(*layers)
# L2, DSSIM metrics
class FakeNet(nn.Module):
def __init__(self, use_gpu=True, colorspace='Lab'):
super(FakeNet, self).__init__()
self.use_gpu = use_gpu
self.colorspace=colorspace
class L2(FakeNet):
def forward(self, in0, in1):
assert(in0.size()[0]==1) # currently only supports batchSize 1
if(self.colorspace=='RGB'):
(N,C,X,Y) = in0.size()
value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N)
return value
elif(self.colorspace=='Lab'):
value = util.l2(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)),
util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
ret_var = Variable( torch.Tensor((value,) ) )
if(self.use_gpu):
ret_var = ret_var.cuda()
return ret_var
class DSSIM(FakeNet):
def forward(self, in0, in1):
assert(in0.size()[0]==1) # currently only supports batchSize 1
if(self.colorspace=='RGB'):
value = util.dssim(1.*util.tensor2im(in0.data), 1.*util.tensor2im(in1.data), range=255.).astype('float')
elif(self.colorspace=='Lab'):
value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)),
util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
ret_var = Variable( torch.Tensor((value,) ) )
if(self.use_gpu):
ret_var = ret_var.cuda()
return ret_var
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('Network',net)
print('Total number of parameters: %d' % num_params)
| 10,730 | 37.188612 | 136 | py |
CoordFill | CoordFill-master/models/LPIPS/models/dist_model.py |
from __future__ import absolute_import
import sys
sys.path.append('..')
sys.path.append('.')
import numpy as np
import torch
from torch import nn
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from .base_model import BaseModel
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
from IPython import embed
from . import networks_basic as networks
# from PerceptualSimilarity.util import util
import sys
sys.path.insert(1, './LPIPS/')
from ..util import util
class DistModel(BaseModel):
def name(self):
return self.model_name
def initialize(self, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5, version='0.1'):
'''
INPUTS
model - ['net-lin'] for linearly calibrated network
['net'] for off-the-shelf network
['L2'] for L2 distance in Lab colorspace
['SSIM'] for ssim in RGB colorspace
net - ['squeeze','alex','vgg']
model_path - if None, will look in weights/[NET_NAME].pth
colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
use_gpu - bool - whether or not to use a GPU
printNet - bool - whether or not to print network architecture out
spatial - bool - whether to output an array containing varying distances across spatial dimensions
spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
is_train - bool - [True] for training mode
lr - float - initial learning rate
beta1 - float - initial momentum term for adam
version - 0.1 for latest, 0.0 was original
'''
BaseModel.initialize(self, use_gpu=use_gpu)
self.model = model
self.net = net
self.use_gpu = use_gpu
self.is_train = is_train
self.spatial = spatial
self.spatial_shape = spatial_shape
self.spatial_order = spatial_order
self.spatial_factor = spatial_factor
self.model_name = '%s [%s]'%(model,net)
if(self.model == 'net-lin'): # pretrained net + linear layer
self.net = networks.PNetLin(use_gpu=use_gpu,pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,use_dropout=True,spatial=spatial,version=version)
kw = {}
if not use_gpu:
kw['map_location'] = 'cpu'
if(model_path is None):
import inspect
# model_path = './PerceptualSimilarity/weights/v%s/%s.pth'%(version,net)
model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', '..', 'weights/v%s/%s.pth'%(version,net)))
if(not is_train):
print('Loading model from: %s'%model_path)
self.net.load_state_dict(torch.load(model_path, **kw))
elif(self.model=='net'): # pretrained network
assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks'
self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net)
self.is_fake_net = True
elif(self.model in ['L2','l2']):
self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing
self.model_name = 'L2'
elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
self.model_name = 'SSIM'
else:
raise ValueError("Model [%s] not recognized." % self.model)
self.parameters = list(self.net.parameters())
if self.is_train: # training mode
# extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu)
self.parameters+=self.rankLoss.parameters
self.lr = lr
self.old_lr = lr
self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
else: # test mode
self.net.eval()
if(printNet):
print('---------- Networks initialized -------------')
networks.print_network(self.net)
print('-----------------------------------------------')
def forward_pair(self,in1,in2,retPerLayer=False):
if(retPerLayer):
return self.net.forward(in1,in2, retPerLayer=True)
else:
return self.net.forward(in1,in2)
def forward(self, in0, in1, retNumpy=True):
''' Function computes the distance between image patches in0 and in1
INPUTS
in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array
OUTPUT
computed distances between in0 and in1
'''
self.input_ref = in0
self.input_p0 = in1
if(self.use_gpu):
self.input_ref = self.input_ref.cuda()
self.input_p0 = self.input_p0.cuda()
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.d0, _ = self.forward_pair(self.var_ref, self.var_p0)
self.loss_total = self.d0
def convert_output(d0):
if(retNumpy):
ans = d0.cpu().data.numpy()
if not self.spatial:
ans = ans.flatten()
else:
assert(ans.shape[0] == 1 and len(ans.shape) == 4)
return ans[0,...].transpose([1, 2, 0]) # Reshape to usual numpy image format: (height, width, channels)
return ans
else:
return d0
if self.spatial:
L = [convert_output(x) for x in self.d0]
spatial_shape = self.spatial_shape
if spatial_shape is None:
if(self.spatial_factor is None):
spatial_shape = (in0.size()[2],in0.size()[3])
else:
spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor)
L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L]
L = np.mean(np.concatenate(L, 2) * len(L), 2)
return L
else:
return convert_output(self.d0)
# ***** TRAINING FUNCTIONS *****
def optimize_parameters(self):
self.forward_train()
self.optimizer_net.zero_grad()
self.backward_train()
self.optimizer_net.step()
self.clamp_weights()
def clamp_weights(self):
for module in self.net.modules():
if(hasattr(module, 'weight') and module.kernel_size==(1,1)):
module.weight.data = torch.clamp(module.weight.data,min=0)
def set_input(self, data):
self.input_ref = data['ref']
self.input_p0 = data['p0']
self.input_p1 = data['p1']
self.input_judge = data['judge']
if(self.use_gpu):
self.input_ref = self.input_ref.cuda()
self.input_p0 = self.input_p0.cuda()
self.input_p1 = self.input_p1.cuda()
self.input_judge = self.input_judge.cuda()
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.var_p1 = Variable(self.input_p1,requires_grad=True)
def forward_train(self): # run forward pass
self.d0 = self.forward_pair(self.var_ref, self.var_p0)
self.d1 = self.forward_pair(self.var_ref, self.var_p1)
self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)
# var_judge
self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())
self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
return self.loss_total
def backward_train(self):
torch.mean(self.loss_total).backward()
def compute_accuracy(self,d0,d1,judge):
''' d0, d1 are Variables, judge is a Tensor '''
d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
judge_per = judge.cpu().numpy().flatten()
return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
def get_current_errors(self):
retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
('acc_r', self.acc_r)])
for key in retDict.keys():
retDict[key] = np.mean(retDict[key])
return retDict
def get_current_visuals(self):
zoom_factor = 256/self.var_ref.data.size()[2]
ref_img = util.tensor2im(self.var_ref.data)
p0_img = util.tensor2im(self.var_p0.data)
p1_img = util.tensor2im(self.var_p1.data)
ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)
return OrderedDict([('ref', ref_img_vis),
('p0', p0_img_vis),
('p1', p1_img_vis)])
def save(self, path, label):
self.save_network(self.net, path, '', label)
self.save_network(self.rankLoss.net, path, 'rank', label)
def update_learning_rate(self,nepoch_decay):
lrd = self.lr / nepoch_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_net.param_groups:
param_group['lr'] = lr
print('update lr [%s] decay: %f -> %f' % (type,self.old_lr, lr))
self.old_lr = lr
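# Hedged usage sketch (image variables are illustrative): evaluation-mode use of
# DistModel as a perceptual distance between two HxWx3 uint8 images.
# >>> model = DistModel()
# >>> model.initialize(model='net-lin', net='alex', use_gpu=torch.cuda.is_available())
# >>> d = model.forward(util.im2tensor(img0), util.im2tensor(img1))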
def score_2afc_dataset(data_loader,func):
''' Function computes Two Alternative Forced Choice (2AFC) score using
distance function 'func' in dataset 'data_loader'
INPUTS
data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
func - callable distance function - calling d=func(in0,in1) should take 2
pytorch tensors with shape Nx3xXxY, and return numpy array of length N
OUTPUTS
[0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
[1] - dictionary with following elements
d0s,d1s - N arrays containing distances between reference patch to perturbed patches
gts - N array in [0,1], preferred patch selected by human evaluators
(closer to "0" for left patch p0, "1" for right patch p1,
"0.6" means 60pct people preferred right patch, 40pct preferred left)
scores - N array in [0,1], corresponding to what percentage function agreed with humans
CONSTS
N - number of test triplets in data_loader
'''
d0s = []
d1s = []
gts = []
# bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())
for (i,data) in enumerate(data_loader.load_data()):
d0s+=func(data['ref'],data['p0']).tolist()
d1s+=func(data['ref'],data['p1']).tolist()
gts+=data['judge'].cpu().numpy().flatten().tolist()
# bar.update(i)
d0s = np.array(d0s)
d1s = np.array(d1s)
gts = np.array(gts)
scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
def score_jnd_dataset(data_loader,func):
''' Function computes JND score using distance function 'func' in dataset 'data_loader'
INPUTS
data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
func - callable distance function - calling d=func(in0,in1) should take 2
pytorch tensors with shape Nx3xXxY, and return numpy array of length N
OUTPUTS
[0] - JND score in [0,1], mAP score (area under precision-recall curve)
[1] - dictionary with following elements
ds - N array containing distances between two patches shown to human evaluator
sames - N array containing fraction of people who thought the two patches were identical
CONSTS
N - number of test triplets in data_loader
'''
ds = []
gts = []
# bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())
for (i,data) in enumerate(data_loader.load_data()):
ds+=func(data['p0'],data['p1']).tolist()
gts+=data['same'].cpu().numpy().flatten().tolist()
# bar.update(i)
sames = np.array(gts)
ds = np.array(ds)
sorted_inds = np.argsort(ds)
ds_sorted = ds[sorted_inds]
sames_sorted = sames[sorted_inds]
TPs = np.cumsum(sames_sorted)
FPs = np.cumsum(1-sames_sorted)
FNs = np.sum(sames_sorted)-TPs
precs = TPs/(TPs+FPs)
recs = TPs/(TPs+FNs)
score = util.voc_ap(recs,precs)
return(score, dict(ds=ds,sames=sames))
| 13,452 | 39.521084 | 278 | py |
CoordFill | CoordFill-master/models/LPIPS/util/util.py | from __future__ import print_function
import numpy as np
from PIL import Image
import inspect
import re
import numpy as np
import os
import collections
import matplotlib.pyplot as plt
from scipy.ndimage.interpolation import zoom
from skimage.measure import compare_ssim
# from skimage.metrics import
from skimage import measure
import torch
from IPython import embed
import cv2
from datetime import datetime
def datetime_str():
now = datetime.now()
return '%04d-%02d-%02d-%02d-%02d-%02d'%(now.year,now.month,now.day,now.hour,now.minute,now.second)
def read_text_file(in_path):
fid = open(in_path,'r')
vals = []
cur_line = fid.readline()
while(cur_line!=''):
vals.append(float(cur_line))
cur_line = fid.readline()
fid.close()
return np.array(vals)
def bootstrap(in_vec,num_samples=100,bootfunc=np.mean):
from astropy import stats
return stats.bootstrap(np.array(in_vec),bootnum=num_samples,bootfunc=bootfunc)
def rand_flip(input1,input2):
if(np.random.binomial(1,.5)==1):
return (input1,input2)
else:
return (input2,input1)
def l2(p0, p1, range=255.):
return .5*np.mean((p0 / range - p1 / range)**2)
def psnr(p0, p1, peak=255.):
return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2))
def dssim(p0, p1, range=255.):
# embed()
return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2.
def rgb2lab(in_img,mean_cent=False):
from skimage import color
img_lab = color.rgb2lab(in_img)
if(mean_cent):
img_lab[:,:,0] = img_lab[:,:,0]-50
return img_lab
def normalize_blob(in_feat,eps=1e-10):
norm_factor = np.sqrt(np.sum(in_feat**2,axis=1,keepdims=True))
return in_feat/(norm_factor+eps)
def cos_sim_blob(in0,in1):
in0_norm = normalize_blob(in0)
in1_norm = normalize_blob(in1)
(N,C,X,Y) = in0_norm.shape
return np.mean(np.mean(np.sum(in0_norm*in1_norm,axis=1),axis=1),axis=1)
def normalize_tensor(in_feat,eps=1e-10):
# norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1)).view(in_feat.size()[0],1,in_feat.size()[2],in_feat.size()[3]).repeat(1,in_feat.size()[1],1,1)
norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1)).view(in_feat.size()[0],1,in_feat.size()[2],in_feat.size()[3])
return in_feat/(norm_factor.expand_as(in_feat)+eps)
def cos_sim(in0,in1):
in0_norm = normalize_tensor(in0)
in1_norm = normalize_tensor(in1)
N = in0.size()[0]
X = in0.size()[2]
Y = in0.size()[3]
return torch.mean(torch.mean(torch.sum(in0_norm*in1_norm,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N)
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2np(tensor_obj):
# change dimension of a tensor object into a numpy array
return tensor_obj[0].cpu().float().numpy().transpose((1,2,0))
def np2tensor(np_obj):
# change dimenion of np array into tensor array
return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
# image tensor to lab tensor
from skimage import color
img = tensor2im(image_tensor)
# print('img_rgb',img.flatten())
img_lab = color.rgb2lab(img)
# print('img_lab',img_lab.flatten())
if(mc_only):
img_lab[:,:,0] = img_lab[:,:,0]-50
if(to_norm and not mc_only):
img_lab[:,:,0] = img_lab[:,:,0]-50
img_lab = img_lab/100.
return np2tensor(img_lab)
def tensorlab2tensor(lab_tensor,return_inbnd=False):
from skimage import color
import warnings
warnings.filterwarnings("ignore")
lab = tensor2np(lab_tensor)*100.
lab[:,:,0] = lab[:,:,0]+50
# print('lab',lab)
rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1)
# print('rgb',rgb_back)
if(return_inbnd):
# convert back to lab, see if we match
lab_back = color.rgb2lab(rgb_back.astype('uint8'))
# print('lab_back',lab_back)
# print('lab==lab_back',np.isclose(lab_back,lab,atol=1.))
# print('lab-lab_back',np.abs(lab-lab_back))
mask = 1.*np.isclose(lab_back,lab,atol=2.)
mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis])
return (im2tensor(rgb_back),mask)
else:
return im2tensor(rgb_back)
def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.):
image_numpy = image_tensor[0].cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
return image_numpy.astype(imtype)
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.):
return torch.Tensor((image / factor - cent)
[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
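# Illustrative usage sketch (not part of the original code): with the default
# cent=1 and factor=255/2, im2tensor maps an HxWx3 uint8 image into a (1, 3, H, W)
# tensor in [-1, 1] and tensor2im maps it back, up to uint8 rounding.
def _im_tensor_roundtrip_example():
    rgb = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
    t = im2tensor(rgb)      # (1, 3, 8, 8) tensor with values in [-1, 1]
    back = tensor2im(t)     # (8, 8, 3) uint8 array again
    return np.abs(rgb.astype(int) - back.astype(int)).max()  # at most off-by-one from rounding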
def tensor2vec(vector_tensor):
return vector_tensor.data.cpu().numpy()[:, :, 0, 0]
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def grab_patch(img_in, P, yy, xx):
return img_in[yy:yy+P,xx:xx+P,:]
def load_image(path):
if(path[-3:] == 'dng'):
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
# img = plt.imread(path)
elif(path[-3:]=='bmp' or path[-3:]=='jpg' or path[-3:]=='png'):
import cv2
return cv2.imread(path)[:,:,::-1]
else:
img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
return img
def resize_image(img, max_size=256):
[Y, X] = img.shape[:2]
# resize
max_dim = max([Y, X])
zoom_factor = 1. * max_size / max_dim
img = zoom(img, [zoom_factor, zoom_factor, 1])
return img
def resize_image_zoom(img, zoom_factor=1., order=3):
if(zoom_factor==1):
return img
else:
return zoom(img, [zoom_factor, zoom_factor, 1], order=order)
def save_image(image_numpy, image_path, ):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def prep_display_image(img, dtype='uint8'):
if(dtype == 'uint8'):
return np.clip(img, 0, 255).astype('uint8')
else:
return np.clip(img, 0, 1.)
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [
e for e in dir(object) if isinstance(
getattr(
object,
e),
collections.Callable)]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print(
'mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' %
(np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def rgb2lab(input):
from skimage import color
return color.rgb2lab(input / 255.)
def montage(
imgs,
PAD=5,
RATIO=16 / 9.,
EXTRA_PAD=(
False,
False),
MM=-1,
NN=-1,
primeDir=0,
verbose=False,
returnGridPos=False,
backClr=np.array(
(0,
0,
0))):
# INPUTS
# imgs YxXxMxN or YxXxN
# PAD scalar number of pixels in between
# RATIO scalar target ratio of cols/rows
# MM scalar # rows, if specified, overrides RATIO
# NN scalar # columns, if specified, overrides RATIO
# primeDir scalar 0 for top-to-bottom, 1 for left-to-right
# OUTPUTS
# mont_imgs MM*Y x NN*X x M big image with everything montaged
# def montage(imgs, PAD=5, RATIO=16/9., MM=-1, NN=-1, primeDir=0,
# verbose=False, forceFloat=False):
if(imgs.ndim == 3):
toExp = True
imgs = imgs[:, :, np.newaxis, :]
else:
toExp = False
Y = imgs.shape[0]
X = imgs.shape[1]
M = imgs.shape[2]
N = imgs.shape[3]
PADS = np.array((PAD))
if(PADS.flatten().size == 1):
PADY = PADS
PADX = PADS
else:
PADY = PADS[0]
PADX = PADS[1]
if(MM == -1 and NN == -1):
NN = np.ceil(np.sqrt(1.0 * N * RATIO))
MM = np.ceil(1.0 * N / NN)
NN = np.ceil(1.0 * N / MM)
elif(MM == -1):
MM = np.ceil(1.0 * N / NN)
elif(NN == -1):
NN = np.ceil(1.0 * N / MM)
if(primeDir == 0): # write top-to-bottom
[grid_mm, grid_nn] = np.meshgrid(
np.arange(MM, dtype='uint'), np.arange(NN, dtype='uint'))
elif(primeDir == 1): # write left-to-right
[grid_nn, grid_mm] = np.meshgrid(
np.arange(NN, dtype='uint'), np.arange(MM, dtype='uint'))
grid_mm = np.uint(grid_mm.flatten()[0:N])
grid_nn = np.uint(grid_nn.flatten()[0:N])
    EXTRA_PADY = EXTRA_PAD[0] * PADY
    EXTRA_PADX = EXTRA_PAD[1] * PADX
# mont_imgs = np.zeros(((Y+PAD)*MM-PAD, (X+PAD)*NN-PAD, M), dtype=use_dtype)
mont_imgs = np.zeros(
(np.uint(
(Y + PADY) * MM - PADY + EXTRA_PADY),
np.uint(
(X + PADX) * NN - PADX + EXTRA_PADX),
M),
dtype=imgs.dtype)
mont_imgs = mont_imgs + \
backClr.flatten()[np.newaxis, np.newaxis, :].astype(mont_imgs.dtype)
for ii in np.random.permutation(N):
# print imgs[:,:,:,ii].shape
# mont_imgs[grid_mm[ii]*(Y+PAD):(grid_mm[ii]*(Y+PAD)+Y), grid_nn[ii]*(X+PAD):(grid_nn[ii]*(X+PAD)+X),:]
mont_imgs[np.uint(grid_mm[ii] *
(Y +
PADY)):np.uint((grid_mm[ii] *
(Y +
PADY) +
Y)), np.uint(grid_nn[ii] *
(X +
PADX)):np.uint((grid_nn[ii] *
(X +
PADX) +
X)), :] = imgs[:, :, :, ii]
if(M == 1):
imgs = imgs.reshape(imgs.shape[0], imgs.shape[1], imgs.shape[3])
if(toExp):
mont_imgs = mont_imgs[:, :, 0]
if(returnGridPos):
# return (mont_imgs,np.concatenate((grid_mm[:,:,np.newaxis]*(Y+PAD),
# grid_nn[:,:,np.newaxis]*(X+PAD)),axis=2))
return (mont_imgs, np.concatenate(
(grid_mm[:, np.newaxis] * (Y + PADY), grid_nn[:, np.newaxis] * (X + PADX)), axis=1))
# return (mont_imgs, (grid_mm,grid_nn))
else:
return mont_imgs
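# Illustrative usage sketch (not part of the original code): tile ten 16x16 RGB
# patches, given as a Y x X x M x N array, into one grid image with 5-pixel gaps;
# the number of rows/columns is derived from RATIO since MM and NN are left at -1.
def _montage_example():
    patches = np.random.rand(16, 16, 3, 10)
    grid = montage(patches, PAD=5)
    return grid.shape   # ((16+5)*rows - 5, (16+5)*cols - 5, 3)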
class zeroClipper(object):
def __init__(self, frequency=1):
self.frequency = frequency
def __call__(self, module):
        if hasattr(module, 'weight'):
            # torch.max(tensor, 0) reduces over a dimension and returns a (values, indices)
            # tuple, so an element-wise clamp is used here to zero out negative weights
            module.weight.data = torch.clamp(module.weight.data, min=0) + 100
def flatten_nested_list(nested_list):
# only works for list of list
accum = []
for sublist in nested_list:
for item in sublist:
accum.append(item)
return accum
def read_file(in_path,list_lines=False):
agg_str = ''
f = open(in_path,'r')
cur_line = f.readline()
while(cur_line!=''):
agg_str+=cur_line
cur_line = f.readline()
f.close()
if(list_lines==False):
return agg_str.replace('\n','')
else:
line_list = agg_str.split('\n')
ret_list = []
for item in line_list:
if(item!=''):
ret_list.append(item)
return ret_list
def read_csv_file_as_text(in_path):
agg_str = []
f = open(in_path,'r')
cur_line = f.readline()
while(cur_line!=''):
agg_str.append(cur_line)
cur_line = f.readline()
f.close()
return agg_str
def random_swap(obj0,obj1):
if(np.random.rand() < .5):
return (obj0,obj1,0)
else:
return (obj1,obj0,1)
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
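# Illustrative usage sketch (not part of the original code): average precision for a
# tiny ranked list, built the same way callers build rec/prec from cumulative
# true/false positive counts before handing them to voc_ap.
def _voc_ap_example():
    sames = np.array([1, 1, 0, 1, 0])   # ground-truth hits for predictions ranked best-first
    TPs = np.cumsum(sames)
    FPs = np.cumsum(1 - sames)
    FNs = np.sum(sames) - TPs
    prec = TPs / (TPs + FPs)
    rec = TPs / (TPs + FNs)
    return voc_ap(rec, prec)            # area under the interpolated PR curve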
| 14,095 | 29.912281 | 153 | py |
CoordFill | CoordFill-master/datasets/wrappers.py | import functools
import random
import math
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from datasets import register
def to_mask(mask):
return transforms.ToTensor()(
transforms.Grayscale(num_output_channels=1)(
transforms.ToPILImage()(mask)))
def resize_fn(img, size):
return transforms.ToTensor()(
transforms.Resize(size)(
transforms.ToPILImage()(img)))
def get_coord(shape):
ranges = None
coord_seqs = []
for i, n in enumerate(shape):
if ranges is None:
v0, v1 = -1, 1
else:
v0, v1 = ranges[i]
r = (v1 - v0) / (2 * n)
seq = v0 + r + (2 * r) * torch.arange(n).float()
coord_seqs.append(seq)
ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
return ret
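# Illustrative usage sketch (not part of the original code): get_coord returns the
# normalized centers of an H x W pixel grid in [-1, 1], one 2-vector per pixel; for a
# 2 x 2 grid the centers fall at -0.5 and 0.5 along each axis.
def _get_coord_example():
    coord = get_coord((2, 2))   # tensor of shape (2, 2, 2)
    return coord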
@register('sr-implicit-paired')
class SRImplicitPaired(Dataset):
def __init__(self, dataset, inp_size=None, augment=False, sample_q=None):
self.dataset = dataset
self.inp_size = inp_size
self.augment = augment
self.sample_q = sample_q
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
img, mask = self.dataset[[idx, idx]]
size = self.inp_size
img = resize_fn(img, (size, size))
mask = resize_fn(mask, (size, size))
mask = to_mask(mask)
mask[mask > 0] = 1
mask = 1 - mask
return {
'inp': img,
'gt_rgb': img,
'mask': mask,
}
@register('sr-implicit-uniform-varied')
class SRImplicitUniformVaried(Dataset):
def __init__(self, dataset, size_min, size_max=None,
augment=False):
self.dataset = dataset
self.size_min = size_min
if size_max is None:
size_max = size_min
self.size_max = size_max
self.augment = augment
self.count = 0
self.scale = 0
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
img, mask = self.dataset[[idx, idx]]
size = self.size_max
img = resize_fn(img, (size, size))
mask = resize_fn(mask, (size, size))
mask = to_mask(mask)
mask[mask > 0] = 1
mask = 1 - mask
if self.augment:
if random.random() < 0.5:
img = img.flip(-1)
mask = mask.flip(-1)
return {
'inp': img,
'gt_rgb': img,
'mask': mask,
}
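# Illustrative sketch (not part of the original code): both wrappers binarize and then
# invert the loaded mask, so pixels that are non-zero in the mask image come out as 0
# and zero pixels come out as 1 in the returned 'mask' tensor.
def _mask_convention_example():
    m = torch.tensor([[0.0, 0.7], [0.0, 1.0]]).unsqueeze(0)  # a 1 x 2 x 2 grayscale mask
    m[m > 0] = 1
    m = 1 - m
    return m    # tensor([[[1., 0.], [1., 0.]]])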
| 2,575 | 22.851852 | 77 | py |
CoordFill | CoordFill-master/datasets/image_folder.py | import os
import json
from PIL import Image
import pickle
import imageio
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from datasets import register
@register('image-folder')
class ImageFolder(Dataset):
def __init__(self, path, split_file=None, split_key=None, first_k=None,
repeat=1, cache=False):
self.repeat = repeat
        self.cache = False  # note: the 'cache' argument is accepted but caching stays disabled here
if split_file is None:
filenames = sorted(os.listdir(path))
else:
with open(split_file, 'r') as f:
filenames = json.load(f)[split_key]
if first_k is not None:
filenames = filenames[:first_k]
self.files = []
for filepath, dirnames, filenames in os.walk(path):
for filename in filenames:
if self.cache:
self.files.append(
transforms.ToTensor()(Image.open(os.path.join(filepath, filename)).convert('RGB')))
else:
self.files.append(os.path.join(filepath, filename))
if first_k is not None:
self.files = self.files[:first_k]
def __len__(self):
return len(self.files) * self.repeat
def __getitem__(self, idx):
x = self.files[idx % len(self.files)]
if self.cache:
return x
else:
return transforms.ToTensor()(Image.open(x).convert('RGB'))
@register('paired-image-folders')
class PairedImageFolders(Dataset):
def __init__(self, root_path_1, root_path_2, **kwargs):
self.dataset_1 = ImageFolder(root_path_1, **kwargs)
self.dataset_2 = ImageFolder(root_path_2, **kwargs)
def __len__(self):
return len(self.dataset_1)
def __getitem__(self, idx):
idx1, idx2 = idx
return self.dataset_1[idx1], self.dataset_2[idx2]
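# Illustrative usage sketch (not part of the original code): PairedImageFolders expects
# a pair of indices, which is why the wrappers in datasets/wrappers.py index it as
# dataset[[idx, idx]] to pull the image and its companion (e.g. the mask) together.
def _paired_indexing_example(paired_dataset, idx=0):
    # 'paired_dataset' is assumed to be a PairedImageFolders instance
    img, companion = paired_dataset[[idx, idx]]
    return img.shape, companion.shape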
| 1,885 | 27.575758 | 107 | py |
cycle-transformer | cycle-transformer-main/test.py | # This code is released under the CC BY-SA 4.0 license.
import glob
import os
import numpy as np
import pandas as pd
import pydicom
import torch
from skimage.metrics import structural_similarity as ssim
from models import create_model
from options.train_options import TrainOptions
@torch.no_grad()
def compute_eval_metrics_gan(root_path, tagA='ARTERIAL', tagB='NATIVE', device='cpu'):
# root_path - is the path to the raw Coltea-Lung-CT-100W data set.
opt = TrainOptions().parse()
opt.load_iter = 40
opt.isTrain = False
opt.device = device
model = create_model(opt)
model.setup(opt)
gen = model.netG_A
gen.eval()
eval_dirs = pd.read_csv(os.path.join(root_path, 'test_data.csv'))
eval_dirs = list(eval_dirs.iloc[:, 1])
mae_pre = []
mae_post = []
rmse_pre = []
rmse_post = []
ssim_pre = []
ssim_post = []
for path in glob.glob(os.path.join(root_path, 'Coltea-Lung-CT-100W/*')):
if not path.split('/')[-1] in eval_dirs:
continue
for scan in glob.glob(os.path.join(path, tagA, 'DICOM', '*')):
orig_img = pydicom.dcmread(scan).pixel_array
native_img = pydicom.dcmread(scan.replace(tagA, tagB)).pixel_array
# Scale native image
native_img[native_img < 0] = 0
native_img = native_img / 1e3
native_img = native_img - 1
            # Scale the original image, which is the one being transformed
orig_img[orig_img < 0] = 0
orig_img = orig_img / 1e3
orig_img = orig_img - 1
            orig_img_in = np.expand_dims(orig_img, 0).astype(float)  # np.float alias was removed in newer NumPy
orig_img_in = torch.from_numpy(orig_img_in).float().to(device)
orig_img_in = orig_img_in.unsqueeze(0)
native_fake = gen(orig_img_in)[0, 0].detach().cpu().numpy()
mae_pre.append(np.mean(np.abs(orig_img - native_img)))
mae_post.append(np.mean(np.abs(native_fake - native_img)))
rmse_pre.append(np.sqrt(np.mean((orig_img - native_img)**2)))
rmse_post.append(np.sqrt(np.mean((native_fake - native_img)**2)))
ssim_pre.append(ssim(orig_img, native_img))
ssim_post.append(ssim(native_fake, native_img))
mae_pre = np.mean(mae_pre)
mae_post = np.mean(mae_post)
rmse_pre = np.mean(rmse_pre)
rmse_post = np.mean(rmse_post)
ssim_pre = np.mean(ssim_pre)
ssim_post = np.mean(ssim_post)
print(f"MAE before {mae_pre}, after {mae_post}")
print(f"RMSE before {rmse_pre}, after {rmse_post}")
print(f"SSIM before {ssim_pre}, after {ssim_post}")
if __name__ == '__main__':
compute_eval_metrics_gan(
root_path='/path/to/data/set/',
device='cuda'
)
| 2,738 | 29.775281 | 86 | py |
cycle-transformer | cycle-transformer-main/options/base_options.py | import argparse
import os
from util import util
import torch
import models as models
class BaseOptions:
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', default='/path/to/ct/dataset', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='cytran', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--device', type=str, default='cuda', help='cuda or cpu')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cytran', help='chooses which model to use. [transformer_cvt | transformer | cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=1, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='unet_256', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='ct', help='chooses how datasets are loaded. [ct, unaligned | aligned | single | colorization]')
parser.add_argument('--Aclass', type=str, default='ARTERIAL')
parser.add_argument('--Bclass', type=str, default='NATIVE')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=2, help='input batch size')
parser.add_argument('--img_size', type=int, default=512, help='scale images to this size')
parser.add_argument('--load_size', type=int, default=512, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=512, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=512, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
# dataset_name = opt.dataset_mode
# dataset_option_setter = data.get_option_setter(dataset_name)
# parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
It will print both current options and default values(if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
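# Illustrative sketch (not part of the original code): concrete option classes are
# expected to subclass BaseOptions, set isTrain and add any extra flags such as the
# 'phase' flag used by print_options when naming the saved opt file. The hypothetical
# subclass below only shows that pattern; the real train/test option classes live in
# their own modules.
class _MinimalOptionsSketch(BaseOptions):
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--phase', type=str, default='test', help='train | val | test')
        self.isTrain = False
        return parser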
| 8,414 | 58.680851 | 235 | py |
cycle-transformer | cycle-transformer-main/models/base_model.py | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = '%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
old_lr = self.optimizers[0].param_groups[0]['lr']
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate %.7f -> %.7f' % (old_lr, lr))
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
try:
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
except:
torch.save(net.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
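# Illustrative sketch (not part of the original code): a training loop drives a
# concrete BaseModel subclass through the interface defined above; 'model' and
# 'dataloader' are assumed to come from create_model(opt) and the dataset code.
def _training_epoch_sketch(model, dataloader, epoch):
    for batch in dataloader:
        model.set_input(batch)        # unpack the batch and move it to the right device
        model.optimize_parameters()   # forward pass, backward passes and optimizer steps
    model.update_learning_rate()      # step the schedulers once per epoch
    model.save_networks(epoch)        # writes <epoch>_net_<name>.pth checkpoints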
| 10,583 | 44.038298 | 260 | py |
cycle-transformer | cycle-transformer-main/models/cytran_model.py | # This code is released under the CC BY-SA 4.0 license.
import torch
import itertools
from util import ImagePool
from models.conv_transformer import ConvTransformer
from .base_model import BaseModel
from . import networks
class CyTranModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
A (source domain), B (target domain).
Generators: G_A: A -> B; G_B: B -> A.
Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
Dropout is not used in the original CycleGAN paper.
"""
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
return parser
def __init__(self, opt):
"""Initialize the CycleGAN class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
visual_names_A.append('idt_B')
visual_names_B.append('idt_A')
self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# self.clip_dis = opt.clip_dis
# self.clip_gen = opt.clip_gen
# define networks (both Generators and discriminators)
self.netG_A = ConvTransformer(input_nc=opt.input_nc, n_downsampling=opt.n_downsampling, depth=opt.depth,
heads=opt.heads, dropout=opt.dropout, ngf=opt.ngf_cytran).to(opt.device)
self.netG_B = ConvTransformer(input_nc=opt.input_nc, n_downsampling=opt.n_downsampling, depth=opt.depth,
heads=opt.heads, dropout=opt.dropout, ngf=opt.ngf_cytran).to(opt.device)
if self.isTrain: # define discriminators
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device).float()
self.real_B = input['B' if AtoB else 'A'].to(self.device).float()
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG_A(self.real_A) # G_A(A)
self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
self.fake_A = self.netG_B(self.real_B) # G_B(B)
self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_A(self):
"""Calculate GAN loss for discriminator D_A"""
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
"""Calculate GAN loss for discriminator D_B"""
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
"""Calculate the loss for generators G_A and G_B"""
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed: ||G_A(B) - B||
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed: ||G_B(A) - A||
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# Forward cycle loss || G_B(G_A(A)) - A||
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss || G_A(G_B(B)) - B||
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss and calculate gradients
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
self.loss_G.backward()
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute fake images and reconstruction images.
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D_A() # calculate gradients for D_A
        self.backward_D_B()      # calculate gradients for D_B
# torch.nn.utils.clip_grad_norm_(self.netD_A.parameters(), self.clip_dis)
# torch.nn.utils.clip_grad_norm_(self.netD_B.parameters(), self.clip_dis)
self.optimizer_D.step() # update D_A and D_B's weights
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.backward_G() # calculate gradients for G_A and G_B
# torch.nn.utils.clip_grad_norm_(self.netG_A.parameters(), self.clip_gen)
# torch.nn.utils.clip_grad_norm_(self.netG_B.parameters(), self.clip_gen)
self.optimizer_G.step() # update G_A and G_B's weights
| 10,350 | 54.352941 | 362 | py |
cycle-transformer | cycle-transformer-main/models/conv_transformer.py | # This code is released under the CC BY-SA 4.0 license.
from einops import rearrange
from torch import nn, einsum
import functools
class Encoder(nn.Module):
def __init__(self, input_nc, ngf=16, norm_layer=nn.BatchNorm2d, n_downsampling=3):
super(Encoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
self.down_sampling = nn.Sequential(*model)
def forward(self, input):
input = self.down_sampling(input)
return input
class Decoder(nn.Module):
def __init__(self, output_nc, ngf=16, norm_layer=nn.BatchNorm2d, n_downsampling=3):
super(Decoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
self.up_sampling = nn.Sequential(*model)
def forward(self, input):
input = self.up_sampling(input)
return input
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = rearrange(x, 'b c h w -> b h w c')
x = self.norm(x)
x = rearrange(x, 'b h w c -> b c h w')
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult=4, dropout=0.):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(dim, dim * mult, 1),
nn.GELU(),
nn.Dropout(dropout),
nn.Conv2d(dim * mult, dim, 1),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class DepthWiseConv2d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size, padding, stride, bias=True):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(dim_in, dim_in, kernel_size=kernel_size, padding=padding, groups=dim_in, stride=stride,
bias=bias),
nn.BatchNorm2d(dim_in),
nn.Conv2d(dim_in, dim_out, kernel_size=1, bias=bias)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, proj_kernel, kv_proj_stride, heads=8, dim_head=64, dropout=0.):
super().__init__()
inner_dim = dim_head * heads
padding = proj_kernel // 2
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim=-1)
self.to_q = DepthWiseConv2d(dim, inner_dim, 3, padding=padding, stride=1, bias=False)
self.to_kv = DepthWiseConv2d(dim, inner_dim * 2, 3, padding=padding, stride=kv_proj_stride, bias=False)
self.to_out = nn.Sequential(
nn.Conv2d(inner_dim, dim, 1),
nn.Dropout(dropout)
)
def forward(self, x):
shape = x.shape
b, n, _, y, h = *shape, self.heads
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim=1))
q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> (b h) (x y) d', h=h), (q, k, v))
dots = einsum('b i d, b j d -> b i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h=h, y=y)
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, proj_kernel, kv_proj_stride, depth, heads, dim_head=64, mlp_mult=4, dropout=0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, proj_kernel=proj_kernel, kv_proj_stride=kv_proj_stride, heads=heads,
dim_head=dim_head, dropout=dropout)),
PreNorm(dim, FeedForward(dim, mlp_mult, dropout=dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class ConvTransformer(nn.Module):
def __init__(self, input_nc, n_downsampling, depth, heads, proj_kernel=3,
mlp_mult=4, dropout=0., ngf=16):
super().__init__()
dim = (2 ** n_downsampling) * ngf
self.conv_encoder = Encoder(input_nc=input_nc, ngf=ngf, n_downsampling=n_downsampling)
self.conv_decoder = Decoder(output_nc=input_nc, ngf=ngf, n_downsampling=n_downsampling)
self.transformer = Transformer(dim=dim, proj_kernel=proj_kernel, kv_proj_stride=2, depth=depth, heads=heads,
mlp_mult=mlp_mult, dropout=dropout)
def forward(self, img):
x = self.conv_encoder(img)
x = self.transformer(x)
x = self.conv_decoder(x)
return x
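# Illustrative usage sketch (not part of the original code): with n_downsampling=3 and
# ngf=16 the transformer block runs on 2**3 * 16 = 128 channels at 1/8 resolution and
# the decoder brings the result back to the input size.
def _conv_transformer_example():
    import torch
    net = ConvTransformer(input_nc=1, n_downsampling=3, depth=1, heads=4, ngf=16)
    x = torch.randn(1, 1, 64, 64)   # one single-channel 64x64 slice
    return net(x).shape             # torch.Size([1, 1, 64, 64])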
| 6,016 | 34.187135 | 116 | py |
cycle-transformer | cycle-transformer-main/models/networks.py | # This code is released under the CC BY-SA 4.0 license.
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x): return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
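# Illustrative sketch (not part of the original code): with the 'linear' policy and,
# say, epoch_count=1, n_epochs=100 and n_epochs_decay=100, the multiplier returned by
# lambda_rule stays at 1.0 for the first 100 epochs and then falls linearly towards 0;
# LambdaLR multiplies the initial learning rate by this value.
def _linear_lr_multiplier_example(epoch, epoch_count=1, n_epochs=100, n_epochs_decay=100):
    return 1.0 - max(0, epoch + epoch_count - n_epochs) / float(n_epochs_decay + 1)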
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
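# Illustrative usage sketch (not part of the original code): building a ResNet generator
# and a default 70x70 PatchGAN discriminator for single-channel slices on CPU (empty
# gpu_ids); the networks come back already initialized by init_net.
def _define_networks_example():
    netG = define_G(input_nc=1, output_nc=1, ngf=64, netG='resnet_9blocks', norm='instance', gpu_ids=[])
    netD = define_D(input_nc=1, ndf=64, netD='basic', norm='instance', gpu_ids=[])
    return netG, netD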
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
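# Illustrative usage sketch (not part of the original code): the least-squares variant
# compares the discriminator's raw output map against an all-ones (real) or all-zeros
# (fake) target of the same shape.
def _gan_loss_example():
    criterion = GANLoss('lsgan')
    pred_fake = torch.zeros(2, 1, 30, 30) + 0.3   # a dummy PatchGAN prediction
    loss_G = criterion(pred_fake, True)           # generator wants fakes scored as real
    loss_D_fake = criterion(pred_fake, False)     # discriminator wants them scored as fake
    return loss_G, loss_D_fake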
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
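# --- Illustrative usage sketch (editor addition). A WGAN-GP discriminator step
# combines the 'wgangp' GANLoss with cal_gradient_penalty; `netD`, `real`, and
# `fake` are hypothetical arguments supplied by the caller.
def _wgangp_discriminator_step_sketch(netD, real, fake, device):
    criterion = GANLoss('wgangp')
    loss_real = criterion(netD(real), True)             # -E[D(real)]
    loss_fake = criterion(netD(fake.detach()), False)   # +E[D(fake)]
    gp, _ = cal_gradient_penalty(netD, real, fake.detach(), device,
                                 type='mixed', constant=1.0, lambda_gp=10.0)
    loss_D = loss_real + loss_fake + gp
    loss_D.backward()
    return loss_D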
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
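# --- Illustrative shape check (editor addition, assumes a 256x256 RGB input).
# The two stride-2 downsampling convs are undone by the two upsampling convs,
# so the generator preserves the spatial size of its input.
def _resnet_generator_shape_sketch():
    net = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=9)
    net.eval()
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        y = net(x)
    assert y.shape == (1, 3, 256, 256)
    return y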
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                   an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
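# --- Illustrative shape check (editor addition, assumes a 256x256 RGB input).
# With num_downs=8 the 256x256 input reaches a 1x1 bottleneck (2**8 == 256),
# and the symmetric upsampling path restores the original spatial size.
def _unet_generator_shape_sketch():
    net = UnetGenerator(input_nc=3, output_nc=3, num_downs=8, ngf=64)
    net.eval()
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        y = net(x)
    assert y.shape == (1, 3, 256, 256)
    return y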
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
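# --- Illustrative shape check (editor addition, assumes a 256x256 RGB input).
# With the default n_layers=3 (the 70x70 PatchGAN), the discriminator outputs a
# 30x30 map of patch predictions rather than a single scalar per image.
def _patchgan_output_shape_sketch():
    netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    netD.eval()
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        out = netD(x)
    assert out.shape == (1, 1, 30, 30)
    return out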
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
 | 28,452 | 45.115073 | 167 | py
cycle-transformer | cycle-transformer-main/models/cycle_gan_model.py | # This code is released under the CC BY-SA 4.0 license.
import torch
import itertools
from util import ImagePool
from .base_model import BaseModel
from . import networks
class CycleGANModel(BaseModel):
"""
This class implements the CycleGAN model, for learning image-to-image translation without paired data.
The model training requires '--dataset_mode unaligned' dataset.
By default, it uses a '--netG resnet_9blocks' ResNet generator,
a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
and a least-square GANs objective ('--gan_mode lsgan').
CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
A (source domain), B (target domain).
Generators: G_A: A -> B; G_B: B -> A.
Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
Dropout is not used in the original CycleGAN paper.
"""
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
return parser
def __init__(self, opt):
"""Initialize the CycleGAN class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
visual_names_A.append('idt_B')
visual_names_B.append('idt_A')
self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# define networks (both Generators and discriminators)
# The naming is different from those used in the paper.
# Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define discriminators
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device).float()
self.real_B = input['B' if AtoB else 'A'].to(self.device).float()
# self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG_A(self.real_A) # G_A(A)
self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
self.fake_A = self.netG_B(self.real_B) # G_B(B)
self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_A(self):
"""Calculate GAN loss for discriminator D_A"""
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
"""Calculate GAN loss for discriminator D_B"""
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
"""Calculate the loss for generators G_A and G_B"""
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed: ||G_A(B) - B||
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed: ||G_B(A) - A||
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# Forward cycle loss || G_B(G_A(A)) - A||
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss || G_A(G_B(B)) - B||
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss and calculate gradients
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
self.loss_G.backward()
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute fake images and reconstruction images.
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.backward_G() # calculate gradients for G_A and G_B
self.optimizer_G.step() # update G_A and G_B's weights
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D_A() # calculate gradients for D_A
self.backward_D_B()              # calculate gradients for D_B
self.optimizer_D.step() # update D_A and D_B's weights
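# --- Illustrative training-loop sketch (editor addition). `opt` and `dataset`
# are hypothetical objects produced by the project's option parser and data
# loader; they are not defined in this file. `model.setup(opt)` refers to the
# BaseModel method mentioned in the comments above.
def _cyclegan_training_loop_sketch(opt, dataset, n_epochs):
    model = CycleGANModel(opt)
    model.setup(opt)                     # print networks, create schedulers, etc.
    for epoch in range(n_epochs):
        for data in dataset:             # each `data` is a dict with keys 'A' and 'B'
            model.set_input(data)
            model.optimize_parameters()  # forward, update G_A/G_B, then D_A/D_B
    return model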
| 10,621 | 52.918782 | 362 | py |
cycle-transformer | cycle-transformer-main/util/image_pool.py | import random
import torch
class ImagePool:
"""This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class
Parameters:
pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
"""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_imgs = 0
self.images = []
def query(self, images):
"""Return an image from the pool.
Parameters:
images: the latest generated images from the generator
Returns images from the buffer.
With probability 0.5, the buffer will return the input images.
With probability 0.5, the buffer will return images previously stored in the buffer,
and insert the current images into the buffer.
"""
if self.pool_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else: # by another 50% chance, the buffer will return the current image
return_images.append(image)
return_images = torch.cat(return_images, 0) # collect all the images and return
return return_images
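# --- Illustrative usage sketch (editor addition). `fake_B` is a hypothetical
# batch of generator outputs. The pool returns the current images while it is
# still filling, and afterwards a roughly 50/50 mix of current and stored images.
def _image_pool_usage_sketch(fake_B):
    pool = ImagePool(pool_size=50)
    mixed = pool.query(fake_B)     # feed `mixed` (not `fake_B`) to the discriminator
    return mixed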
| 2,224 | 39.454545 | 140 | py |
cycle-transformer | cycle-transformer-main/util/util.py | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
aspect_ratio (float) -- the aspect ratio used to stretch the saved image (1.0 keeps the original size)
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
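# --- Illustrative usage sketch (editor addition). `fake_B` is a hypothetical
# 1x3xHxW generator output in [-1, 1]; the output path is only an example.
def _save_generated_image_sketch(fake_B, path='./fake_B.png'):
    image_numpy = tensor2im(fake_B)    # (H, W, 3) uint8 array in [0, 255]
    save_image(image_numpy, path)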
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
| 3,175 | 29.538462 | 119 | py |
cycle-transformer | cycle-transformer-main/data/colorization_dataset.py | import os
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from skimage import color # require skimage
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
class ColorizationDataset(BaseDataset):
"""This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.
This dataset is required by pix2pix-based colorization model ('--model colorization')
"""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
By default, the number of channels for input image is 1 (L) and
the number of channels for output image is 2 (ab). The direction is from A to B
"""
parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir = os.path.join(opt.dataroot, opt.phase)
self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')
self.transform = get_transform(self.opt, convert=False)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - the L channel of an image
B (tensor) - - the ab channels of the same image
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
path = self.AB_paths[index]
im = Image.open(path).convert('RGB')
im = self.transform(im)
im = np.array(im)
lab = color.rgb2lab(im).astype(np.float32)
lab_t = transforms.ToTensor()(lab)
A = lab_t[[0], ...] / 50.0 - 1.0
B = lab_t[[1, 2], ...] / 110.0
return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
| 2,704 | 38.202899 | 141 | py |
cycle-transformer | cycle-transformer-main/data/base_dataset.py | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
self.root = opt.dataroot
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
a dictionary of data with their names. It ususally contains the data itself and its metadata information.
"""
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
if 'crop' in opt.preprocess:
if params is None:
transform_list.append(transforms.RandomCrop(opt.crop_size))
else:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if opt.preprocess == 'none':
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
if not opt.no_flip:
if params is None:
transform_list.append(transforms.RandomHorizontalFlip())
elif params['flip']:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
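# --- Illustrative usage sketch (editor addition). The options object below is a
# hypothetical stand-in for the project's parsed options; the attribute names
# mirror the fields read by get_params/get_transform above, and the values are
# only examples.
def _example_train_transform():
    from argparse import Namespace
    opt = Namespace(preprocess='resize_and_crop', load_size=286,
                    crop_size=256, no_flip=False)
    return get_transform(opt, grayscale=False)  # Resize -> RandomCrop -> Flip -> ToTensor -> Normalize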
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if h == oh and w == ow:
return img
__print_size_warning(ow, oh, w, h)
return img.resize((w, h), method)
def __scale_width(img, target_size, crop_size, method=Image.BICUBIC):
ow, oh = img.size
if ow == target_size and oh >= crop_size:
return img
w = target_size
h = int(max(target_size * oh / ow, crop_size))
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
| 5,400 | 33.183544 | 141 | py |
cycle-transformer | cycle-transformer-main/data/image_folder.py | """A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
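# --- Illustrative usage sketch (editor addition). './datasets/trainA' is only
# an example path; any directory tree containing supported image files works.
def _image_folder_usage_sketch(root='./datasets/trainA'):
    paths = make_dataset(root)                       # recursively list image files
    dataset = ImageFolder(root, transform=None, return_paths=True)
    return paths, dataset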
| 1,885 | 27.575758 | 122 | py |
cycle-transformer | cycle-transformer-main/data/__init__.py | """This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = "pix2pix.data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader:
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
print("dataset [%s] was created" % type(self.dataset).__name__)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads))
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
"""Return a batch of data"""
for i, data in enumerate(self.dataloader):
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
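# --- Illustrative usage sketch (editor addition). `opt` is a hypothetical
# options object providing at least dataset_mode, dataroot, batch_size,
# serial_batches, num_threads, and max_dataset_size.
def _create_dataset_usage_sketch(opt):
    dataset = create_dataset(opt)
    print("number of training samples = %d" % len(dataset))
    for data in dataset:
        pass    # each `data` is the dict returned by the underlying dataset class
    return dataset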
| 3,270 | 37.034884 | 176 | py |
GreedyAC | GreedyAC-master/utils/experience_replay.py | # Import modules
import numpy as np
import torch
from abc import ABC, abstractmethod
# Class definitions
class ExperienceReplay(ABC):
"""
Abstract base class ExperienceReplay implements an experience replay
buffer. The specific kind of buffer is determined by classes which
implement this base class. For example, NumpyBuffer stores all
transitions in a numpy array while TorchBuffer implements the buffer
as a torch tensor.
Attributes
----------
self.cast : func
A function which will cast data into an appropriate form to be
stored in the replay buffer. All incoming data is assumed to be
a numpy array.
"""
def __init__(self, capacity, seed, state_size, action_size,
device=None):
"""
Constructor
Parameters
----------
capacity : int
The capacity of the buffer
seed : int
The random seed used for sampling from the buffer
state_size : tuple[int]
The number of dimensions of the state features
action_size : int
The number of dimensions in the action vector
"""
self.device = device
self.is_full = False
self.position = 0
self.capacity = capacity
# Set the casting function, which is needed for implementations which
# may keep the ER buffer as a different data structure, for example
# a torch tensor, in this case all data needs to be cast to a torch
# tensor before storing
self.cast = lambda x: x
# Set the random number generator
self.random = np.random.default_rng(seed=seed)
# Save the size of states and actions
self.state_size = state_size
self.action_size = action_size
self._sampleable = False
# Buffer of state, action, reward, next_state, done
self.state_buffer = None
self.action_buffer = None
self.reward_buffer = None
self.next_state_buffer = None
self.done_buffer = None
self.init_buffer()
@property
def sampleable(self):
return self._sampleable
@abstractmethod
def init_buffer(self):
"""
Initializes the buffers on which to store transitions.
Note that different classes which implement this abstract base class
may use different data types as buffers. For example, NumpyBuffer
stores all transitions using a numpy array, while TorchBuffer
stores all transitions on a torch Tensor on a specific device in order
to speed up training by keeping transitions on the same device as
the device which holds the model.
Post-Condition
--------------
The replay buffer self.buffer has been initialized
"""
pass
def push(self, state, action, reward, next_state, done):
"""
Pushes a trajectory onto the replay buffer
Parameters
----------
state : array_like
The state observation
action : array_like
The action taken by the agent in the state
reward : float
The reward seen after taking the argument action in the argument
state
next_state : array_like
The next state transitioned to
done : bool
Whether or not the transition was a transition to a goal state
"""
reward = np.array([reward])
done = np.array([done])
state = self.cast(state)
action = self.cast(action)
reward = self.cast(reward)
next_state = self.cast(next_state)
done = self.cast(done)
self.state_buffer[self.position] = state
self.action_buffer[self.position] = action
self.reward_buffer[self.position] = reward
self.next_state_buffer[self.position] = next_state
self.done_buffer[self.position] = done
if self.position >= self.capacity - 1:
self.is_full = True
self.position = (self.position + 1) % self.capacity
self._sampleable = False
@property
def sampleable(self):
return self._sampleable
def is_sampleable(self, batch_size):
if self.position < batch_size and not self.sampleable:
return False
elif not self._sampleable:
self._sampleable = True
return self.sampleable
def sample(self, batch_size):
"""
Samples a random batch from the buffer
Parameters
----------
batch_size : int
The size of the batch to sample
Returns
-------
5-tuple of torch.Tensor
The arrays of state, action, reward, next_state, and done from the
batch
"""
if not self.is_sampleable(batch_size):
return None, None, None, None, None
# Get the indices for the batch
if self.is_full:
indices = self.random.integers(low=0, high=len(self),
size=batch_size)
else:
indices = self.random.integers(low=0, high=self.position,
size=batch_size)
state = self.state_buffer[indices, :]
action = self.action_buffer[indices, :]
reward = self.reward_buffer[indices]
next_state = self.next_state_buffer[indices, :]
done = self.done_buffer[indices]
return state, action, reward, next_state, done
def __len__(self):
"""
Gets the number of elements in the buffer
Returns
-------
int
The number of elements currently in the buffer
"""
if not self.is_full:
return self.position
else:
return self.capacity
class NumpyBuffer(ExperienceReplay):
"""
Class NumpyBuffer implements an experience replay buffer. This
class stores all states, actions, and rewards as numpy arrays.
For an implementation that uses PyTorch tensors, see
TorchExperienceReplay
"""
def __init__(self, capacity, seed, state_size, action_size,
state_dtype=np.int32, action_dtype=np.int32):
"""
Constructor
Parameters
----------
capacity : int
The capacity of the buffer
seed : int
The random seed used for sampling from the buffer
state_size : tuple[int]
The dimensions of the state features
action_size : int
The number of dimensions in the action vector
"""
self._state_dtype = state_dtype
self._action_dtype = action_dtype
super().__init__(capacity, seed, state_size, action_size, None)
def init_buffer(self):
self.state_buffer = np.zeros((self.capacity, *self.state_size),
dtype=self._state_dtype)
self.next_state_buffer = np.zeros((self.capacity, *self.state_size),
dtype=self._state_dtype)
self.action_buffer = np.zeros((self.capacity, self.action_size),
dtype=self._state_dtype)
self.reward_buffer = np.zeros((self.capacity, 1))
self.done_buffer = np.zeros((self.capacity, 1), dtype=bool)
class TorchBuffer(ExperienceReplay):
"""
Class TorchBuffer implements an experience replay buffer. The
difference between this class and the ExperienceReplay class is that this
class keeps all experiences as a torch Tensor on the appropriate device
so that if using PyTorch, we do not need to cast the batch to a
FloatTensor every time we sample and then place it on the appropriate
device, as this is very time consuming. This class is basically a
PyTorch efficient implementation of ExperienceReplay.
"""
def __init__(self, capacity, seed, state_size, action_size, device):
"""
Constructor
Parameters
----------
capacity : int
The capacity of the buffer
seed : int
The random seed used for sampling from the buffer
device : torch.device
The device on which the buffer instances should be stored
state_size : int
The number of dimensions in the state feature vector
action_size : int
The number of dimensions in the action vector
"""
super().__init__(capacity, seed, state_size, action_size, device)
self.cast = torch.from_numpy
def init_buffer(self):
self.state_buffer = torch.FloatTensor(self.capacity, *self.state_size)
self.state_buffer = self.state_buffer.to(self.device)
self.next_state_buffer = torch.FloatTensor(self.capacity,
*self.state_size)
self.next_state_buffer = self.next_state_buffer.to(self.device)
self.action_buffer = torch.FloatTensor(self.capacity, self.action_size)
self.action_buffer = self.action_buffer.to(self.device)
self.reward_buffer = torch.FloatTensor(self.capacity, 1)
self.reward_buffer = self.reward_buffer.to(self.device)
self.done_buffer = torch.FloatTensor(self.capacity, 1)
self.done_buffer = self.done_buffer.to(self.device)
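# --- Illustrative usage sketch (editor addition). Shapes and hyperparameter
# values below are arbitrary examples, not the settings used in experiments.
def _torch_buffer_usage_sketch():
    buffer = TorchBuffer(capacity=1000, seed=0, state_size=(4,),
                         action_size=1, device=torch.device("cpu"))
    state = np.zeros(4, dtype=np.float32)
    action = np.zeros(1, dtype=np.float32)
    for _ in range(32):
        buffer.push(state, action, reward=0.0, next_state=state, done=False)
    # Returns (state, action, reward, next_state, done) batches as torch tensors
    return buffer.sample(batch_size=32)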
| 9,362 | 33.422794 | 79 | py |
GreedyAC | GreedyAC-master/agent/Random.py | #!/usr/bin/env python3
# Adapted from https://github.com/pranz24/pytorch-soft-actor-critic
# Import modules
import torch
import numpy as np
from agent.baseAgent import BaseAgent
class Random(BaseAgent):
"""
Random implements a random agent, which samples actions uniformly from
the available action space.
"""
def __init__(self, action_space, seed):
super().__init__()
self.batch = False
self.action_dims = len(action_space.high)
self.action_low = action_space.low
self.action_high = action_space.high
# Set the seed for all random number generators, this includes
# everything used by PyTorch, including setting the initial weights
# of networks. PyTorch prefers seeds with many non-zero binary units
self.torch_rng = torch.manual_seed(seed)
self.rng = np.random.default_rng(seed)
self.policy = torch.distributions.Uniform(
torch.Tensor(action_space.low), torch.Tensor(action_space.high))
def sample_action(self, _):
"""
Samples an action from the agent
Parameters
----------
_ : np.array
The state feature vector
Returns
-------
array_like of float
The action to take
"""
action = self.policy.sample()
return action.detach().cpu().numpy()
def sample_action_(self, _, size):
"""
sample_action_ is like sample_action, except the rng for
action selection in the environment is not affected by running
this function.
"""
return self.rng.uniform(self.action_low, self.action_high,
size=(size, self.action_dims))
def update(self, _, _1, _2, _3, _4):
pass
def update_value_fn(self, _, _1, _2, _3, _4, _5):
pass
def reset(self):
"""
Resets the agent between episodes
"""
pass
def eval(self):
pass
def train(self):
pass
# Save model parameters
def save_model(self, _, _1="", _2=None, _3=None):
pass
# Load model parameters
def load_model(self, _, _1):
pass
def get_parameters(self):
pass
| 2,248 | 24.556818 | 78 | py |
GreedyAC | GreedyAC-master/agent/baseAgent.py | #!/usr/bin/env python3
# Import modules
from abc import ABC, abstractmethod
# TODO: Given a data dictionary generated by main, create a static
# function to initialize any agent based on this dict. Note that since the
# dict has the agent name, only one function is needed to create ANY agent
# we could also use the experiment util create_agent() function
class BaseAgent(ABC):
"""
Class BaseAgent implements the base functionality for all agents
Attributes
----------
self.batch : bool
Whether or not the agent is using batch updates, by default False.
self.info : dict
A dictionary which records agent info
"""
def __init__(self):
"""
Constructor
"""
self.batch = False
self.info = {}
"""
BaseAgent is the abstract base class for all agents
"""
@abstractmethod
def sample_action(self, state):
"""
Samples an action from the agent
Parameters
----------
state : np.array
The state feature vector
Returns
-------
array_like of float
The action to take
"""
pass
@abstractmethod
def update(self, state, action, reward, next_state, done_mask):
"""
Takes a single update step, which may be a number of offline
batch updates
Parameters
----------
state : np.array or array_like of np.array
The state feature vector
action : np.array of float or array_like of np.array
The action taken
reward : float or array_like of float
The reward seen by the agent after taking the action
next_state : np.array or array_like of np.array
The feature vector of the next state transitioned to after the
agent took the argument action
done_mask : bool or array_like of bool
False if the agent reached the goal, True if the agent did not
reach the goal yet the episode ended (e.g. max number of steps
reached)
Return
------
4-tuple of array_like
A tuple containing array_like, each of which contains the states,
actions, rewards, and next states used in the update
"""
pass
@abstractmethod
def reset(self):
"""
Resets the agent between episodes
"""
pass
@abstractmethod
def eval(self):
"""
Sets the agent into offline evaluation mode, where the agent will not
explore
"""
pass
@abstractmethod
def train(self):
"""
Sets the agent to online training mode, where the agent will explore
"""
pass
@abstractmethod
def get_parameters(self):
"""
Gets all learned agent parameters such that training can be resumed.
Gets all parameters of the agent such that, if given the
hyperparameters of the agent, training is resumable from this exact
point. This include the learned average reward, the learned entropy,
and other such learned values if applicable. This does not only apply
to the weights of the agent, but *all* values that have been learned
or calculated during training such that, given these values, training
can be resumed from this exact point.
For example, in the LinearAC class, we must save not only the actor
and critic weights, but also the accumulated eligibility traces.
Returns
-------
dict of str to int, float, array_like, and/or torch.Tensor
The agent's weights
"""
pass
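# --- Illustrative interaction-loop sketch (editor addition). `agent` is any
# concrete BaseAgent subclass and `env` a hypothetical gym-style environment;
# for simplicity every episode end is treated here as reaching a terminal
# state, ignoring the timeout distinction described in the update() docstring.
def _base_agent_interaction_sketch(agent, env, n_episodes=10):
    for _ in range(n_episodes):
        state = env.reset()
        done = False
        while not done:
            action = agent.sample_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.update(state, action, reward, next_state, not done)
            state = next_state
        agent.reset()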
| 3,716 | 28.975806 | 77 | py |
GreedyAC | GreedyAC-master/agent/nonlinear/VACDiscrete.py | # Import modules
import torch
import inspect
import time
from gym.spaces import Box, Discrete
import numpy as np
import torch.nn.functional as F
from torch.optim import Adam
from agent.baseAgent import BaseAgent
import agent.nonlinear.nn_utils as nn_utils
from agent.nonlinear.policy.MLP import Softmax
from agent.nonlinear.value_function.MLP import Q as QMLP
from utils.experience_replay import TorchBuffer as ExperienceReplay
class VACDiscrete(BaseAgent):
def __init__(self, num_inputs, action_space, gamma, tau, alpha, policy,
target_update_interval, critic_lr, actor_lr_scale,
actor_hidden_dim, critic_hidden_dim,
replay_capacity, seed, batch_size, betas, cuda=False,
clip_stddev=1000, init=None, activation="relu"):
"""
Constructor
Parameters
----------
num_inputs : int
The number of input features
action_space : gym.spaces.Space
The action space from the gym environment
gamma : float
The discount factor
tau : float
The weight of the weighted average, which performs the soft update
to the target critic network's parameters toward the critic
network's parameters, that is: target_parameters =
((1 - τ) * target_parameters) + (τ * source_parameters)
alpha : float
The entropy regularization temperature. See equation (1) in paper.
policy : str
The type of policy, currently, only support "softmax"
target_update_interval : int
The number of updates to perform before the target critic network
is updated toward the critic network
critic_lr : float
The critic learning rate
actor_lr_scale : float
The scale of the actor learning rate relative to the critic learning rate (actor_lr = actor_lr_scale * critic_lr)
actor_hidden_dim : int
The number of hidden units in the actor's neural network
critic_hidden_dim : int
The number of hidden units in the critic's neural network
replay_capacity : int
The number of transitions stored in the replay buffer
seed : int
The random seed so that random samples of batches are repeatable
batch_size : int
The number of elements in a batch for the batch update
cuda : bool, optional
Whether or not cuda should be used for training, by default False.
Note that if True, cuda is only utilized if available.
clip_stddev : float, optional
The value at which the standard deviation is clipped in order to
prevent numerical overflow, by default 1000. If <= 0, then
no clipping is done.
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
Raises
------
ValueError
If the batch size is larger than the replay buffer
"""
super().__init__()
self.batch = True
# Ensure batch size < replay capacity
if batch_size > replay_capacity:
raise ValueError("cannot have a batch larger than replay " +
"buffer capacity")
# Set the seed for all random number generators, this includes
# everything used by PyTorch, including setting the initial weights
# of networks. PyTorch prefers seeds with many non-zero binary units
self.torch_rng = torch.manual_seed(seed)
self.rng = np.random.default_rng(seed)
self.is_training = True
self.gamma = gamma
self.tau = tau
self.alpha = alpha
self.discrete_action = isinstance(action_space, Discrete)
self.state_dims = num_inputs
self.device = torch.device("cuda:0" if cuda and
torch.cuda.is_available() else "cpu")
if isinstance(action_space, Box):
raise ValueError("VACDiscrete can only be used with " +
"discrete actions")
elif isinstance(action_space, Discrete):
self.action_dims = 1
# Keep a replay buffer
self.replay = ExperienceReplay(replay_capacity, seed,
(num_inputs,), 1, self.device)
self.batch_size = batch_size
# Set the interval between timesteps when the target network should be
# updated and keep a running total of update number
self.target_update_interval = target_update_interval
self.update_number = 0
# Create the critic Q function
if isinstance(action_space, Box):
action_shape = action_space.shape[0]
elif isinstance(action_space, Discrete):
action_shape = 1
self.critic = QMLP(num_inputs, action_shape,
critic_hidden_dim, init, activation).to(
device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=critic_lr,
betas=betas)
self.critic_target = QMLP(num_inputs, action_shape,
critic_hidden_dim, init, activation).to(
self.device)
nn_utils.hard_update(self.critic_target, self.critic)
self.policy_type = policy.lower()
actor_lr = actor_lr_scale * critic_lr
if self.policy_type == "softmax":
self.num_actions = action_space.n
self.policy = Softmax(num_inputs, self.num_actions,
actor_hidden_dim, activation,
init).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=actor_lr,
betas=betas)
else:
raise NotImplementedError(f"policy type {policy} not implemented")
source = inspect.getsource(inspect.getmodule(inspect.currentframe()))
self.info = {}
self.info = {
"source": source,
}
def sample_action(self, state):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if self.is_training:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
act = action.detach().cpu().numpy()[0]
if not self.discrete_action:
return act
else:
return int(act)
def update(self, state, action, reward, next_state, done_mask):
if self.discrete_action:
action = np.array([action])
# Keep transition in replay buffer
self.replay.push(state, action, reward, next_state, done_mask)
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, \
mask_batch = self.replay.sample(batch_size=self.batch_size)
if state_batch is None:
# Not enough samples in buffer
return
# When updating Q functions, we don't want to backprop through the
# policy and target network parameters
with torch.no_grad():
next_state_action, _, _ = \
self.policy.sample(next_state_batch)
qf_next_value = self.critic_target(next_state_batch,
next_state_action)
q_target = reward_batch + mask_batch * self.gamma * qf_next_value
q_prediction = self.critic(state_batch, action_batch)
q_loss = F.mse_loss(q_prediction, q_target)
# Update the critic
self.critic_optim.zero_grad()
q_loss.backward()
self.critic_optim.step()
# Calculate the actor loss using Eqn(5) in FKL/RKL paper
# No need to use a baseline in this setting
state_batch = state_batch.repeat_interleave(self.num_actions, dim=0)
actions = torch.tensor([n for n in range(self.num_actions)])
actions = actions.repeat(self.batch_size)
actions = actions.unsqueeze(-1)
q = self.critic(state_batch, actions)
log_prob = self.policy.log_prob(state_batch, actions)
prob = log_prob.exp()
with torch.no_grad():
scale = q - log_prob * self.alpha
policy_loss = prob * scale
policy_loss = policy_loss.reshape([self.batch_size, self.num_actions])
policy_loss = -policy_loss.sum(dim=1).mean()
# Update the actor
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
# Update target network
self.update_number += 1
if self.update_number % self.target_update_interval == 0:
self.update_number = 0
nn_utils.soft_update(self.critic_target, self.critic, self.tau)
def reset(self):
pass
def eval(self):
self.is_training = False
def train(self):
self.is_training = True
def save_model(self, env_name, suffix="", actor_path=None,
critic_path=None):
pass
def load_model(self, actor_path, critic_path):
pass
def get_parameters(self):
pass
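# --- Illustrative construction sketch (editor addition). `env` is a
# hypothetical gym environment with a Discrete action space; the hyperparameter
# values are arbitrary examples, not tuned settings.
def _vac_discrete_construction_sketch(env):
    return VACDiscrete(
        num_inputs=env.observation_space.shape[0],
        action_space=env.action_space,
        gamma=0.99, tau=0.01, alpha=0.01, policy="softmax",
        target_update_interval=1, critic_lr=1e-3, actor_lr_scale=1.0,
        actor_hidden_dim=64, critic_hidden_dim=64, replay_capacity=100000,
        seed=0, batch_size=32, betas=(0.9, 0.999))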
| 9,344 | 37.29918 | 78 | py |
GreedyAC | GreedyAC-master/agent/nonlinear/GreedyAC.py | # Import modules
from gym.spaces import Box, Discrete
import torch
import torch.nn.functional as F
from torch.optim import Adam
import numpy as np
from agent.baseAgent import BaseAgent
from utils.experience_replay import TorchBuffer as ExperienceReplay
from agent.nonlinear.value_function.MLP import Q as QMLP
from agent.nonlinear.policy.MLP import SquashedGaussian, Gaussian, Softmax
import agent.nonlinear.nn_utils as nn_utils
import inspect
class GreedyAC(BaseAgent):
"""
GreedyAC implements the GreedyAC algorithm with continuous actions.
"""
def __init__(self, num_inputs, action_space, gamma, tau, alpha, policy,
target_update_interval, critic_lr, actor_lr_scale,
actor_hidden_dim, critic_hidden_dim, replay_capacity, seed,
batch_size, rho, num_samples, betas, env, cuda=False,
clip_stddev=1000, init=None, entropy_from_single_sample=True,
activation="relu"):
super().__init__()
self.batch = True
# Ensure batch size < replay capacity
if batch_size > replay_capacity:
raise ValueError("cannot have a batch larger than replay " +
"buffer capacity")
# Set the seed for all random number generators, this includes
# everything used by PyTorch, including setting the initial weights
# of networks. PyTorch prefers seeds with many non-zero binary units
self.torch_rng = torch.manual_seed(seed)
self.rng = np.random.default_rng(seed)
self.is_training = True
self.entropy_from_single_sample = entropy_from_single_sample
self.gamma = gamma
self.tau = tau # Polyak average
self.alpha = alpha # Entropy scale
self.state_dims = num_inputs
self.discrete_action = isinstance(action_space, Discrete)
self.action_space = action_space
self.device = torch.device("cuda:0" if cuda and
torch.cuda.is_available() else "cpu")
if isinstance(action_space, Box):
self.action_dims = len(action_space.high)
# Keep a replay buffer
self.replay = ExperienceReplay(replay_capacity, seed,
env.observation_space.shape,
action_space.shape[0], self.device)
elif isinstance(action_space, Discrete):
self.action_dims = 1
# Keep a replay buffer
self.replay = ExperienceReplay(replay_capacity, seed,
env.observation_space.shape,
1, self.device)
self.batch_size = batch_size
# Set the interval between timesteps when the target network should be
# updated and keep a running total of update number
self.target_update_interval = target_update_interval
self.update_number = 0
# For GreedyAC update
self.rho = rho
self.num_samples = num_samples
# Create the critic Q function
if isinstance(action_space, Box):
action_shape = action_space.shape[0]
elif isinstance(action_space, Discrete):
action_shape = 1
self.critic = QMLP(num_inputs, action_shape, critic_hidden_dim,
init, activation).to(device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=critic_lr,
betas=betas)
self.critic_target = QMLP(num_inputs, action_shape,
critic_hidden_dim, init, activation).to(
self.device)
nn_utils.hard_update(self.critic_target, self.critic)
self._create_policies(policy, num_inputs, action_space,
actor_hidden_dim, clip_stddev, init, activation)
actor_lr = actor_lr_scale * critic_lr
self.policy_optim = Adam(self.policy.parameters(), lr=actor_lr,
betas=betas)
self.sampler_optim = Adam(self.sampler.parameters(), lr=actor_lr,
betas=betas)
nn_utils.hard_update(self.sampler, self.policy)
self.is_training = True
source = inspect.getsource(inspect.getmodule(inspect.currentframe()))
self.info["source"] = source
def update(self, state, action, reward, next_state, done_mask):
# Adjust action shape to ensure it fits in replay buffer properly
if self.discrete_action:
action = np.array([action])
# Keep transition in replay buffer
self.replay.push(state, action, reward, next_state, done_mask)
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, \
mask_batch = self.replay.sample(batch_size=self.batch_size)
if state_batch is None:
# Too few samples in the buffer to sample
return
# When updating Q functions, we don't want to backprop through the
# policy and target network parameters
next_state_action, _, _ = self.policy.sample(next_state_batch)
with torch.no_grad():
next_q = self.critic_target(next_state_batch, next_state_action)
target_q_value = reward_batch + mask_batch * self.gamma * next_q
q_value = self.critic(state_batch, action_batch)
# Calculate the loss on the critic
# JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
q_loss = F.mse_loss(target_q_value, q_value)
# Update the critic
self.critic_optim.zero_grad()
q_loss.backward()
self.critic_optim.step()
# Update target networks
self.update_number += 1
if self.update_number % self.target_update_interval == 0:
self.update_number = 0
nn_utils.soft_update(self.critic_target, self.critic, self.tau)
# Sample actions from the sampler to determine which to update
# with
action_batch, _, _, = self.sampler.sample(state_batch,
self.num_samples)
action_batch = action_batch.permute(1, 0, 2)
action_batch = action_batch.reshape(self.batch_size * self.num_samples,
self.action_dims)
stacked_s_batch = state_batch.repeat_interleave(self.num_samples,
dim=0)
# Get the values of the sampled actions and find the best
# ϱ * num_samples actions
with torch.no_grad():
q_values = self.critic(stacked_s_batch, action_batch)
q_values = q_values.reshape(self.batch_size, self.num_samples,
1)
sorted_q = torch.argsort(q_values, dim=1, descending=True)
best_ind = sorted_q[:, :int(self.rho * self.num_samples)]
best_ind = best_ind.repeat_interleave(self.action_dims, -1)
action_batch = action_batch.reshape(self.batch_size, self.num_samples,
self.action_dims)
best_actions = torch.gather(action_batch, 1, best_ind)
# Reshape samples for calculating the loss
samples = int(self.rho * self.num_samples)
stacked_s_batch = state_batch.repeat_interleave(samples, dim=0)
best_actions = torch.reshape(best_actions, (-1, self.action_dims))
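        # Shape check with hypothetical values: num_samples=30, rho=0.1 and
        # batch_size=32 give samples=3, so stacked_s_batch has shape
        # (96, state_dims) and best_actions has shape (96, action_dims)
        # after the reshape above.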
# Actor loss
# print(stacked_s_batch.shape, best_actions.shape)
# print("Computing actor loss")
policy_loss = self.policy.log_prob(stacked_s_batch, best_actions)
policy_loss = -policy_loss.mean()
# Update actor
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
# Calculate sampler entropy
stacked_s_batch = state_batch.repeat_interleave(self.num_samples,
dim=0)
stacked_s_batch = stacked_s_batch.reshape(-1, self.state_dims)
action_batch = action_batch.reshape(-1, self.action_dims)
sampler_entropy = self.sampler.log_prob(stacked_s_batch, action_batch)
with torch.no_grad():
sampler_entropy *= sampler_entropy
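        # Note: the in-place multiply above (performed under no_grad) squares
        # the log-probabilities, so the quantity treated as the sampler
        # "entropy" below is -(log π)^2 rather than the usual -log π.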
sampler_entropy = sampler_entropy.reshape(self.batch_size,
self.num_samples, 1)
if self.entropy_from_single_sample:
sampler_entropy = -sampler_entropy[:, 0, :]
else:
sampler_entropy = -sampler_entropy.mean(axis=1)
# Calculate sampler loss
stacked_s_batch = state_batch.repeat_interleave(samples, dim=0)
sampler_loss = self.sampler.log_prob(stacked_s_batch, best_actions)
sampler_loss = sampler_loss.reshape(self.batch_size, samples, 1)
sampler_loss = sampler_loss.mean(axis=1)
sampler_loss = sampler_loss + (sampler_entropy * self.alpha)
sampler_loss = -sampler_loss.mean()
# Update the sampler
self.sampler_optim.zero_grad()
sampler_loss.backward()
self.sampler_optim.step()
def sample_action(self, state):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if self.is_training:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
act = action.detach().cpu().numpy()[0]
if not self.discrete_action:
return act
else:
return int(act[0])
def reset(self):
pass
def eval(self):
self.is_training = False
def train(self):
self.is_training = True
def _create_policies(self, policy, num_inputs, action_space,
actor_hidden_dim, clip_stddev, init, activation):
self.policy_type = policy.lower()
if self.policy_type == "gaussian":
self.policy = Gaussian(num_inputs, action_space.shape[0],
actor_hidden_dim, activation,
action_space, clip_stddev,
init).to(self.device)
self.sampler = Gaussian(num_inputs, action_space.shape[0],
actor_hidden_dim, activation,
action_space, clip_stddev,
init).to(self.device)
elif self.policy_type == "squashedgaussian":
self.policy = SquashedGaussian(num_inputs, action_space.shape[0],
actor_hidden_dim, activation,
action_space, clip_stddev,
init).to(self.device)
self.sampler = SquashedGaussian(num_inputs, action_space.shape[0],
actor_hidden_dim, activation,
action_space, clip_stddev,
init).to(self.device)
elif self.policy_type == "softmax":
num_actions = action_space.n
self.policy = Softmax(num_inputs, num_actions,
actor_hidden_dim, activation,
action_space, init).to(self.device)
self.sampler = Softmax(num_inputs, num_actions,
actor_hidden_dim, activation,
action_space, init).to(self.device)
else:
raise NotImplementedError
def get_parameters(self):
pass
def save_model(self, env_name, suffix="", actor_path=None,
critic_path=None):
pass
def load_model(self, actor_path, critic_path):
pass
| 11,905 | 40.340278 | 79 | py |
GreedyAC | GreedyAC-master/agent/nonlinear/GreedyACDiscrete.py | # Import modules
from gym.spaces import Box, Discrete
import inspect
import torch
import torch.nn.functional as F
from torch.optim import Adam
import numpy as np
from agent.baseAgent import BaseAgent
from utils.experience_replay import TorchBuffer as ExperienceReplay
from agent.nonlinear.value_function.MLP import Q as QMLP
from agent.nonlinear.policy.MLP import Softmax
import agent.nonlinear.nn_utils as nn_utils
class GreedyACDiscrete(BaseAgent):
"""
    GreedyACDiscrete implements the GreedyAC algorithm with discrete actions.
"""
def __init__(self, num_inputs, action_space, gamma, tau, policy,
target_update_interval, critic_lr, actor_lr_scale,
actor_hidden_dim, critic_hidden_dim, replay_capacity, seed,
batch_size, betas, cuda=False,
clip_stddev=1000, init=None, entropy_from_single_sample=True,
activation="relu"):
super().__init__()
self.batch = True
        # The number of top-valued actions whose probability the actor update
        # will increase
self.top_actions = 1
# Ensure batch size < replay capacity
if batch_size > replay_capacity:
raise ValueError("cannot have a batch larger than replay " +
"buffer capacity")
# Set the seed for all random number generators, this includes
# everything used by PyTorch, including setting the initial weights
# of networks. PyTorch prefers seeds with many non-zero binary units
self.torch_rng = torch.manual_seed(seed)
self.rng = np.random.default_rng(seed)
self.is_training = True
self.entropy_from_single_sample = entropy_from_single_sample
self.gamma = gamma
self.tau = tau # Polyak average
self.state_dims = num_inputs
self.device = torch.device("cuda:0" if cuda and
torch.cuda.is_available() else "cpu")
if isinstance(action_space, Discrete):
self.action_dims = 1
# Keep a replay buffer
self.replay = ExperienceReplay(replay_capacity, seed,
(num_inputs,), 1, self.device)
else:
raise ValueError("GreedyACDiscrete must use discrete action")
self.batch_size = batch_size
# Set the interval between timesteps when the target network should be
# updated and keep a running total of update number
self.target_update_interval = target_update_interval
self.update_number = 0
# Create the critic Q function
if isinstance(action_space, Box):
raise ValueError("GreedyACDiscrete must use discrete actions")
elif isinstance(action_space, Discrete):
action_shape = 1
self.critic = QMLP(num_inputs, action_shape,
critic_hidden_dim, init, activation).to(
device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=critic_lr,
betas=betas)
self.critic_target = QMLP(num_inputs, action_shape,
critic_hidden_dim, init, activation).to(
self.device)
nn_utils.hard_update(self.critic_target, self.critic)
self._create_policies(policy, num_inputs, action_space,
actor_hidden_dim, clip_stddev, init, activation)
actor_lr = actor_lr_scale * critic_lr
self.policy_optim = Adam(self.policy.parameters(), lr=actor_lr,
betas=betas)
self.is_training = True
source = inspect.getsource(inspect.getmodule(inspect.currentframe()))
        self.info = {
            "action_values": [],
            "source": source,
        }
def update(self, state, action, reward, next_state, done_mask):
# Adjust action shape to ensure it fits in replay buffer properly
action = np.array([action])
# Keep transition in replay buffer
self.replay.push(state, action, reward, next_state, done_mask)
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, \
mask_batch = self.replay.sample(batch_size=self.batch_size)
if state_batch is None:
# Not enough samples in buffer
return
# When updating Q functions, we don't want to backprop through the
# policy and target network parameters
with torch.no_grad():
next_state_action, _, _ = \
self.policy.sample(next_state_batch)
next_q = self.critic_target(next_state_batch, next_state_action)
target_q_value = reward_batch + mask_batch * self.gamma * next_q
q_value = self.critic(state_batch, action_batch)
# Calculate the loss on the critic
q_loss = F.mse_loss(target_q_value, q_value)
# Update the critic
self.critic_optim.zero_grad()
q_loss.backward()
self.critic_optim.step()
# Update target networks
self.update_number += 1
if self.update_number % self.target_update_interval == 0:
self.update_number = 0
nn_utils.soft_update(self.critic_target, self.critic, self.tau)
# Sample actions from the sampler to determine which to update
# with
with torch.no_grad():
action_batch = self.sampler(state_batch)
stacked_s_batch = state_batch.repeat_interleave(self.num_actions,
dim=0)
# Get the values of the sampled actions and find the best
# self.top_actions actions
with torch.no_grad():
q_values = self.critic(stacked_s_batch, action_batch)
q_values = q_values.reshape(self.batch_size, self.num_actions,
1)
sorted_q = torch.argsort(q_values, dim=1, descending=True)
best_ind = sorted_q[:, :self.top_actions]
best_ind = best_ind.repeat_interleave(self.action_dims, -1)
action_batch = action_batch.reshape(self.batch_size, self.num_actions,
self.action_dims)
best_actions = torch.gather(action_batch, 1, best_ind)
# Reshape samples for calculating the loss
stacked_s_batch = state_batch.repeat_interleave(self.top_actions,
dim=0)
best_actions = torch.reshape(best_actions, (-1, self.action_dims))
# Actor loss
# print(stacked_s_batch.shape, best_actions.shape)
# print("Computing actor loss")
policy_loss = self.policy.log_prob(stacked_s_batch, best_actions)
policy_loss = -policy_loss.mean()
# Update actor
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
def sample_action(self, state):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if self.is_training:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
act = action.detach().cpu().numpy()[0][0]
return act
def reset(self):
pass
def eval(self):
self.is_training = False
def train(self):
self.is_training = True
def _create_policies(self, policy, num_inputs, action_space,
actor_hidden_dim, clip_stddev, init, activation):
self.policy_type = policy.lower()
if self.policy_type == "softmax":
self.num_actions = action_space.n
self.policy = Softmax(num_inputs, self.num_actions,
actor_hidden_dim, activation,
init).to(self.device)
# Sampler returns every available action in each state
def sample(state_batch):
batch_size = state_batch.shape[0]
actions = torch.tensor([n for n in range(self.num_actions)])
actions = actions.repeat(batch_size).unsqueeze(-1)
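                # e.g. with num_actions=3 and batch_size=2 (hypothetical
                # values) this returns [[0], [1], [2], [0], [1], [2]]: every
                # available action, repeated once per state in the batch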
return actions
self.sampler = sample
else:
raise NotImplementedError
def get_parameters(self):
pass
def save_model(self, env_name, suffix="", actor_path=None,
critic_path=None):
pass
def load_model(self, actor_path, critic_path):
pass
| 8,572 | 36.436681 | 78 | py |
GreedyAC | GreedyAC-master/agent/nonlinear/SAC.py | # Import modules
import torch
import numpy as np
import torch.nn.functional as F
from torch.optim import Adam
from agent.baseAgent import BaseAgent
import agent.nonlinear.nn_utils as nn_utils
from agent.nonlinear.policy.MLP import SquashedGaussian, Gaussian
from agent.nonlinear.value_function.MLP import DoubleQ, Q
from utils.experience_replay import TorchBuffer as ExperienceReplay
import inspect
class SAC(BaseAgent):
"""
SAC implements the Soft Actor-Critic algorithm for continuous action spaces
as found in the paper https://arxiv.org/pdf/1812.05905.pdf.
"""
def __init__(
self,
gamma,
tau,
alpha,
policy,
target_update_interval,
critic_lr,
actor_lr_scale,
alpha_lr,
actor_hidden_dim,
critic_hidden_dim,
replay_capacity,
seed,
batch_size,
betas,
env,
baseline_actions=-1,
reparameterized=True,
soft_q=True,
double_q=True,
num_samples=1,
automatic_entropy_tuning=False,
cuda=False,
clip_stddev=1000,
init=None,
activation="relu",
):
"""
Constructor
Parameters
----------
gamma : float
The discount factor
tau : float
The weight of the weighted average, which performs the soft update
to the target critic network's parameters toward the critic
network's parameters, that is: target_parameters =
((1 - τ) * target_parameters) + (τ * source_parameters)
alpha : float
            The entropy regularization temperature. See equation (1) in the
            paper.
        policy : str
            The type of policy; currently only "gaussian" and
            "squashedgaussian" are supported
target_update_interval : int
The number of updates to perform before the target critic network
is updated toward the critic network
critic_lr : float
The critic learning rate
        actor_lr_scale : float
            The scale of the actor learning rate relative to the critic
            learning rate; the actor learning rate is
            actor_lr_scale * critic_lr
alpha_lr : float
            The learning rate for the entropy parameter, if using an automatic
            entropy tuning algorithm (see the automatic_entropy_tuning
            parameter below)
actor_hidden_dim : int
The number of hidden units in the actor's neural network
critic_hidden_dim : int
The number of hidden units in the critic's neural network
replay_capacity : int
The number of transitions stored in the replay buffer
seed : int
The random seed so that random samples of batches are repeatable
batch_size : int
The number of elements in a batch for the batch update
automatic_entropy_tuning : bool, optional
Whether the agent should automatically tune its entropy
            hyperparameter alpha, by default False
cuda : bool, optional
Whether or not cuda should be used for training, by default False.
Note that if True, cuda is only utilized if available.
clip_stddev : float, optional
The value at which the standard deviation is clipped in order to
prevent numerical overflow, by default 1000. If <= 0, then
no clipping is done.
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
soft_q : bool
Whether or not to learn soft Q functions, by default True. The
original SAC uses soft Q functions since we learn an
entropy-regularized policy. When learning an entropy regularized
policy, guaranteed policy improvement (in the ideal case) only
exists with respect to soft action values.
reparameterized : bool
Whether to use the reparameterization trick to learn the policy or
to use the log-likelihood trick. The original SAC uses the
reparameterization trick.
double_q : bool
Whether or not to use a double Q critic, by default True
num_samples : int
The number of samples to use to estimate the gradient when using a
likelihood-based SAC (i.e. `reparameterized == False`), by default
1.
Raises
------
ValueError
If the batch size is larger than the replay buffer
"""
super().__init__()
self._env = env
# Ensure batch size < replay capacity
if batch_size > replay_capacity:
raise ValueError("cannot have a batch larger than replay " +
"buffer capacity")
        if reparameterized and num_samples != 1:
            raise ValueError("cannot use the reparameterized gradient " +
                             "with num_samples != 1")
action_space = env.action_space
self._action_space = action_space
obs_space = env.observation_space
self._obs_space = obs_space
if len(obs_space.shape) != 1:
raise ValueError("SAC only supports vector observations")
self._baseline_actions = baseline_actions
# Set the seed for all random number generators, this includes
# everything used by PyTorch, including setting the initial weights
# of networks.
self._torch_rng = torch.manual_seed(seed)
self._rng = np.random.default_rng(seed)
# Random hypers and fields
self._is_training = True
self._gamma = gamma
self._tau = tau
self._reparameterized = reparameterized
self._soft_q = soft_q
self._double_q = double_q
if num_samples < 1:
raise ValueError("cannot have num_samples < 1")
self._num_samples = num_samples # Sample for likelihood-based gradient
self._device = torch.device("cuda:0" if cuda and
torch.cuda.is_available() else "cpu")
# Experience replay buffer
self._batch_size = batch_size
self._replay = ExperienceReplay(replay_capacity, seed, obs_space.shape,
action_space.shape[0], self._device)
# Set the interval between timesteps when the target network should be
# updated and keep a running total of update number
self._target_update_interval = target_update_interval
self._update_number = 0
# Automatic entropy tuning
self._automatic_entropy_tuning = automatic_entropy_tuning
self._alpha_lr = alpha_lr
if self._automatic_entropy_tuning and self._alpha_lr <= 0:
raise ValueError("should not use entropy lr <= 0")
# Set up the critic and target critic
self._init_critic(
obs_space,
action_space,
critic_hidden_dim,
init,
activation,
critic_lr,
betas,
)
# Set up the policy
self._policy_type = policy.lower()
actor_lr = actor_lr_scale * critic_lr
self._init_policy(
obs_space,
action_space,
actor_hidden_dim,
init,
activation,
actor_lr,
betas,
clip_stddev,
)
# Set up auto entropy tuning
if self._automatic_entropy_tuning:
self._target_entropy = -torch.prod(
torch.Tensor(action_space.shape).to(self._device)
).item()
self._log_alpha = torch.zeros(
1,
requires_grad=True,
device=self._device,
)
self._alpha = self._log_alpha.exp().detach()
self._alpha_optim = Adam([self._log_alpha], lr=self._alpha_lr)
else:
self._alpha = alpha # Entropy scale
source = inspect.getsource(inspect.getmodule(inspect.currentframe()))
self.info["source"] = source
def sample_action(self, state):
state = torch.FloatTensor(state).to(self._device).unsqueeze(0)
if self._is_training:
action = self._policy.rsample(state)[0]
else:
action = self._policy.rsample(state)[3]
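            # rsample's index-0 output is a stochastic sample used during
            # training; index 3 is presumably the deterministic (mean) action,
            # so evaluation acts greedily under the policy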
return action.detach().cpu().numpy()[0]
def update(self, state, action, reward, next_state, done_mask):
# Keep transition in replay buffer
self._replay.push(state, action, reward, next_state, done_mask)
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, \
mask_batch = self._replay.sample(batch_size=self._batch_size)
if state_batch is None:
return
self._update_critic(state_batch, action_batch, reward_batch,
next_state_batch, mask_batch)
self._update_actor(state_batch, action_batch, reward_batch,
next_state_batch, mask_batch)
def _update_actor(self, state_batch, action_batch, reward_batch,
next_state_batch, mask_batch):
"""
Update the actor given a batch of transitions sampled from a replay
buffer.
"""
# Calculate the actor loss
if self._reparameterized:
# Reparameterization trick
if self._baseline_actions > 0:
pi, log_pi = self._policy.rsample(
state_batch,
num_samples=self._baseline_actions+1,
)[:2]
pi = pi.transpose(0, 1).reshape(
-1,
self._action_space.high.shape[0],
)
s_state_batch = state_batch.repeat_interleave(
self._baseline_actions + 1,
dim=0,
)
q = self._get_q(s_state_batch, pi)
q = q.reshape(self._batch_size, self._baseline_actions + 1, -1)
# Don't backprop through the approximate state-value baseline
baseline = q[:, 1:].mean(axis=1).squeeze().detach()
log_pi = log_pi[0, :, 0]
q = q[:, 0, 0]
q -= baseline
else:
pi, log_pi = self._policy.rsample(state_batch)[:2]
q = self._get_q(state_batch, pi)
policy_loss = ((self._alpha * log_pi) - q).mean()
else:
# Log likelihood trick
baseline = 0
if self._baseline_actions > 0:
with torch.no_grad():
pi = self._policy.sample(
state_batch,
num_samples=self._baseline_actions,
)[0]
pi = pi.transpose(0, 1).reshape(
-1,
self._action_space.high.shape[0],
)
s_state_batch = state_batch.repeat_interleave(
self._baseline_actions,
dim=0,
)
q = self._get_q(s_state_batch, pi)
q = q.reshape(
self._batch_size,
self._baseline_actions,
-1,
)
baseline = q[:, 1:].mean(axis=1)
sample = self._policy.sample(
state_batch,
self._num_samples,
)
pi, log_pi = sample[:2] # log_pi is differentiable
if self._num_samples > 1:
pi = pi.reshape(self._num_samples * self._batch_size, -1)
state_batch = state_batch.repeat(self._num_samples, 1)
with torch.no_grad():
# Context manager ensures that we don't backprop through the q
# function when minimizing the policy loss
q = self._get_q(state_batch, pi)
q -= baseline
# Compute the policy loss
log_pi = log_pi.reshape(self._num_samples * self._batch_size, -1)
with torch.no_grad():
scale = self._alpha * log_pi - q
policy_loss = log_pi * scale
policy_loss = policy_loss.mean()
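        # The two branches target the same objective but differ in estimator:
        #   reparameterized:   ∇θ E[α log πθ(a|s) - Q(s, fθ(ε; s))]
        #   likelihood-ratio:  E[∇θ log πθ(a|s) · (α log πθ(a|s) - Q(s,a))]
        # with the bracketed scale detached (and optionally baselined) in the
        # second case.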
# Update the actor
self._policy_optim.zero_grad()
policy_loss.backward()
self._policy_optim.step()
# Tune the entropy if appropriate
if self._automatic_entropy_tuning:
alpha_loss = -(self._log_alpha *
(log_pi + self._target_entropy).detach()).mean()
self._alpha_optim.zero_grad()
alpha_loss.backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.exp().detach()
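            # Temperature loss -E[log α · (log π(a|s) + target_entropy)]:
            # α grows when the policy entropy drops below the target -dim(A)
            # and shrinks otherwise.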
def reset(self):
pass
def eval(self):
self._is_training = False
def train(self):
self._is_training = True
# Save model parameters
def save_model(self, env_name, suffix="", actor_path=None,
critic_path=None):
pass
# Load model parameters
def load_model(self, actor_path, critic_path):
pass
def get_parameters(self):
pass
def _init_critic(self, obs_space, action_space, critic_hidden_dim, init,
activation, critic_lr, betas):
"""
Initializes the critic
"""
num_inputs = obs_space.shape[0]
if self._double_q:
critic_type = DoubleQ
else:
critic_type = Q
self._critic = critic_type(
num_inputs,
action_space.shape[0],
critic_hidden_dim,
init,
activation,
).to(device=self._device)
self._critic_target = critic_type(
num_inputs,
action_space.shape[0],
critic_hidden_dim,
init,
activation,
).to(self._device)
# Ensure critic and target critic share the same parameters at the
# beginning of training
nn_utils.hard_update(self._critic_target, self._critic)
self._critic_optim = Adam(
self._critic.parameters(),
lr=critic_lr,
betas=betas,
)
def _init_policy(self, obs_space, action_space, actor_hidden_dim, init,
activation, actor_lr, betas, clip_stddev):
"""
Initializes the policy
"""
num_inputs = obs_space.shape[0]
if self._policy_type == "squashedgaussian":
self._policy = SquashedGaussian(num_inputs, action_space.shape[0],
actor_hidden_dim, activation,
action_space, clip_stddev,
init).to(self._device)
elif self._policy_type == "gaussian":
self._policy = Gaussian(num_inputs, action_space.shape[0],
actor_hidden_dim, activation, action_space,
clip_stddev, init).to(self._device)
else:
raise NotImplementedError(f"policy {self._policy_type} unknown")
self._policy_optim = Adam(
self._policy.parameters(),
lr=actor_lr,
betas=betas,
)
def _get_q(self, state_batch, action_batch):
"""
Gets the Q values for `action_batch` actions in `state_batch` states
from the critic, rather than the target critic.
Parameters
----------
state_batch : torch.Tensor
The batch of states to calculate the action values in. Of the form
(batch_size, state_dims).
action_batch : torch.Tensor
The batch of actions to calculate the action values of in each
state. Of the form (batch_size, action_dims).
"""
if self._double_q:
q1, q2 = self._critic(state_batch, action_batch)
return torch.min(q1, q2)
else:
return self._critic(state_batch, action_batch)
def _update_critic(self, state_batch, action_batch, reward_batch,
next_state_batch, mask_batch):
"""
Update the critic(s) given a batch of transitions sampled from a replay
buffer.
"""
if self._double_q:
self._update_double_critic(state_batch, action_batch, reward_batch,
next_state_batch, mask_batch)
else:
self._update_single_critic(state_batch, action_batch, reward_batch,
next_state_batch, mask_batch)
# Increment the running total of updates and update the critic target
# if needed
self._update_number += 1
if self._update_number % self._target_update_interval == 0:
self._update_number = 0
nn_utils.soft_update(self._critic_target, self._critic, self._tau)
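            # A sketch of what nn_utils.soft_update presumably does (the
            # Polyak average described in the constructor docstring):
            #
            #     for tp, p in zip(target.parameters(), source.parameters()):
            #         tp.data.copy_((1 - tau) * tp.data + tau * p.data)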
def _update_single_critic(self, state_batch, action_batch, reward_batch,
next_state_batch, mask_batch):
"""
Update the critic using a batch of transitions when using a single Q
critic.
"""
if self._double_q:
raise ValueError("cannot call _update_single_critic when using " +
"a double Q critic")
# When updating Q functions, we don't want to backprop through the
# policy and target network parameters
with torch.no_grad():
# Sample an action in the next state for the SARSA update
next_state_action, next_state_log_pi = \
self._policy.sample(next_state_batch)[:2]
if len(next_state_log_pi.shape) == 1:
next_state_log_pi = next_state_log_pi.unsqueeze(-1)
# Calculate the Q value of the next action in the next state
q_next = self._critic_target(next_state_batch,
next_state_action)
if self._soft_q:
q_next -= self._alpha * next_state_log_pi
# Calculate the target for the SARSA update
q_target = reward_batch + mask_batch * self._gamma * q_next
# Calculate the Q value of each action in each respective state
q = self._critic(state_batch, action_batch)
# Calculate the loss between the target and estimate Q values
q_loss = F.mse_loss(q, q_target)
# Update the critic
self._critic_optim.zero_grad()
q_loss.backward()
self._critic_optim.step()
def _update_double_critic(self, state_batch, action_batch, reward_batch,
next_state_batch, mask_batch):
"""
Update the critic using a batch of transitions when using a double Q
critic.
"""
if not self._double_q:
raise ValueError("cannot call _update_single_critic when using " +
"a double Q critic")
# When updating Q functions, we don't want to backprop through the
# policy and target network parameters
with torch.no_grad():
# Sample an action in the next state for the SARSA update
next_state_action, next_state_log_pi = \
self._policy.sample(next_state_batch)[:2]
# Calculate the action values for the next state
next_q1, next_q2 = self._critic_target(next_state_batch,
next_state_action)
# Double Q: target uses the minimum of the two computed action
# values
min_next_q = torch.min(next_q1, next_q2)
# If using soft action value functions, then adjust the target
if self._soft_q:
min_next_q -= self._alpha * next_state_log_pi
# Calculate the target for the action value function update
q_target = reward_batch + mask_batch * self._gamma * min_next_q
# Calculate the two Q values of each action in each respective state
q1, q2 = self._critic(state_batch, action_batch)
# Calculate the losses on each critic
# JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
q1_loss = F.mse_loss(q1, q_target)
# JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
q2_loss = F.mse_loss(q2, q_target)
q_loss = q1_loss + q2_loss
# Update the critic
self._critic_optim.zero_grad()
q_loss.backward()
self._critic_optim.step()
| 20,671 | 35.587611 | 79 | py |