Dataset schema (column, dtype, observed range or number of distinct values):

id                    int64    0 – 458k
file_name             string   length 4 – 119
file_path             string   length 14 – 227
content               string   length 24 – 9.96M
size                  int64    24 – 9.96M
language              string   1 distinct value
extension             string   14 distinct values
total_lines           int64    1 – 219k
avg_line_length       float64  2.52 – 4.63M
max_line_length       int64    5 – 9.91M
alphanum_fraction     float64  0 – 1
repo_name             string   length 7 – 101
repo_stars            int64    100 – 139k
repo_forks            int64    0 – 26.4k
repo_open_issues      int64    0 – 2.27k
repo_license          string   12 distinct values
repo_extraction_date  string   433 distinct values
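For orientation, a minimal sketch of streaming records with this schema via the Hugging Face datasets library. The dataset path "org/python-repos" is a hypothetical placeholder; the dump does not name the dataset it was exported from.

# Minimal sketch, assuming a Hugging Face dataset with the schema above.
# "org/python-repos" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("org/python-repos", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["file_path"], row["size"], row["repo_license"])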

id: 2,285,900
file_name: densenet.py
file_path: cvlab-yonsei_RankMixup/calibrate/net/densenet.py
content:
'''
Pytorch implementation of DenseNet.
Reference:
[1] Gao Huang, Zhuang Liu, and Kilian Q. Weinberger. Densely connected
    convolutional networks. arXiv preprint arXiv:1608.06993, 2016.
'''
import math

import torch
import torch.nn as nn
import torch.nn.functional as F


class Bottleneck(nn.Module):
    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4*growth_rate)
        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        out = torch.cat([out, x], 1)
        return out


class Transition(nn.Module):
    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        out = self.conv(F.relu(self.bn(x)))
        out = F.avg_pool2d(out, 2)
        return out


class DenseNet(nn.Module):
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10, temp=1.0):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        self.temp = temp

        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward_feature(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        # out = self.linear(out) / self.temp
        return out

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out) / self.temp
        return out


class DenseNet_Tiny(nn.Module):
    # Same as DenseNet, but final average pooling uses an 8x8 window
    # (Tiny-ImageNet inputs are 64x64 rather than 32x32).
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10, temp=1.0):
        super(DenseNet_Tiny, self).__init__()
        self.growth_rate = growth_rate
        self.temp = temp

        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward_feature(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 8)
        out = out.view(out.size(0), -1)
        # out = self.linear(out) / self.temp
        return out

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out) / self.temp
        return out


def densenet121(temp=1.0, **kwargs):
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32, temp=temp, **kwargs)


def densenet121_tiny(temp=1.0, **kwargs):
    return DenseNet_Tiny(Bottleneck, [6, 12, 24, 16], growth_rate=32, temp=temp, **kwargs)


def densenet169(temp=1.0, **kwargs):
    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32, temp=temp, **kwargs)


def densenet201(temp=1.0, **kwargs):
    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32, temp=temp, **kwargs)


def densenet161(temp=1.0, **kwargs):
    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48, temp=temp, **kwargs)
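A short usage sketch (assumed, not part of the repository) to sanity-check the file above: the CIFAR-style densenet121 takes 32x32 RGB inputs, since forward ends with 4x4 average pooling, and returns temperature-scaled logits.

import torch

model = densenet121(temp=1.0, num_classes=10)
x = torch.randn(2, 3, 32, 32)
logits = model(x)                  # (2, 10), already divided by temp
feats = model.forward_feature(x)   # (2, 1024) pooled features before the linear head
print(logits.shape, feats.shape)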
size: 6,643
language: Python
extension: .py
total_lines: 145
avg_line_length: 37.944828
max_line_length: 96
alphanum_fraction: 0.632467
repo_name: cvlab-yonsei/RankMixup
repo_stars: 8
repo_forks: 0
repo_open_issues: 0
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 10:48:01 PM (Europe/Amsterdam)

id: 2,285,901
file_name: wide_resnet.py
file_path: cvlab-yonsei_RankMixup/calibrate/net/wide_resnet.py
content:
'''
Pytorch implementation of wide resnet.
Reference:
[1] S. Zagoruyko and N. Komodakis. Wide residual networks.
    arXiv preprint arXiv:1605.07146, 2016.
'''
import torch
import torch.nn as nn
import math
import random
import numpy as np
import torch.nn.functional as F


def conv3x3(in_planes, out_planes, stride=1):
    "3x3 convolution with padding"
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Wide_ResNet(nn.Module):
    def __init__(self, block, layers, wfactor, num_classes=10, temp=1.0):
        super(Wide_ResNet, self).__init__()
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16*wfactor, layers[0])
        self.layer2 = self._make_layer(block, 32*wfactor, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64*wfactor, layers[2], stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(64*block.expansion*wfactor, num_classes)
        self.temp = temp

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, int(blocks)):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward_feature(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x


class Wide_ResNet_Mixup(nn.Module):
    def __init__(self, block, layers, wfactor, num_classes=10, temp=1.0,
                 mixup_alpha=0.1, layer_mix=0, num_mixup=1):
        super(Wide_ResNet_Mixup, self).__init__()
        # mixup
        self.mixup_alpha = mixup_alpha
        self.num_classes = num_classes
        if layer_mix is None:
            self.layer_mix = 0
        else:
            self.layer_mix = layer_mix
        if num_mixup is not None:
            self.num_mixup = num_mixup

        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16*wfactor, layers[0])
        self.layer2 = self._make_layer(block, 32*wfactor, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64*wfactor, layers[2], stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(64*block.expansion*wfactor, num_classes)
        self.temp = temp

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, int(blocks)):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward_multimixup(self, x, target):
        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        mixup = []
        lam = []
        target_re = []
        if self.layer_mix == 0:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        # x_mix = self.maxpool(x_mix)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        if self.layer_mix == 3:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x, target_re, lam

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x


class Wide_ResNet_Tiny(nn.Module):
    def __init__(self, block, layers, wfactor, num_classes=10, temp=1.0):
        super(Wide_ResNet_Tiny, self).__init__()
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16*wfactor, layers[0])
        self.layer2 = self._make_layer(block, 32*wfactor, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64*wfactor, layers[2], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(2)
        self.fc = nn.Linear(64*block.expansion*wfactor*4, num_classes)
        self.temp = temp

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, int(blocks)):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward_feature(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x


class Wide_ResNet_Mixup_Tiny(nn.Module):
    def __init__(self, block, layers, wfactor, num_classes=10, temp=1.0,
                 mixup_alpha=0.1, layer_mix=5, num_mixup=1):
        super(Wide_ResNet_Mixup_Tiny, self).__init__()
        # mixup
        self.mixup_alpha = mixup_alpha
        self.num_classes = num_classes
        if layer_mix is None:
            self.layer_mix = 0
        else:
            self.layer_mix = layer_mix
        if num_mixup is not None:
            self.num_mixup = num_mixup

        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16*wfactor, layers[0])
        self.layer2 = self._make_layer(block, 32*wfactor, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64*wfactor, layers[2], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(2)
        self.fc = nn.Linear(64*block.expansion*wfactor*4, num_classes)
        self.temp = temp

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, int(blocks)):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward_multimixup(self, x, target):
        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        mixup = []
        lam = []
        target_re = []
        if self.layer_mix == 0:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        if self.layer_mix == 3:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x, target_re, lam

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x


def wide_resnet_cifar(temp=1.0, num_classes=10, depth=26, width=10, **kwargs):
    assert (depth - 2) % 6 == 0
    n = (depth - 2) // 6  # integer division so layer counts are ints
    return Wide_ResNet(BasicBlock, [n, n, n], width, num_classes=num_classes, temp=temp, **kwargs)


def wide_resnet_tiny(temp=1.0, num_classes=200, depth=26, width=10, **kwargs):
    assert (depth - 2) % 6 == 0
    n = (depth - 2) // 6
    return Wide_ResNet_Tiny(BasicBlock, [n, n, n], width, num_classes=num_classes, temp=temp, **kwargs)


def wide_resnet_cifar_mixup(temp=1.0, num_classes=10, depth=26, width=10, **kwargs):
    assert (depth - 2) % 6 == 0
    n = (depth - 2) // 6
    return Wide_ResNet_Mixup(BasicBlock, [n, n, n], width, num_classes=num_classes, temp=temp, **kwargs)


def wide_resnet_tiny_mixup(temp=1.0, num_classes=200, depth=26, width=10, **kwargs):
    assert (depth - 2) % 6 == 0
    n = (depth - 2) // 6
    return Wide_ResNet_Mixup_Tiny(BasicBlock, [n, n, n], width, num_classes=num_classes, temp=temp, **kwargs)


def mixup_process(out, target_reweighted, alpha):
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    indices = np.random.permutation(out.size(0))
    out = out*lam + out[indices]*(1-lam)
    target_shuffled_onehot = target_reweighted[indices]
    target_reweighted = target_reweighted * lam + target_shuffled_onehot * (1 - lam)
    return out, target_reweighted, lam


def to_one_hot(inp, num_classes):
    y_onehot = torch.zeros(inp.size(0), num_classes, device=inp.device, requires_grad=False)
    # y_onehot.zero_()
    y_onehot.scatter_(1, inp.unsqueeze(1).data, 1)
    return y_onehot
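An illustrative sketch (not part of the repository) of the two mixup helpers that close the file: to_one_hot builds one-hot targets and mixup_process mixes a batch with a shuffled copy of itself using a Beta(alpha, alpha) coefficient, so each row of the mixed targets still sums to 1.

import torch

x = torch.randn(4, 3, 32, 32)
y = torch.tensor([0, 1, 2, 3])
y_onehot = to_one_hot(y, num_classes=10)
x_mix, y_mix, lam = mixup_process(x, y_onehot, alpha=0.1)
print(x_mix.shape, y_mix.sum(dim=1), lam)  # rows of y_mix sum to 1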
size: 15,190
language: Python
extension: .py
total_lines: 353
avg_line_length: 32.453258
max_line_length: 118
alphanum_fraction: 0.574189
repo_name: cvlab-yonsei/RankMixup
repo_stars: 8
repo_forks: 0
repo_open_issues: 0
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 10:48:01 PM (Europe/Amsterdam)

id: 2,285,902
file_name: resnet_tiny_imagenet.py
file_path: cvlab-yonsei_RankMixup/calibrate/net/resnet_tiny_imagenet.py
content:
'''
Pytorch implementation of ResNet models.
Reference:
[1] He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image
    recognition. In: CVPR, 2016.
'''
import torch
import math
import torch.nn as nn
import torch.nn.functional as F


# --- HELPERS ---
def conv3x3(in_planes, out_planes, stride=1):
    '''3x3 convolution with padding'''
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


# --- COMPONENTS ---
class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes=200, temp=1.0):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(2)
        self.fc = nn.Linear(512 * block.expansion * 4, num_classes)
        self.temp = temp

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward_feature(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x


def resnet18(temp=1.0, **kwargs):
    model = ResNet(BasicBlock, [2, 2, 2, 2], temp=temp, **kwargs)
    return model


def resnet34(temp=1.0, **kwargs):
    model = ResNet(BasicBlock, [3, 4, 6, 3], temp=temp, **kwargs)
    return model


def resnet50(temp=1.0, **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3], temp=temp, **kwargs)
    return model


def resnet101(temp=1.0, **kwargs):
    model = ResNet(Bottleneck, [3, 4, 23, 3], temp=temp, **kwargs)
    return model


def resnet110(temp=1.0, **kwargs):
    model = ResNet(Bottleneck, [3, 4, 26, 3], temp=temp, **kwargs)
    return model


def resnet152(temp=1.0, **kwargs):
    model = ResNet(Bottleneck, [3, 8, 36, 3], temp=temp, **kwargs)
    return model
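A shape sketch (assumed usage, not from the repository): with 64x64 Tiny-ImageNet inputs, the stride-1 stem plus three stride-2 stages leave an 8x8 map, which AdaptiveAvgPool2d(2) reduces to 2x2, matching the 512 * block.expansion * 4 classifier width.

import torch

model = resnet50(num_classes=200)
x = torch.randn(2, 3, 64, 64)
print(model(x).shape)  # torch.Size([2, 200])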
size: 5,585
language: Python
extension: .py
total_lines: 145
avg_line_length: 30.462069
max_line_length: 109
alphanum_fraction: 0.595357
repo_name: cvlab-yonsei/RankMixup
repo_stars: 8
repo_forks: 0
repo_open_issues: 0
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 10:48:01 PM (Europe/Amsterdam)

id: 2,285,903
file_name: resnet_mixup.py
file_path: cvlab-yonsei_RankMixup/calibrate/net/resnet_mixup.py
content:
import torch
import math
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F

try:
    from torch.hub import load_state_dict_from_url
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url


__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
           'wide_resnet50_2', 'wide_resnet101_2']


model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None, has_dropout=True,
                 mixup_alpha=0.1, layer_mix=5, num_mixup=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        # mixup
        self.mixup_alpha = mixup_alpha
        if num_mixup is not None:
            self.num_mixup = num_mixup
        if layer_mix is None:
            self.layer_mix = 0
        else:
            self.layer_mix = layer_mix

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.num_classes = num_classes

        self.has_dropout = has_dropout
        self.dropout = nn.Dropout(p=0.5, inplace=False)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        if self.has_dropout:
            x = self.dropout(x)
        x = self.fc(x)

        return x

    def forward(self, x):
        return self._forward_impl(x)

    def forward_multimixup(self, x, target):
        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        mixup = []
        lam = []
        target_re = []
        if self.layer_mix == 0:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        if self.has_dropout:
            x = self.dropout(x)
        x = self.fc(x)

        return x, target_re, lam

    def forward_feature(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return x

    def forward_feature_logit(self, x):
        feature = self.forward_feature(x)
        if self.has_dropout:
            logit = self.dropout(feature)
        else:
            logit = feature
        logit = self.fc(logit)
        return feature, logit


class BasicBlock_Tiny(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock_Tiny, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck_Tiny(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck_Tiny, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet_Tiny(nn.Module):
    def __init__(self, block, layers, num_classes=200, temp=1.0,
                 mixup_alpha=0.1, layer_mix=5, num_mixup=None):
        self.inplanes = 64
        super(ResNet_Tiny, self).__init__()

        # mixup
        self.mixup_alpha = mixup_alpha
        self.num_classes = num_classes
        if layer_mix is None:
            self.layer_mix = 0
        else:
            self.layer_mix = layer_mix
        if num_mixup is not None:
            self.num_mixup = num_mixup

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(2)
        self.fc = nn.Linear(512 * block.expansion * 4, num_classes)
        self.temp = temp

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward_multimixup(self, x, target):
        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        mixup = []
        lam = []
        target_re = []
        if self.layer_mix == 0:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # x_mix = self.maxpool(x_mix)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        if self.layer_mix == 4:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x, target_re, lam

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x


class BasicBlock_Cifar(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock_Cifar, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class Bottleneck_Cifar(nn.Module):
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck_Cifar, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class ResNet_Cifar(nn.Module):
    def __init__(self, block, layers, num_classes=10, temp=1.0,
                 mixup_alpha=0.1, layer_mix=0, num_mixup=None):
        super(ResNet_Cifar, self).__init__()
        self.in_planes = 64

        # mixup
        self.mixup_alpha = mixup_alpha
        self.num_classes = num_classes
        if layer_mix is None:
            self.layer_mix = 0
        else:
            self.layer_mix = layer_mix
        if num_mixup is not None:
            self.num_mixup = num_mixup

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512*block.expansion, num_classes)
        self.temp = temp

    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward_multimixup(self, x, target):
        if target is not None:
            target_reweighted = to_one_hot(target, self.num_classes)

        mixup = []
        lam = []
        target_re = []
        if self.layer_mix == 0:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        # x_mix = self.maxpool(x_mix)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        if self.layer_mix == 4:
            for i in range(self.num_mixup):
                x_mix, target_reweighted_curr, lam_current = mixup_process(x, target_reweighted, self.mixup_alpha)
                mixup.append(x_mix)
                lam.append(lam_current)
                target_re.append(target_reweighted_curr)
            x = torch.cat(mixup, dim=0)
            target_re = torch.cat(target_re, dim=0)

        x = F.avg_pool2d(x, 4)
        x = x.view(x.size(0), -1)
        x = self.fc(x) / self.temp

        return x, target_re, lam

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.fc(out) / self.temp
        return out


def resnet50(**kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    return model


def resnet101(**kwargs):
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    return model


def resnet34_tiny(temp=1.0, **kwargs):
    model = ResNet_Tiny(BasicBlock_Tiny, [3, 4, 6, 3], temp=temp, **kwargs)
    return model


def resnet50_tiny(temp=1.0, **kwargs):
    model = ResNet_Tiny(Bottleneck_Tiny, [3, 4, 6, 3], temp=temp, **kwargs)
    return model


def resnet101_tiny(temp=1.0, **kwargs):
    model = ResNet_Tiny(Bottleneck_Tiny, [3, 4, 23, 3], temp=temp, **kwargs)
    return model


def resnet34_cifar(temp=1.0, **kwargs):
    model = ResNet_Cifar(BasicBlock_Cifar, [3, 4, 6, 3], temp=temp, **kwargs)
    return model


def resnet50_cifar(temp=1.0, **kwargs):
    model = ResNet_Cifar(Bottleneck_Cifar, [3, 4, 6, 3], temp=temp, **kwargs)
    return model


def resnet101_cifar(temp=1.0, **kwargs):
    model = ResNet_Cifar(Bottleneck_Cifar, [3, 4, 23, 3], temp=temp, **kwargs)
    return model


def mixup_process(out, target_reweighted, alpha):
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    indices = np.random.permutation(out.size(0))
    out = out*lam + out[indices]*(1-lam)
    target_shuffled_onehot = target_reweighted[indices]
    target_reweighted = target_reweighted * lam + target_shuffled_onehot * (1 - lam)
    return out, target_reweighted, lam


def to_one_hot(inp, num_classes):
    y_onehot = torch.zeros(inp.size(0), num_classes, device=inp.device, requires_grad=False)
    # y_onehot.zero_()
    y_onehot.scatter_(1, inp.unsqueeze(1).data, 1)
    return y_onehot
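A usage sketch (assumed, not from the repository) for the multi-mixup path above: with layer_mix=0 the input batch is mixed num_mixup times and concatenated, so the logits and soft targets have num_mixup * batch_size rows, and the returned list holds the mixing coefficients the RankMixup losses consume.

import torch

model = resnet50_cifar(num_classes=10, mixup_alpha=0.1, layer_mix=0, num_mixup=2)
x = torch.randn(4, 3, 32, 32)
y = torch.randint(0, 10, (4,))
logits, soft_targets, lams = model.forward_multimixup(x, y)
print(logits.shape, soft_targets.shape, len(lams))  # (8, 10), (8, 10), 2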
size: 24,005
language: Python
extension: .py
total_lines: 552
avg_line_length: 33.166667
max_line_length: 118
alphanum_fraction: 0.58991
repo_name: cvlab-yonsei/RankMixup
repo_stars: 8
repo_forks: 0
repo_open_issues: 0
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 10:48:01 PM (Europe/Amsterdam)

id: 2,285,904
file_name: segement_trainer.py
file_path: cvlab-yonsei_RankMixup/calibrate/engine/segement_trainer.py
content:
from typing import Dict
import numpy as np
import os.path as osp
from shutil import copyfile
import time
import json
import logging
import torch
import torch.nn.functional as F
from omegaconf import DictConfig, OmegaConf
from hydra.utils import instantiate
import wandb
from terminaltables.ascii_table import AsciiTable

from calibrate.engine.trainer import Trainer
from calibrate.net import ModelWithTemperature
from calibrate.losses import LogitMarginL1
from calibrate.evaluation import (
    AverageMeter, LossMeter, SegmentEvaluator,
    SegmentCalibrateEvaluator, SegmentLogitsEvaluator
)
from calibrate.utils import (
    load_train_checkpoint, load_checkpoint, save_checkpoint, round_dict
)
from calibrate.utils.torch_helper import to_numpy, get_lr

logger = logging.getLogger(__name__)


class SegmentTrainer(Trainer):
    def __init__(self, cfg: DictConfig) -> None:
        super().__init__(cfg)

    def build_meter(self):
        self.batch_time_meter = AverageMeter()
        self.data_time_meter = AverageMeter()
        self.num_classes = self.cfg.model.num_classes
        if hasattr(self.loss_func, "names"):
            self.loss_meter = LossMeter(
                num_terms=len(self.loss_func.names),
                names=self.loss_func.names
            )
        else:
            self.loss_meter = LossMeter()
        self.evaluator = SegmentEvaluator(
            self.train_loader.dataset.classes,
            ignore_index=255
        )
        self.calibrate_evaluator = SegmentCalibrateEvaluator(
            self.num_classes,
            num_bins=self.cfg.calibrate.num_bins,
            ignore_index=255,
            device=self.device
        )
        # self.logits_evaluator = SegmentLogitsEvaluator(ignore_index=255)

    def reset_meter(self):
        self.batch_time_meter.reset()
        self.data_time_meter.reset()
        self.loss_meter.reset()
        self.evaluator.reset()
        # self.logits_evaluator.reset()

    def log_iter_info(self, iter, max_iter, epoch, phase="Train"):
        log_dict = {}
        log_dict["data_time"] = self.data_time_meter.val
        log_dict["batch_time"] = self.batch_time_meter.val
        log_dict.update(self.loss_meter.get_vals())
        log_dict.update(self.evaluator.curr_score())
        # log_dict.update(self.logits_evaluator.curr_score())
        # log_dict.update(self.probs_evaluator.curr_score())
        logger.info("{} Iter[{}/{}][{}]\t{}".format(
            phase, iter + 1, max_iter, epoch + 1,
            json.dumps(round_dict(log_dict))
        ))
        if self.cfg.wandb.enable and phase.lower() == "train":
            wandb_log_dict = {"iter": epoch * max_iter + iter}
            wandb_log_dict.update(dict(
                ("{}/Iter/{}".format(phase, key), value) for (key, value) in log_dict.items()
            ))
            wandb.log(wandb_log_dict)

    def log_epoch_info(self, epoch, phase="Train"):
        log_dict = {}
        log_dict["samples"] = self.evaluator.num_samples()
        log_dict["lr"] = get_lr(self.optimizer)
        log_dict.update(self.loss_meter.get_avgs())
        if isinstance(self.loss_func, LogitMarginL1):
            log_dict["alpha"] = self.loss_func.alpha
        metric = self.evaluator.mean_score()
        log_dict.update(metric)
        # log_dict.update(self.logits_evaluator.mean_score())
        # log_dict.update(self.probs_evaluator.mean_score())
        logger.info("{} Epoch[{}]\t{}".format(
            phase, epoch + 1, json.dumps(round_dict(log_dict))
        ))
        if self.cfg.wandb.enable:
            wandb_log_dict = {"epoch": epoch}
            wandb_log_dict.update(dict(
                ("{}/{}".format(phase, key), value) for (key, value) in log_dict.items()
            ))
            wandb.log(wandb_log_dict)

    def post_temperature(self):
        _, self.val_loader = instantiate(self.cfg.data.object.trainval)
        model_with_temp = ModelWithTemperature(self.model, device=self.device)
        model_with_temp.set_temperature_seg(self.val_loader)
        temp = model_with_temp.get_temperature()
        if self.cfg.wandb.enable:
            wandb.log({
                "temperature": temp
            })
        return temp

    def log_eval_epoch_info(self, epoch, phase="Val"):
        log_dict = {}
        log_dict["samples"] = self.evaluator.num_samples()
        log_dict.update(self.loss_meter.get_avgs())
        metric = self.evaluator.mean_score()
        log_dict.update(metric)
        if phase.lower() == "test":
            calibrate_metric, calibrate_table_data = self.calibrate_evaluator.mean_score(print=False)
            log_dict.update(calibrate_metric)
        # log_dict.update(self.logits_evaluator.mean_score())
        # log_dict.update(self.probs_evaluator.mean_score())
        logger.info("{} Epoch[{}]\t{}".format(
            phase, epoch + 1, json.dumps(round_dict(log_dict))
        ))
        class_table_data = self.evaluator.class_score(print=True, return_dataframe=True)
        if phase.lower() == "test":
            logger.info("\n" + AsciiTable(calibrate_table_data).table)
        if self.cfg.wandb.enable:
            wandb_log_dict = {"epoch": epoch}
            wandb_log_dict.update(dict(
                ("{}/{}".format(phase, key), value) for (key, value) in log_dict.items()
            ))
            wandb_log_dict["{}/segment_score_table".format(phase)] = (
                wandb.Table(
                    dataframe=class_table_data
                )
            )
            if phase.lower() == "test":
                wandb_log_dict["{}/calibrate_score_table".format(phase)] = (
                    wandb.Table(
                        columns=calibrate_table_data[0],
                        data=calibrate_table_data[1:]
                    )
                )
            # if "test" in phase.lower() and self.cfg.calibrate.visualize:
            #     fig_reliab, fig_hist = self.calibrate_evaluator.plot_reliability_diagram()
            #     wandb_log_dict["{}/calibrate_reliability".format(phase)] = fig_reliab
            #     wandb_log_dict["{}/confidence_histogram".format(phase)] = fig_hist
            wandb.log(wandb_log_dict)

    def train_epoch(self, epoch: int):
        self.reset_meter()
        self.model.train()

        max_iter = len(self.train_loader)

        end = time.time()
        for i, (inputs, labels) in enumerate(self.train_loader):
            # compute the time for data loading
            self.data_time_meter.update(time.time() - end)
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            # forward
            outputs = self.model(inputs)
            if isinstance(outputs, Dict):
                outputs = outputs["out"]
            loss = self.loss_func(outputs, labels)
            if isinstance(loss, tuple):
                # For compounding loss, make sure the first term is the overall loss
                loss_total = loss[0]
            else:
                loss_total = loss
            # backward
            self.optimizer.zero_grad()
            loss_total.backward()
            if self.cfg.train.clip_grad_norm:
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 2)
            self.optimizer.step()
            # metric
            self.loss_meter.update(loss, inputs.size(0))
            predicts = F.softmax(outputs, dim=1)
            pred_labels = torch.argmax(predicts, dim=1)
            self.evaluator.update(
                pred_labels.detach().cpu().numpy(),
                labels.detach().cpu().numpy()
            )
            # self.logits_evaluator.update(to_numpy(outputs), to_numpy(labels))
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
            if (i + 1) % self.cfg.log_period == 0:
                self.log_iter_info(i, max_iter, epoch)
            end = time.time()
        self.log_epoch_info(epoch)

    @torch.no_grad()
    def eval_epoch(self, data_loader, epoch, temp=1.0, ts=False, phase="Val"):
        self.reset_meter()
        self.model.eval()

        max_iter = len(data_loader)
        end = time.time()
        for i, (inputs, labels) in enumerate(data_loader):
            self.data_time_meter.update(time.time() - end)
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            outputs = self.model(inputs)
            # logits = self.model.forward_logit(inputs)
            if ts:
                outputs = outputs / temp
            if isinstance(outputs, Dict):
                outputs = outputs["out"]
            # loss = self.loss_func(outputs, labels)
            # metric
            # self.loss_meter.update(loss)
            predicts = F.softmax(outputs, dim=1)
            pred_labels = torch.argmax(predicts, dim=1)
            self.evaluator.update(
                to_numpy(pred_labels), to_numpy(labels)
            )
            if phase.lower() == "test":
                self.calibrate_evaluator.update(
                    outputs, labels
                )
            # self.logits_evaluator(
            #     np.expand_dims(to_numpy(outputs), axis=0),
            #     np.expand_dims(to_numpy(labels), axis=0)
            # )
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
            # logging
            # if (i + 1) % self.cfg.log_period == 0:
            #     self.log_iter_info(i, max_iter, epoch, phase)
            end = time.time()
        self.log_eval_epoch_info(epoch, phase)

        return self.loss_meter.avg(0), self.evaluator.mean_score(main=True)

    def test(self):
        logger.info("We are almost done : final testing ...")
        self.test_loader = instantiate(self.cfg.data.object.test)
        # test best pth
        # epoch = self.best_epoch
        # logger.info("#################")
        # logger.info(" Test at best epoch {}".format(epoch + 1))
        # logger.info("#################")
        # logger.info("Best epoch[{}] :".format(epoch + 1))
        load_checkpoint(
            osp.join(self.work_dir, "best.pth"), self.model, self.device
        )
        self.eval_epoch(self.test_loader, epoch=100, phase="Test")
        if self.cfg.test.post_temperature:
            logger.info("Test with post-temperature scaling!")
            temp = self.post_temperature()
            self.eval_epoch(self.test_loader, epoch=100, phase="testPT", temp=temp, ts=True)

    def run(self):
        self.train()
        self.test()
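A concept sketch (not from the repository): the trainer's ts path only divides logits by a scalar temperature before softmax, which softens or sharpens confidences without changing the argmax prediction.

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, -1.0]])
for T in (1.0, 2.0):
    print(T, F.softmax(logits / T, dim=1))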
size: 10,550
language: Python
extension: .py
total_lines: 243
avg_line_length: 32.633745
max_line_length: 101
alphanum_fraction: 0.580168
repo_name: cvlab-yonsei/RankMixup
repo_stars: 8
repo_forks: 0
repo_open_issues: 0
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 10:48:01 PM (Europe/Amsterdam)

id: 2,285,905
file_name: nlp_trainer.py
file_path: cvlab-yonsei_RankMixup/calibrate/engine/nlp_trainer.py
content:
import math
import time
import os.path as osp
from shutil import copyfile
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import wandb
from omegaconf import DictConfig, OmegaConf
from hydra.utils import instantiate

from calibrate.engine.trainer import Trainer
from calibrate.net import ModelWithTemperature
from calibrate.utils.torch_helper import to_numpy
from calibrate.utils import (
    load_train_checkpoint, load_checkpoint, save_checkpoint, round_dict
)

logger = logging.getLogger(__name__)


class NLPTrainer(Trainer):
    def __init__(self, cfg: DictConfig) -> None:
        super().__init__(cfg)

    def build_data_loader(self) -> None:
        (
            self.embedding_matrix,
            self.train_datas, self.train_labels,
            self.val_datas, self.val_labels,
            self.test_datas, self.test_labels,
            self.num_words, self.embedding_dim
        ) = instantiate(self.cfg.data.object.all)
        self.batch_size = self.cfg.data.batch_size
        self.num_classes = 20

    def build_model(self) -> None:
        # embedding
        self.embedding_model = nn.Embedding(self.num_words, self.embedding_dim)
        self.embedding_model.to(self.device)
        self.embedding_model.state_dict()["weight"].copy_(self.embedding_matrix)
        # network
        self.model = instantiate(self.cfg.model.object)
        self.model.to(self.device)
        # loss
        self.loss_func = instantiate(self.cfg.loss.object)
        self.loss_func.to(self.device)
        logger.info(self.loss_func)
        logger.info("Model initialized")

    def train_epoch(self, epoch: int):
        self.reset_meter()
        self.model.train()
        self.embedding_model.eval()

        perm = np.random.permutation(np.arange(len(self.train_datas)))
        perm_train = np.take(self.train_datas, perm, axis=0)
        perm_labels = np.take(self.train_labels, perm, axis=0)

        max_iter = perm_train.shape[0] // self.batch_size
        end = time.time()
        for i in range(max_iter):
            inputs = torch.from_numpy(
                perm_train[i * self.batch_size:(i + 1) * self.batch_size]
            ).type(torch.LongTensor).to(self.device)
            labels = torch.from_numpy(
                np.argmax(perm_labels[i * self.batch_size:(i + 1) * self.batch_size], 1)
            ).to(self.device)
            self.data_time_meter.update(time.time() - end)

            with torch.no_grad():
                embs = self.embedding_model(inputs)
            outputs = self.model(embs)
            loss = self.loss_func(outputs, labels)
            if isinstance(loss, tuple):
                loss_total = loss[0]
            else:
                loss_total = loss
            # backward
            self.optimizer.zero_grad()
            loss_total.backward()
            self.optimizer.step()
            # metric
            self.loss_meter.update(loss, inputs.size(0))
            predicts = F.softmax(outputs, dim=1)
            self.evaluator.update(
                to_numpy(predicts), to_numpy(labels)
            )
            self.logits_evaluator.update(to_numpy(outputs))
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
            if (i + 1) % self.cfg.log_period == 0:
                self.log_iter_info(i, max_iter, epoch)
            end = time.time()
        self.log_epoch_info(epoch)

    @torch.no_grad()
    def eval_epoch(
        self, eval_data, eval_labels, epoch,
        phase="Val",
        temp=1,
        post_temp=False
    ):
        self.reset_meter()
        self.model.eval()
        self.embedding_model.eval()

        # true division: floor-dividing inside math.ceil would silently drop the final partial batch
        max_iter = math.ceil(eval_data.shape[0] / self.batch_size)
        end = time.time()
        for i in range(max_iter):
            inputs = torch.from_numpy(
                eval_data[i * self.batch_size:min((i + 1) * self.batch_size, eval_data.shape[0])]
            ).type(torch.LongTensor).to(self.device)
            labels = torch.from_numpy(
                np.argmax(eval_labels[i * self.batch_size:min((i + 1) * self.batch_size, eval_data.shape[0])], 1)
            ).to(self.device)
            embs = self.embedding_model(inputs)
            outputs = self.model(embs)
            if post_temp:
                outputs = outputs / temp
            loss = self.loss_func(outputs, labels)
            # metric
            self.loss_meter.update(loss)
            self.calibrate_evaluator.update(outputs, labels)
            self.logits_evaluator.update(to_numpy(outputs))
            predicts = F.softmax(outputs, dim=1)
            self.evaluator.update(
                to_numpy(predicts), to_numpy(labels)
            )
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
            # logging
            if (i + 1) % self.cfg.log_period == 0:
                self.log_iter_info(i, max_iter, epoch, phase)
            end = time.time()
        self.log_eval_epoch_info(epoch, phase)

        return self.loss_meter.avg(0), self.evaluator.mean_score(all_metric=False)[0]

    def train(self):
        self.start_or_resume()
        logger.info(
            "Everything is perfect so far. Let's start training. Good luck!"
        )
        for epoch in range(self.start_epoch, self.max_epoch):
            logger.info("=" * 20)
            logger.info(" Start epoch {}".format(epoch + 1))
            logger.info("=" * 20)
            self.train_epoch(epoch)
            val_loss, val_score = self.eval_epoch(self.val_datas, self.val_labels, epoch, phase="Val")
            # run lr scheduler
            self.scheduler.step()
            if self.best_score is None or val_score > self.best_score:
                self.best_score, self.best_epoch = val_score, epoch
                best_checkpoint = True
            else:
                best_checkpoint = False
            save_checkpoint(
                self.work_dir, self.model, self.optimizer, self.scheduler,
                epoch=epoch,
                best_checkpoint=best_checkpoint,
                val_score=val_score,
                keep_checkpoint_num=self.cfg.train.keep_checkpoint_num
            )
            # logging best performance on val so far
            logger.info(
                "Epoch[{}]\tBest {} on Val : {:.4f} at epoch {}".format(
                    epoch + 1, self.evaluator.main_metric(),
                    self.best_score, self.best_epoch + 1
                )
            )
            if self.cfg.wandb.enable and best_checkpoint:
                wandb.log({
                    "epoch": epoch,
                    "Val/best_epoch": self.best_epoch,
                    "Val/best_{}".format(self.evaluator.main_metric()): self.best_score,
                    "Val/best_classify_score_table": self.evaluator.wandb_score_table(),
                    "Val/best_calibrate_score_table": self.calibrate_evaluator.wandb_score_table()
                })
        if self.cfg.wandb.enable:
            copyfile(
                osp.join(self.work_dir, "best.pth"),
                osp.join(self.work_dir, "{}-best.pth".format(wandb.run.name))
            )

    def post_temperature(self):
        model_with_temp = ModelWithTemperature(self.model, device=self.device)
        model_with_temp.set_temperature_ng(
            self.embedding_model, self.val_datas, self.val_labels,
            batch_size=self.batch_size
        )
        temp = model_with_temp.get_temperature()
        wandb.log({
            "temperature": temp
        })
        return temp

    def test(self):
        logger.info("We are almost done : final testing ...")
        # test best pth
        epoch = self.best_epoch
        logger.info("#################")
        logger.info(" Test at best epoch {}".format(epoch + 1))
        logger.info("#################")
        logger.info("Best epoch[{}] :".format(epoch + 1))
        load_checkpoint(
            osp.join(self.work_dir, "best.pth"), self.model, self.device
        )
        self.eval_epoch(self.test_datas, self.test_labels, epoch, phase="Test")
        temp = self.post_temperature()
        self.eval_epoch(self.test_datas, self.test_labels, epoch, phase="TestPT", temp=temp, post_temp=True)
8,327
Python
.py
201
30.373134
111
0.571464
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,906
tester.py
cvlab-yonsei_RankMixup/calibrate/engine/tester.py
import os.path as osp
import time
import json
import logging
import numpy as np
import torch
import torch.nn.functional as F
from omegaconf import DictConfig, OmegaConf
from hydra.utils import instantiate
import wandb
from terminaltables.ascii_table import AsciiTable
from typing import Optional

from calibrate.net import ModelWithTemperature
from calibrate.evaluation import (
    AverageMeter, LossMeter, ClassificationEvaluator, CalibrateEvaluator
)
from calibrate.utils import (
    load_train_checkpoint, load_checkpoint, save_checkpoint, round_dict
)
from calibrate.utils.torch_helper import to_numpy, get_lr

logger = logging.getLogger(__name__)


class Tester:
    def __init__(self, cfg: DictConfig) -> None:
        self.cfg = cfg
        self.work_dir = self.cfg.work_dir
        self.device = torch.device(self.cfg.device)
        self.build_data_loader()
        self.build_model(self.cfg.test.checkpoint)
        self.build_meter()
        self.init_wandb_or_not()

    def build_data_loader(self) -> None:
        # data pipeline
        self.test_loader = instantiate(self.cfg.data.object.test)

    def build_model(self, checkpoint: Optional[str] = "") -> None:
        self.model = instantiate(self.cfg.model.object)
        self.model.to(self.device)
        logger.info("Model initialized")
        self.checkpoint_path = osp.join(
            self.work_dir, "last.pth" if checkpoint == "" else checkpoint  # best.pth
        )
        load_checkpoint(self.checkpoint_path, self.model, self.device)

    def build_meter(self):
        self.batch_time_meter = AverageMeter()
        self.num_classes = self.cfg.model.num_classes
        self.evaluator = ClassificationEvaluator(self.num_classes)
        self.calibrate_evaluator = CalibrateEvaluator(
            self.num_classes,
            num_bins=self.cfg.calibrate.num_bins,
            device=self.device,
        )

    def reset_meter(self):
        self.batch_time_meter.reset()
        self.evaluator.reset()
        self.calibrate_evaluator.reset()

    def init_wandb_or_not(self) -> None:
        if self.cfg.wandb.enable:
            wandb.init(
                project=self.cfg.wandb.project,
                entity=self.cfg.wandb.entity,
                config=OmegaConf.to_container(self.cfg, resolve=True),
                tags=["test"],
            )
            wandb.run.name = "{}-{}-{}".format(
                wandb.run.id, self.cfg.model.name, self.cfg.loss.name
            )
            wandb.run.save()
            wandb.watch(self.model, log=None)
            logger.info("Wandb initialized : {}".format(wandb.run.name))

    def mixup_data(self, x, y, alpha=1.0, use_cuda=True):
        '''Returns mixed inputs, pairs of targets, and lambda'''
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)
        else:
            # fall back to no mixing so lam is always defined
            lam = 1
        batch_size = x.size()[0]
        if use_cuda:
            index = torch.randperm(batch_size).cuda()
        else:
            index = torch.randperm(batch_size)
        mixed_x = lam * x + (1 - lam) * x[index, :]
        y_a, y_b = y, y[index]
        return mixed_x, y_a, y_b, lam

    @torch.no_grad()
    def eval_epoch(
        self, data_loader,
        phase="Val",
        temp=1.0,
        post_temp=False,
    ) -> None:
        self.reset_meter()
        self.model.eval()
        end = time.time()
        for i, (inputs, labels) in enumerate(data_loader):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            # forward
            outputs = self.model(inputs)
            # logits = self.model.forward_logit(inputs)
            if post_temp:
                outputs = outputs / temp
            # metric
            self.calibrate_evaluator.update(outputs, labels)
            predicts = F.softmax(outputs, dim=1)
            self.evaluator.update(
                to_numpy(predicts), to_numpy(labels)
            )
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
            end = time.time()
        self.log_eval_epoch_info(phase)
        if self.cfg.test.save_logits:
            logits_save_path = (
                osp.splitext(self.checkpoint_path)[0]
                + "_logits"
                + ("_pt.npz" if post_temp else ".npz")
            )
            self.calibrate_evaluator.save_npz(logits_save_path)

    def log_eval_epoch_info(self, phase="Val"):
        log_dict = {}
        log_dict["samples"] = self.evaluator.num_samples()
        classify_metric, classify_table_data = self.evaluator.mean_score(print=False)
        log_dict.update(classify_metric)
        calibrate_metric, calibrate_table_data = self.calibrate_evaluator.mean_score(print=False)
        log_dict.update(calibrate_metric)
        logger.info("{} Epoch\t{}".format(
            phase, json.dumps(round_dict(log_dict))
        ))
        logger.info("\n" + AsciiTable(classify_table_data).table)
        logger.info("\n" + AsciiTable(calibrate_table_data).table)
        if self.cfg.wandb.enable:
            wandb_log_dict = {}
            wandb_log_dict.update(dict(
                ("{}/{}".format(phase, key), value) for (key, value) in log_dict.items()
            ))
            wandb_log_dict["{}/classify_score_table".format(phase)] = (
                wandb.Table(
                    columns=classify_table_data[0],
                    data=classify_table_data[1:]
                )
            )
            wandb_log_dict["{}/calibrate_score_table".format(phase)] = (
                wandb.Table(
                    columns=calibrate_table_data[0],
                    data=calibrate_table_data[1:]
                )
            )
            if "test" in phase.lower() and self.cfg.calibrate.visualize:
                fig_reliab, fig_hist = self.calibrate_evaluator.plot_reliability_diagram()
                wandb_log_dict["{}/calibrate_reliability".format(phase)] = fig_reliab
                wandb_log_dict["{}/confidence_histogram".format(phase)] = fig_hist
            wandb.log(wandb_log_dict)

    def post_temperature(self):
        _, self.val_loader = instantiate(self.cfg.data.object.trainval)
        model_with_temp = ModelWithTemperature(self.model, device=self.device)
        model_with_temp.set_temperature(self.val_loader)
        temp = model_with_temp.get_temperature()
        if self.cfg.wandb.enable:
            wandb.log({
                "temperature": temp
            })
        return temp

    def test(self):
        logger.info(
            "Everything is perfect so far. Let's start testing. Good luck!"
        )
        self.eval_epoch(self.test_loader, phase="Test")
        if self.cfg.test.post_temperature:
            logger.info("Test with post-temperature scaling!")
            temp = self.post_temperature()
            self.eval_epoch(self.test_loader, phase="TestPT", temp=temp, post_temp=True)

    def run(self):
        self.test()
7,003
Python
.py
172
30.482558
97
0.595516
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,907
trainer.py
cvlab-yonsei_RankMixup/calibrate/engine/trainer.py
import os.path as osp
from shutil import copyfile
import time
import json
import logging
import numpy as np
import torch
import torch.nn.functional as F
from omegaconf import DictConfig, OmegaConf
from hydra.utils import instantiate
import wandb
from terminaltables.ascii_table import AsciiTable

from calibrate.net import ModelWithTemperature
from calibrate.losses import LogitMarginL1
from calibrate.evaluation import (
    AverageMeter, LossMeter, ClassificationEvaluator,
    CalibrateEvaluator, LogitsEvaluator, ProbsEvaluator,
    LT_ClassificationEvaluator
)
from calibrate.utils import (
    load_train_checkpoint, load_checkpoint, save_checkpoint, round_dict
)
from calibrate.utils.torch_helper import to_numpy, get_lr

logger = logging.getLogger(__name__)


class Trainer:
    def __init__(self, cfg: DictConfig) -> None:
        self.cfg = cfg
        self.work_dir = self.cfg.work_dir
        self.device = torch.device(self.cfg.device)
        self.build_data_loader()
        self.build_model()
        self.build_solver()
        self.build_meter()
        self.init_wandb_or_not()

    def build_data_loader(self) -> None:
        # data pipeline
        self.train_loader, self.val_loader = instantiate(self.cfg.data.object.trainval)
        logger.info("Data pipeline initialized")

    def build_model(self) -> None:
        # network
        self.model = instantiate(self.cfg.model.object)
        self.model.to(self.device)
        if hasattr(self.cfg.loss, 'num_classes'):
            self.cfg.loss.num_classes = self.cfg.model.num_classes
        self.loss_func = instantiate(self.cfg.loss.object)
        self.loss_func.to(self.device)
        logger.info(self.loss_func)
        logger.info("Model initialized")
        self.mixup = self.cfg.train.mixup

    def build_solver(self) -> None:
        # build solver
        parameters = [
            {"params": self.model.parameters(), "lr": self.cfg.optim.lr},
        ]
        if self.cfg.optim.name == 'sgd':
            self.optimizer = torch.optim.SGD(
                parameters,
                momentum=self.cfg.optim.momentum,
                weight_decay=self.cfg.optim.weight_decay
            )
        else:
            raise NotImplementedError
        self.scheduler = instantiate(
            self.cfg.scheduler.object, self.optimizer
        )
        logger.info("Solver initialized")

    def init_wandb_or_not(self) -> None:
        if self.cfg.wandb.enable:
            wandb.init(
                project=self.cfg.wandb.project,
                entity=self.cfg.wandb.entity,
                config=OmegaConf.to_container(self.cfg, resolve=True),
                tags=["train"],
            )
            wandb.run.name = "{}-{}-{}".format(
                wandb.run.id, self.cfg.model.name, self.cfg.loss.name
            )
            wandb.run.save()
            wandb.watch(self.model, log=None)
            logger.info("Wandb initialized : {}".format(wandb.run.name))

    def start_or_resume(self):
        if self.cfg.train.resume:
            self.start_epoch, self.best_epoch, self.best_score = (
                load_train_checkpoint(
                    self.work_dir, self.device, self.model,
                    optimizer=self.optimizer,
                    scheduler=self.scheduler
                )
            )
        else:
            self.start_epoch, self.best_epoch, self.best_score = 0, -1, None
        self.max_epoch = self.cfg.train.max_epoch

    def build_meter(self):
        self.batch_time_meter = AverageMeter()
        self.data_time_meter = AverageMeter()
        self.num_classes = self.cfg.model.num_classes
        if hasattr(self.loss_func, "names"):
            self.loss_meter = LossMeter(
                num_terms=len(self.loss_func.names),
                names=self.loss_func.names
            )
        else:
            self.loss_meter = LossMeter()
        if self.cfg.data.name == 'cifar10_lt' or self.cfg.data.name == 'cifar100_lt':
            self.evaluator = LT_ClassificationEvaluator(self.num_classes)
        else:
            self.evaluator = ClassificationEvaluator(self.num_classes)
        self.calibrate_evaluator = CalibrateEvaluator(
            self.num_classes,
            num_bins=self.cfg.calibrate.num_bins,
            device=self.device,
        )
        self.logits_evaluator = LogitsEvaluator()
        # self.probs_evaluator = ProbsEvaluator(self.num_classes)

    def reset_meter(self):
        self.batch_time_meter.reset()
        self.data_time_meter.reset()
        self.loss_meter.reset()
        self.evaluator.reset()
        self.calibrate_evaluator.reset()
        self.logits_evaluator.reset()

    def log_iter_info(self, iter, max_iter, epoch, phase="Train"):
        log_dict = {}
        log_dict["data_time"] = self.data_time_meter.val
        log_dict["batch_time"] = self.batch_time_meter.val
        log_dict.update(self.loss_meter.get_vals())
        log_dict.update(self.evaluator.curr_score())
        log_dict.update(self.logits_evaluator.curr_score())
        # log_dict.update(self.probs_evaluator.curr_score())
        logger.info("{} Iter[{}/{}][{}]\t{}".format(
            phase, iter + 1, max_iter, epoch + 1,
            json.dumps(round_dict(log_dict))
        ))
        if self.cfg.wandb.enable and phase.lower() == "train":
            wandb_log_dict = {"iter": epoch * max_iter + iter}
            wandb_log_dict.update(dict(
                ("{}/Iter/{}".format(phase, key), value) for (key, value) in log_dict.items()
            ))
            wandb.log(wandb_log_dict)

    def log_epoch_info(self, epoch, phase="Train"):
        log_dict = {}
        log_dict["samples"] = self.evaluator.num_samples()
        log_dict["lr"] = get_lr(self.optimizer)
        log_dict.update(self.loss_meter.get_avgs())
        if isinstance(self.loss_func, LogitMarginL1):
            log_dict["alpha"] = self.loss_func.alpha
        metric, table_data = self.evaluator.mean_score(print=False)
        log_dict.update(metric)
        log_dict.update(self.logits_evaluator.mean_score())
        # log_dict.update(self.probs_evaluator.mean_score())
        logger.info("{} Epoch[{}]\t{}".format(
            phase, epoch + 1, json.dumps(round_dict(log_dict))
        ))
        if self.cfg.wandb.enable:
            wandb_log_dict = {"epoch": epoch}
            wandb_log_dict.update(dict(
                ("{}/{}".format(phase, key), value) for (key, value) in log_dict.items()
            ))
            if phase.lower() != "train":
                wandb_log_dict["{}/score_table".format(phase)] = wandb.Table(
                    columns=table_data[0], data=table_data[1:]
                )
            wandb.log(wandb_log_dict)

    def log_eval_epoch_info(self, epoch, phase="Val"):
        log_dict = {}
        log_dict["samples"] = self.evaluator.num_samples()
        log_dict.update(self.loss_meter.get_avgs())
        classify_metric, classify_table_data = self.evaluator.mean_score(print=False)
        log_dict.update(classify_metric)
        calibrate_metric, calibrate_table_data = self.calibrate_evaluator.mean_score(print=False)
        log_dict.update(calibrate_metric)
        log_dict.update(self.logits_evaluator.mean_score())
        # log_dict.update(self.probs_evaluator.mean_score())
        logger.info("{} Epoch[{}]\t{}".format(
            phase, epoch + 1, json.dumps(round_dict(log_dict))
        ))
        logger.info("\n" + AsciiTable(classify_table_data).table)
        logger.info("\n" + AsciiTable(calibrate_table_data).table)
        if self.cfg.wandb.enable:
            wandb_log_dict = {"epoch": epoch}
            wandb_log_dict.update(dict(
                ("{}/{}".format(phase, key), value) for (key, value) in log_dict.items()
            ))
            wandb_log_dict["{}/classify_score_table".format(phase)] = (
                wandb.Table(
                    columns=classify_table_data[0],
                    data=classify_table_data[1:]
                )
            )
            wandb_log_dict["{}/calibrate_score_table".format(phase)] = (
                wandb.Table(
                    columns=calibrate_table_data[0],
                    data=calibrate_table_data[1:]
                )
            )
            if "test" in phase.lower() and self.cfg.calibrate.visualize:
                fig_reliab, fig_hist = self.calibrate_evaluator.plot_reliability_diagram()
                wandb_log_dict["{}/calibrate_reliability".format(phase)] = fig_reliab
                wandb_log_dict["{}/confidence_histogram".format(phase)] = fig_hist
            wandb.log(wandb_log_dict)

    def mixup_data(self, x, y, alpha=1.0, use_cuda=True):
        '''Returns mixed inputs, pairs of targets, and lambda'''
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)
        else:
            lam = 1
        batch_size = x.size()[0]
        if use_cuda:
            index = torch.randperm(batch_size).cuda()
        else:
            index = torch.randperm(batch_size)
        mixed_x = lam * x + (1 - lam) * x[index, :]
        y_a, y_b = y, y[index]
        return mixed_x, y_a, y_b, lam

    def mixup_criterion(self, criterion, pred, y_a, y_b, lam):
        return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)

    def train_epoch(self, epoch: int):
        self.reset_meter()
        self.model.train()
        if self.cfg.data.name == 'cifar10_lt' or self.cfg.data.name == 'cifar100_lt':
            class_num = torch.zeros(self.num_classes).cuda()
            correct = torch.zeros(self.num_classes).cuda()
        max_iter = len(self.train_loader)
        # max_iter = len(self.val_loader)
        end = time.time()
        for i, (inputs, labels) in enumerate(self.train_loader):
            # compute the time for data loading
            self.data_time_meter.update(time.time() - end)
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            # forward
            if self.mixup:
                outputs = self.model(inputs)
                mixup, target_re, lam = self.model.forward_multimixup(inputs, labels)
                loss = self.loss_func(outputs, labels, mixup, target_re, lam)
            else:
                outputs = self.model(inputs)
                loss = self.loss_func(outputs, labels)
            if isinstance(loss, tuple):
                loss_total = loss[0]
            else:
                loss_total = loss
            # backward
            self.optimizer.zero_grad()
            loss_total.backward()
            if self.cfg.train.clip_grad_norm:
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 2)
            self.optimizer.step()
            # metric
            self.loss_meter.update(loss, inputs.size(0))
            predicts = F.softmax(outputs, dim=1)
            if self.cfg.data.name == 'cifar10_lt' or self.cfg.data.name == 'cifar100_lt':
                _, predicted = predicts.max(1)
                target_one_hot = F.one_hot(labels, self.num_classes)
                predict_one_hot = F.one_hot(predicted, self.num_classes)
                class_num = class_num + target_one_hot.sum(dim=0).to(torch.float)
                correct = correct + (target_one_hot + predict_one_hot == 2).sum(dim=0).to(torch.float)
                self.evaluator.update(
                    to_numpy(predicts), to_numpy(labels),
                    to_numpy(correct), to_numpy(class_num),
                    self.cfg.data.head_class_idx,
                    self.cfg.data.med_class_idx,
                    self.cfg.data.tail_class_idx
                )
            else:
                self.evaluator.update(
                    to_numpy(predicts), to_numpy(labels)
                )
            # pred_labels = torch.argmax(predicts, dim=1)
            self.logits_evaluator.update(to_numpy(outputs))
            # self.probs_evaluator.update(to_numpy(predicts))
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
            if (i + 1) % self.cfg.log_period == 0:
                self.log_iter_info(i, max_iter, epoch)
            end = time.time()
        self.log_epoch_info(epoch)

    @torch.no_grad()
    def eval_epoch(
        self, data_loader, epoch,
        phase="Val",
        temp=1.0,
        post_temp=False
    ):
        self.reset_meter()
        self.model.eval()
        if self.cfg.data.name == 'cifar10_lt' or self.cfg.data.name == 'cifar100_lt':
            class_num = torch.zeros(self.num_classes).cuda()
            correct = torch.zeros(self.num_classes).cuda()
        max_iter = len(data_loader)
        end = time.time()
        for i, (inputs, labels) in enumerate(data_loader):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            # forward
            outputs = self.model(inputs)
            if post_temp:
                # apply the tuned temperature before loss and metrics,
                # as in Tester.eval_epoch and NLPTrainer.eval_epoch
                outputs = outputs / temp
            if self.mixup:
                mixup, target_re, lam = self.model.forward_multimixup(inputs, labels)
                loss = self.loss_func(outputs, labels, mixup, target_re, lam)
            else:
                loss = self.loss_func(outputs, labels)
            # metric
            self.loss_meter.update(loss)
            self.calibrate_evaluator.update(outputs, labels)
            self.logits_evaluator.update(to_numpy(outputs))
            predicts = F.softmax(outputs, dim=1)
            if self.cfg.data.name == 'cifar10_lt' or self.cfg.data.name == 'cifar100_lt':
                _, predicted = predicts.max(1)
                target_one_hot = F.one_hot(labels, self.num_classes)
                predict_one_hot = F.one_hot(predicted, self.num_classes)
                class_num = class_num + target_one_hot.sum(dim=0).to(torch.float)
                correct = correct + (target_one_hot + predict_one_hot == 2).sum(dim=0).to(torch.float)
                self.evaluator.update(
                    to_numpy(predicts), to_numpy(labels),
                    to_numpy(correct), to_numpy(class_num),
                    self.cfg.data.head_class_idx,
                    self.cfg.data.med_class_idx,
                    self.cfg.data.tail_class_idx
                )
            else:
                self.evaluator.update(
                    to_numpy(predicts), to_numpy(labels)
                )
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
            # logging
            if (i + 1) % self.cfg.log_period == 0:
                self.log_iter_info(i, max_iter, epoch, phase)
            end = time.time()
        if hasattr(self.loss_func, 'margin'):
            logger.info(self.loss_func.margin)
        self.log_eval_epoch_info(epoch, phase)
        return self.loss_meter.avg(0), self.evaluator.mean_score(all_metric=False)[0]

    def train(self):
        self.start_or_resume()
        logger.info(
            "Everything is perfect so far. Let's start training. Good luck!"
        )
        for epoch in range(self.start_epoch, self.max_epoch):
            logger.info("=" * 20)
            logger.info(" Start epoch {}".format(epoch + 1))
            logger.info("=" * 20)
            self.train_epoch(epoch)
            val_loss, val_score = self.eval_epoch(self.val_loader, epoch, phase="Val")
            # run lr scheduler
            self.scheduler.step()
            if isinstance(self.loss_func, (LogitMarginL1)):
                self.loss_func.schedule_alpha(epoch)
            if self.best_score is None or val_score > self.best_score:
                self.best_score, self.best_epoch = val_score, epoch
                best_checkpoint = True
            else:
                best_checkpoint = False
            save_checkpoint(
                self.work_dir, self.model, self.loss_func,
                self.optimizer, self.scheduler,
                epoch=epoch,
                best_checkpoint=best_checkpoint,
                val_score=val_score,
                keep_checkpoint_num=self.cfg.train.keep_checkpoint_num,
                keep_checkpoint_interval=self.cfg.train.keep_checkpoint_interval
            )
            # logging best performance on val so far
            logger.info(
                "Epoch[{}]\tBest {} on Val : {:.4f} at epoch {}".format(
                    epoch + 1, self.evaluator.main_metric(),
                    self.best_score, self.best_epoch + 1
                )
            )
            if self.cfg.wandb.enable and best_checkpoint:
                wandb.log({
                    "epoch": epoch,
                    "Val/best_epoch": self.best_epoch,
                    "Val/best_{}".format(self.evaluator.main_metric()): self.best_score,
                    "Val/best_classify_score_table": self.evaluator.wandb_score_table(),
                    "Val/best_calibrate_score_table": self.calibrate_evaluator.wandb_score_table()
                })
        if self.cfg.wandb.enable:
            copyfile(
                osp.join(self.work_dir, "best.pth"),
                osp.join(self.work_dir, "{}-best.pth".format(wandb.run.name))
            )

    def post_temperature(self):
        model_with_temp = ModelWithTemperature(self.model, device=self.device)
        model_with_temp.set_temperature(self.val_loader)
        temp = model_with_temp.get_temperature()
        if self.cfg.wandb.enable:
            wandb.log({
                "temperature": temp
            })
        return temp

    def test(self):
        logger.info("We are almost done : final testing ...")
        self.test_loader = instantiate(self.cfg.data.object.test)
        # test best pth
        epoch = self.best_epoch
        logger.info("#################")
        logger.info(" Test at best epoch {}".format(epoch + 1))
        logger.info("#################")
        logger.info("Best epoch[{}] :".format(epoch + 1))
        load_checkpoint(
            osp.join(self.work_dir, "best.pth"), self.model, self.device
        )
        self.eval_epoch(self.test_loader, epoch, phase="Test")
        temp = self.post_temperature()
        self.eval_epoch(self.test_loader, epoch, phase="TestPT", temp=temp, post_temp=True)

    def run(self):
        self.train()
        self.test()
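For reference, a hedged sketch of how mixup_data and mixup_criterion combine; the model and batch below are stand-ins, not repository objects, and lam is fixed instead of drawn from np.random.beta(alpha, alpha).

import torch
import torch.nn as nn

model = nn.Linear(32, 10)                 # stand-in classifier
criterion = nn.CrossEntropyLoss()
x, y = torch.randn(8, 32), torch.randint(0, 10, (8,))
lam = 0.3                                 # normally sampled from Beta(alpha, alpha)
index = torch.randperm(x.size(0))
mixed_x = lam * x + (1 - lam) * x[index]  # same convex combination as mixup_data
y_a, y_b = y, y[index]
logits = model(mixed_x)                   # forward once, weight the two targets
loss = lam * criterion(logits, y_a) + (1 - lam) * criterion(logits, y_b)
loss.backward()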
18,112
Python
.py
396
33.800505
132
0.574151
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,908
__init__.py
cvlab-yonsei_RankMixup/calibrate/engine/__init__.py
from .trainer import Trainer
from .segement_trainer import SegmentTrainer
from .nlp_trainer import NLPTrainer
from .tester import Tester
from .ood_tester import OODTester
172
Python
.py
5
33.2
44
0.861446
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,909
ood_tester.py
cvlab-yonsei_RankMixup/calibrate/engine/ood_tester.py
import os.path as osp
import time
import json
import logging
import torch
import torch.nn.functional as F
from omegaconf import DictConfig, OmegaConf
from hydra.utils import instantiate
import wandb
from terminaltables.ascii_table import AsciiTable
from typing import Optional

from calibrate.net import ModelWithTemperature
from calibrate.evaluation import (
    AverageMeter, LossMeter, ClassificationEvaluator,
    CalibrateEvaluator, OODEvaluator
)
from calibrate.utils import (
    load_train_checkpoint, load_checkpoint, save_checkpoint, round_dict
)
from calibrate.utils.torch_helper import to_numpy, get_lr
from .tester import Tester

logger = logging.getLogger(__name__)


class OODTester(Tester):
    def __init__(self, cfg: DictConfig) -> None:
        super().__init__(cfg)

    def build_data_loader(self) -> None:
        # data pipeline
        self.in_test_loader = instantiate(self.cfg.data.object.in_dist)
        self.out_test_loader = instantiate(self.cfg.data.object.out_dist)

    def build_meter(self):
        self.batch_time_meter = AverageMeter()
        self.num_classes = self.cfg.model.num_classes
        self.evaluator = OODEvaluator(self.num_classes)

    def reset_meter(self):
        self.batch_time_meter.reset()
        self.evaluator.reset()

    @torch.no_grad()
    def eval_epoch(
        self,
        phase="Val",
        temp=1.0,
        post_temp=False
    ) -> None:
        self.reset_meter()
        self.model.eval()
        end = time.time()
        for i, (inputs, labels) in enumerate(self.in_test_loader):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            # forward
            outputs = self.model(inputs)
            if post_temp:
                outputs = outputs / temp
            # metric
            predicts = F.softmax(outputs, dim=1)
            self.evaluator.update(
                to_numpy(predicts), to_numpy(labels), in_dist=True
            )
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
        for i, (inputs, labels) in enumerate(self.out_test_loader):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            # forward
            outputs = self.model(inputs)
            if post_temp:
                outputs = outputs / temp
            # metric
            predicts = F.softmax(outputs, dim=1)
            self.evaluator.update(
                to_numpy(predicts), to_numpy(labels), in_dist=False
            )
            # measure elapsed time
            self.batch_time_meter.update(time.time() - end)
            end = time.time()
        self.log_eval_epoch_info(phase)

    def log_eval_epoch_info(self, phase="Val"):
        log_dict = {}
        log_dict["samples"] = self.evaluator.num_samples()
        metric, table_data = self.evaluator.mean_score(print=False)
        log_dict.update(metric)
        logger.info("{} Epoch\t{}".format(
            phase, json.dumps(round_dict(log_dict))
        ))
        logger.info("\n" + AsciiTable(table_data).table)
        if self.cfg.wandb.enable:
            wandb_log_dict = {}
            wandb_log_dict.update(dict(
                ("{}/{}".format(phase, key), value) for (key, value) in log_dict.items()
            ))
            wandb_log_dict["{}/classify_score_table".format(phase)] = (
                wandb.Table(
                    columns=table_data[0], data=table_data[1:]
                )
            )
            wandb.log(wandb_log_dict)

    def test(self):
        logger.info(
            "Everything is perfect so far. Let's start testing. Good luck!"
        )
        self.eval_epoch(phase="Test")
        logger.info("Test with post-temperature scaling!")
        temp = self.post_temperature()
        self.eval_epoch(phase="TestPT", temp=temp, post_temp=True)
3,889
Python
.py
104
28.221154
88
0.603179
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,910
file_io.py
cvlab-yonsei_RankMixup/calibrate/utils/file_io.py
import os
import os.path as osp
import zipfile
from typing import List


def mkdir(output_dir: str) -> None:
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)


def zipdir(path, result_path):
    zipf = zipfile.ZipFile(result_path, "w")
    for root, dirs, files in os.walk(path):
        for file in files:
            zipf.write(
                osp.join(root, file),
                osp.relpath(osp.join(root, file), osp.join(path, '..'))
            )
    zipf.close()


def load_list(path: str) -> List[str]:
    """load list from text file"""
    assert osp.exists(path), "{} does not exist".format(path)
    ret = []
    with open(path, "r") as f:
        for line in f:
            ret.append(line.strip())
    return ret


def save_list(lines, path: str) -> None:
    with open(path, "w") as f:
        for line in lines:
            f.write("{}\n".format(line))
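A small round-trip sketch for save_list and load_list above; the path below is illustrative only.

from calibrate.utils.file_io import load_list, save_list

lines = ["cat.jpg", "dog.jpg"]
save_list(lines, "/tmp/split.txt")          # writes one item per line
assert load_list("/tmp/split.txt") == lines  # strip()ed back into a list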
895
Python
.py
28
25.214286
71
0.589744
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,911
constants.py
cvlab-yonsei_RankMixup/calibrate/utils/constants.py
EPS: float = 1e-10

#: Loss binary mode supposes you are solving a binary segmentation task.
#: That means you have only one class whose pixels are labeled as **1**;
#: the rest of the pixels are background and labeled as **0**.
#: Target mask shape - (N, H, W), model output mask shape (N, 1, H, W).
BINARY_MODE: str = "binary"

#: Loss multiclass mode supposes you are solving a multi-**class** segmentation task.
#: That means you have *C = 1..N* classes which have unique label values,
#: classes are mutually exclusive and all pixels are labeled with these values.
#: Target mask shape - (N, H, W), model output mask shape (N, C, H, W).
MULTICLASS_MODE: str = "multiclass"

#: Loss multilabel mode supposes you are solving a multi-**label** segmentation task.
#: That means you have *C = 1..N* classes whose pixels are labeled as **1**,
#: classes are not mutually exclusive and each class has its own *channel*;
#: pixels in each channel which do not belong to the class are labeled as **0**.
#: Target mask shape - (N, C, H, W), model output mask shape (N, C, H, W).
MULTILABEL_MODE: str = "multilabel"
1,085
Python
.py
17
62.647059
82
0.714152
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,912
__init__.py
cvlab-yonsei_RankMixup/calibrate/utils/__init__.py
from .file_io import *
from .checkpoint import *
from .misc import set_random_seed, get_logfile, round_dict
from .dist_helper import *
135
Python
.py
4
32.75
58
0.778626
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,913
checkpoint.py
cvlab-yonsei_RankMixup/calibrate/utils/checkpoint.py
import os
import os.path as osp
import logging
import torch
from typing import Optional, Tuple

logger = logging.getLogger(__name__)


def load_checkpoint(
    model_path: str, model: torch.nn.Module, device: torch.device
) -> None:
    if not osp.exists(model_path):
        raise FileNotFoundError(
            "Model not found : {}".format(model_path)
        )
    checkpoint = torch.load(model_path, map_location=device)
    if "state_dict" in checkpoint:
        checkpoint = checkpoint["state_dict"]
    # strip the "module." prefix left by DataParallel/DistributedDataParallel
    checkpoint = dict(
        (key[7:] if "module" in key else key, value)
        for (key, value) in checkpoint.items()
    )
    missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False)
    logger.info("Succeed to load weights from {}".format(model_path))
    if missing_keys:
        logger.warning("Missing keys : {}".format(missing_keys))
    if unexpected_keys:
        logger.warning("Unexpected keys : {}".format(unexpected_keys))


def load_train_checkpoint(
    work_dir: str,
    device: torch.device,
    model: torch.nn.Module,
    optimizer: Optional[torch.optim.Optimizer] = None,
    scheduler: Optional[object] = None
) -> Tuple:
    try:
        last_checkpoint_path = osp.join(work_dir, "last.pth")
        checkpoint = torch.load(last_checkpoint_path, map_location=device)
        epoch = checkpoint["epoch"]
        model.load_state_dict(checkpoint["state_dict"], strict=True)
        if optimizer:
            optimizer.load_state_dict(checkpoint["optimizer"])
        if scheduler:
            scheduler.load_state_dict(checkpoint["scheduler"])
        logger.info("Succeed to load train info from {}".format(last_checkpoint_path))
        best_checkpoint_path = osp.join(work_dir, "best.pth")
        checkpoint = torch.load(best_checkpoint_path, map_location=device)
        best_epoch = checkpoint["epoch"]
        best_score = checkpoint["val_score"] if "val_score" in checkpoint else None
        return epoch + 1, best_epoch, best_score
    except Exception:
        return 0, -1, None


def save_checkpoint(
    save_dir: str,
    model: torch.nn.Module,
    criterion: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    scheduler,
    epoch: int,
    best_checkpoint: bool = False,
    val_score: Optional[float] = None,
    keep_checkpoint_num: int = 1,
    keep_checkpoint_interval: int = 0
) -> None:
    state = {
        "epoch": epoch,
        "state_dict": model.state_dict(),
        "criterion": criterion.state_dict(),
        "optimizer": optimizer.state_dict(),
        "scheduler": scheduler.state_dict() if scheduler is not None else None
    }
    if val_score is not None:  # keep a score of 0.0 as well
        state["val_score"] = val_score
    torch.save(state, osp.join(save_dir, "last.pth"))
    if best_checkpoint:
        torch.save(state, osp.join(save_dir, "best.pth"))
    # else:
    #     torch.save(state, osp.join(save_dir, f"ckpt_{epoch}.pth"))
    if keep_checkpoint_num > 1:
        torch.save(state, osp.join(save_dir, "epoch_{}.pth".format(epoch + 1)))
        remove_file = osp.join(save_dir, "epoch_{}.pth".format(epoch + 1 - keep_checkpoint_num))
        if osp.exists(remove_file):
            os.remove(remove_file)
    if keep_checkpoint_interval > 0:
        if (epoch + 1) % keep_checkpoint_interval == 0:
            torch.save(
                state, osp.join(save_dir, "epoch_{}.pth".format(epoch + 1))
            )
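A hedged sketch of the intended save/resume cycle; the work directory and the toy model, criterion, optimizer, and scheduler below are stand-ins, not repository defaults.

import os
import torch
from calibrate.utils.checkpoint import save_checkpoint, load_train_checkpoint

work_dir = "/tmp/work_dir"               # illustrative path
os.makedirs(work_dir, exist_ok=True)
model = torch.nn.Linear(4, 2)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
save_checkpoint(work_dir, model, criterion, optimizer, scheduler,
                epoch=0, best_checkpoint=True, val_score=0.5)
start_epoch, best_epoch, best_score = load_train_checkpoint(
    work_dir, torch.device("cpu"), model,
    optimizer=optimizer, scheduler=scheduler
)
print(start_epoch, best_epoch, best_score)   # 1, 0, 0.5 after the save above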
3,510
Python
.py
90
32.255556
96
0.644679
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,914
dist_helper.py
cvlab-yonsei_RankMixup/calibrate/utils/dist_helper.py
import pickle
import os
import os.path as osp
import socket
import subprocess
from typing import Callable, List, Optional, Tuple
import torch
from torch import distributed as dist
import logging
import sys


def get_world_size():
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def synchronize():
    """
    Helper function to synchronize (barrier) among all processes
    when using distributed training
    """
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    world_size = dist.get_world_size()
    if world_size == 1:
        return
    dist.barrier()


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a Tensor
    origin_size = None
    if not isinstance(data, torch.Tensor):
        buffer = pickle.dumps(data)
        storage = torch.ByteStorage.from_buffer(buffer)
        tensor = torch.ByteTensor(storage).to("cuda")
    else:
        origin_size = data.size()
        tensor = data.reshape(-1)
    tensor_type = tensor.dtype
    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.FloatTensor(size=(max_size,)).cuda().to(tensor_type))
    if local_size != max_size:
        padding = torch.FloatTensor(size=(max_size - local_size,)).cuda().to(tensor_type)
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        if origin_size is None:
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        else:
            buffer = tensor[:size]
            data_list.append(buffer)
    if origin_size is not None:
        new_shape = [-1] + list(origin_size[1:])
        resized_list = []
        for data in data_list:
            # suppose the difference of tensor size exist in first dimension
            data = data.reshape(new_shape)
            resized_list.append(data)
        return resized_list
    else:
        return data_list


def _find_free_port() -> str:
    # Copied from https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/launch.py  # noqa: E501
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Binding to port 0 will cause the OS to find an available port for us
    sock.bind(('', 0))
    port = sock.getsockname()[1]
    sock.close()
    # NOTE: there is still a chance the port could be taken by other processes.
    return port


def _is_free_port(port: int) -> bool:
    ips = socket.gethostbyname_ex(socket.gethostname())[-1]
    ips.append('localhost')
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return all(s.connect_ex((ip, port)) != 0 for ip in ips)


def init_dist_pytorch(backend: str = "nccl") -> None:
    local_rank = int(os.environ["LOCAL_RANK"])
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(
        backend=backend,
        init_method="env://",
        world_size=world_size,
        rank=rank
    )


def init_dist_slurm(backend: str = "nccl", port: Optional[int] = None) -> None:
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    addr = subprocess.getoutput(
        f'scontrol show hostname {node_list} | head -n1')
    # specify master port
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
    elif 'MASTER_PORT' in os.environ:
        pass  # use MASTER_PORT in the environment variable
    else:
        # if torch.distributed default port(29500) is available
        # then use it, else find a free port
        if _is_free_port(29500):
            os.environ['MASTER_PORT'] = '29500'
        else:
            os.environ['MASTER_PORT'] = str(_find_free_port())
    # use MASTER_ADDR in the environment variable if it already exists
    if 'MASTER_ADDR' not in os.environ:
        os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend, rank=proc_id, world_size=ntasks)


def reduce_tensor(tensor, n):
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= n
    return rt


def build_dist_data_loader(
    dataset,
    batch_size,
    world_size=1,
    rank=0,
    shuffle=False,
    num_workers=4,
    pin_memory=True,
    drop_last=True,
):
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=world_size, rank=rank, shuffle=shuffle,
    )
    data_loader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
        drop_last=drop_last,
    )
    return data_loader


def setup_dist_logger(save_dir, dist_rank, job_name="train", level=logging.INFO):
    formatter = logging.Formatter(
        fmt="[%(asctime)s %(levelname)s][%(filename)s:%(lineno)s - %(funcName)s] - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    logger = logging.getLogger()
    logger.setLevel(level)
    # create console handler for master process
    if dist_rank == 0:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(level)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    # create file handler for all processes
    file_handler = logging.FileHandler(
        osp.join(save_dir, f"{job_name}_rank{dist_rank}.log"), mode="a",
    )
    file_handler.setLevel(level)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    if dist.get_rank() == 0:
        logger.info(f"Logger at rank{dist_rank} is set up.")


def setup_logger(save_dir, job_name="train", level=logging.INFO):
    formatter = logging.Formatter(
        fmt="[%(asctime)s %(levelname)s][%(filename)s:%(lineno)s - %(funcName)s] - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    logger = logging.getLogger()
    logger.setLevel(level)
    # create console handler for master process
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(level)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    # create file handler for all processes
    file_handler = logging.FileHandler(
        osp.join(save_dir, f"{job_name}.log"), mode="a",
    )
    file_handler.setLevel(level)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info("Logger is set up.")
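An assumed launch pattern for init_dist_pytorch, sketched rather than taken from the repository: torchrun sets LOCAL_RANK, RANK, and WORLD_SIZE in the environment, and the nccl backend requires one CUDA device per process.

# launch with: torchrun --nproc_per_node=2 demo.py
import torch
from calibrate.utils.dist_helper import (
    init_dist_pytorch, get_rank, get_world_size, reduce_tensor
)

def main():
    init_dist_pytorch(backend="nccl")          # reads LOCAL_RANK/RANK/WORLD_SIZE
    t = torch.ones(1).cuda() * get_rank()
    print(reduce_tensor(t, get_world_size()))  # mean of the ranks across processes

if __name__ == "__main__":
    main()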
7,770
Python
.py
212
30.528302
115
0.657265
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,915
misc.py
cvlab-yonsei_RankMixup/calibrate/utils/misc.py
import logging
import numpy as np
import os
import random
from datetime import datetime
import torch
from copy import deepcopy

logger = logging.getLogger(__name__)


def set_random_seed(seed: int = None, deterministic: bool = False):
    """
    Set the random seed for the RNG in torch, numpy and python.

    Args:
        seed (int): if None, will use a strong random seed.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
    """
    if seed is None:
        seed = (
            os.getpid()
            + int(datetime.now().strftime("%S%f"))
            + int.from_bytes(os.urandom(2), "big")
        )
        logger = logging.getLogger(__name__)
        logger.info("Using a generated random seed {}".format(seed))
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


def get_logfile(logger):
    if len(logger.root.handlers) == 1:
        return None
    else:
        return logger.root.handlers[1].baseFilename


def round_dict(d, decimals=5):
    """
    Return a new dictionary with all the floating values rounded
    with the specified number of decimals.
    """
    ret = deepcopy(d)
    for key in ret:
        if isinstance(ret[key], float):
            ret[key] = round(ret[key], decimals)
    return ret
1,615
Python
.py
48
27.479167
74
0.65619
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,916
torch_helper.py
cvlab-yonsei_RankMixup/calibrate/utils/torch_helper.py
import random
import numpy as np
import torch
import torch.nn as nn

from .constants import EPS


def to_numpy(x: torch.Tensor):
    return x.detach().cpu().numpy()


def get_lr(optimizer: torch.optim.Optimizer) -> float:
    for param in optimizer.param_groups:
        return param["lr"]


def worker_init_fn(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)


def kl_div(p: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
    r"""KL divergence of two distributions: KL(p||q) = \sum p \log(p/q)"""
    p = torch.flatten(p, 1)
    q = torch.flatten(q, 1)
    y = (p * torch.log(p / (q + EPS) + EPS)).mean()
    return y


def entropy(x: torch.Tensor) -> torch.Tensor:
    r"""Entropy of the input distribution: EN(x) = - \sum x \log(x)"""
    x = torch.flatten(x, 1)
    y = - x * torch.log(x + EPS)
    y = y.mean()
    return y


def disable_bn(model):
    for module in model.modules():
        if isinstance(
            module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
        ):
            module.eval()


def enable_bn(model):
    model.train()
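Two quick numerical checks of kl_div and entropy above; note that both helpers average over every element, so a uniform distribution over K classes yields log(K)/K rather than log(K).

import torch
from calibrate.utils.torch_helper import entropy, kl_div

p = torch.full((1, 4), 0.25)    # uniform over 4 classes
print(entropy(p))               # ~= log(4) / 4 because of the .mean() over classes
print(kl_div(p, p))             # ~= 0 for identical distributions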
1,166
Python
.py
39
24.538462
61
0.619477
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,917
mdca.py
cvlab-yonsei_RankMixup/calibrate/losses/mdca.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from .label_smoothing import LabelSmoothingCrossEntropy


class MDCA(nn.Module):
    def __init__(self):
        super(MDCA, self).__init__()
        self.ls = LabelSmoothingCrossEntropy()

    @property
    def names(self):
        return "loss", "loss_ce", "loss_mdca"

    def forward(self, output, target):
        # label-smoothing CE is computed on the raw logits;
        # the MDCA term below uses the softmax probabilities
        loss_ce = self.ls(output, target)
        probs = torch.softmax(output, dim=1)  # [batch, classes]
        loss_mdca = torch.tensor(0.0, device=output.device)
        batch, classes = probs.shape
        for c in range(classes):
            avg_count = (target == c).float().mean()
            avg_conf = torch.mean(probs[:, c])
            loss_mdca += torch.abs(avg_conf - avg_count)
        loss_mdca /= classes
        loss = loss_ce + loss_mdca
        return loss, loss_ce, loss_mdca
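An illustrative call of the MDCA loss on random logits; the tensors below are stand-ins used only to show the expected shapes and return values.

import torch
from calibrate.losses import MDCA

criterion = MDCA()
logits = torch.randn(16, 10)                 # [batch, classes]
targets = torch.randint(0, 10, (16,))
loss, loss_ce, loss_mdca = criterion(logits, targets)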
927
Python
.py
26
27.846154
56
0.603604
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,918
brier_score.py
cvlab-yonsei_RankMixup/calibrate/losses/brier_score.py
'''
Implementation of Brier Score.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F


class BrierScore(nn.Module):
    def __init__(self):
        super(BrierScore, self).__init__()

    def forward(self, input, target):
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)
        # build the one-hot target on the same device/dtype as the input
        # (works on CPU as well, unlike tensor.get_device())
        target_one_hot = torch.zeros_like(input)
        target_one_hot.scatter_(1, target, 1)

        # pt = F.softmax(input)
        pt = F.log_softmax(input, dim=1).exp()
        squared_diff = (target_one_hot - pt) ** 2
        loss = torch.sum(squared_diff) / float(input.shape[0])
        return loss
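A quick sanity check for BrierScore: a near-certain, correct prediction should score close to 0. The logits below are stand-ins.

import torch
from calibrate.losses import BrierScore

criterion = BrierScore()
logits = torch.tensor([[10.0, -10.0]])   # near-certain prediction of class 0
target = torch.tensor([0])
print(criterion(logits, target))          # close to 0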
909
Python
.py
23
32.608696
84
0.588435
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,919
mmce.py
cvlab-yonsei_RankMixup/calibrate/losses/mmce.py
'''
Implementation of the MMCE (MMCE_m) and MMCE_weighted (MMCE_w).
Reference:
[1] A. Kumar, S. Sarawagi, U. Jain, Trainable Calibration Measures for
    Neural Networks from Kernel Mean Embeddings. ICML, 2018.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F


class MMCE(nn.Module):
    """
    Computes MMCE_m loss.
    """
    def __init__(self, device, lamda=1.0):
        super(MMCE, self).__init__()
        self.device = device
        self.lamda = lamda

    def torch_kernel(self, matrix):
        return torch.exp(-1.0 * torch.abs(matrix[:, :, 0] - matrix[:, :, 1]) / (0.4))

    def forward(self, input, target):
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1)  # For CIFAR-10 and CIFAR-100, target.shape is [N] to begin with

        predicted_probs = F.softmax(input, dim=1)
        predicted_probs, pred_labels = torch.max(predicted_probs, 1)
        correct_mask = torch.where(
            torch.eq(pred_labels, target),
            torch.ones(pred_labels.shape).to(self.device),
            torch.zeros(pred_labels.shape).to(self.device)
        )

        c_minus_r = correct_mask - predicted_probs
        dot_product = torch.mm(c_minus_r.unsqueeze(1), c_minus_r.unsqueeze(0))

        prob_tiled = predicted_probs.unsqueeze(1).repeat(1, predicted_probs.shape[0]).unsqueeze(2)
        prob_pairs = torch.cat([prob_tiled, prob_tiled.permute(1, 0, 2)], dim=2)

        kernel_prob_pairs = self.torch_kernel(prob_pairs)
        numerator = dot_product * kernel_prob_pairs
        # return torch.sum(numerator)/correct_mask.shape[0]**2
        # ce
        ce = F.cross_entropy(input, target)
        return ce + self.lamda * torch.sum(numerator) / torch.pow(
            torch.tensor(correct_mask.shape[0]).type(torch.FloatTensor), 2
        )


class MMCE_weighted(nn.Module):
    """
    Computes MMCE_w loss.
    """
    def __init__(self, device, lamda=1.0):
        super(MMCE_weighted, self).__init__()
        self.device = device
        self.lamda = lamda

    def torch_kernel(self, matrix):
        return torch.exp(-1.0 * torch.abs(matrix[:, :, 0] - matrix[:, :, 1]) / (0.4))

    def get_pairs(self, tensor1, tensor2):
        correct_prob_tiled = tensor1.unsqueeze(1).repeat(1, tensor1.shape[0]).unsqueeze(2)
        incorrect_prob_tiled = tensor2.unsqueeze(1).repeat(1, tensor2.shape[0]).unsqueeze(2)
        correct_prob_pairs = torch.cat(
            [correct_prob_tiled, correct_prob_tiled.permute(1, 0, 2)], dim=2
        )
        incorrect_prob_pairs = torch.cat(
            [incorrect_prob_tiled, incorrect_prob_tiled.permute(1, 0, 2)], dim=2
        )
        correct_prob_tiled_1 = tensor1.unsqueeze(1).repeat(1, tensor2.shape[0]).unsqueeze(2)
        incorrect_prob_tiled_1 = tensor2.unsqueeze(1).repeat(1, tensor1.shape[0]).unsqueeze(2)
        correct_incorrect_pairs = torch.cat(
            [correct_prob_tiled_1, incorrect_prob_tiled_1.permute(1, 0, 2)], dim=2
        )
        return correct_prob_pairs, incorrect_prob_pairs, correct_incorrect_pairs

    def get_out_tensor(self, tensor1, tensor2):
        return torch.mean(tensor1 * tensor2)

    def forward(self, input, target):
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1)  # For CIFAR-10 and CIFAR-100, target.shape is [N] to begin with

        predicted_probs = F.softmax(input, dim=1)
        predicted_probs, predicted_labels = torch.max(predicted_probs, 1)

        correct_mask = torch.where(
            torch.eq(predicted_labels, target),
            torch.ones(predicted_labels.shape).to(self.device),
            torch.zeros(predicted_labels.shape).to(self.device)
        )

        k = torch.sum(correct_mask).type(torch.int64)
        k_p = torch.sum(1.0 - correct_mask).type(torch.int64)
        cond_k = torch.where(torch.eq(k, 0), torch.tensor(0).to(self.device), torch.tensor(1).to(self.device))
        cond_k_p = torch.where(torch.eq(k_p, 0), torch.tensor(0).to(self.device), torch.tensor(1).to(self.device))
        k = torch.max(k, torch.tensor(1).to(self.device)) * cond_k * cond_k_p + (1 - cond_k * cond_k_p) * 2
        k_p = torch.max(k_p, torch.tensor(1).to(self.device)) * cond_k_p * cond_k + (
            (1 - cond_k_p * cond_k) * (correct_mask.shape[0] - 2)
        )

        correct_prob, _ = torch.topk(predicted_probs * correct_mask, k)
        incorrect_prob, _ = torch.topk(predicted_probs * (1 - correct_mask), k_p)

        correct_prob_pairs, incorrect_prob_pairs, correct_incorrect_pairs = \
            self.get_pairs(correct_prob, incorrect_prob)

        correct_kernel = self.torch_kernel(correct_prob_pairs)
        incorrect_kernel = self.torch_kernel(incorrect_prob_pairs)
        correct_incorrect_kernel = self.torch_kernel(correct_incorrect_pairs)

        sampling_weights_correct = torch.mm((1.0 - correct_prob).unsqueeze(1), (1.0 - correct_prob).unsqueeze(0))
        correct_correct_vals = self.get_out_tensor(correct_kernel, sampling_weights_correct)

        sampling_weights_incorrect = torch.mm(incorrect_prob.unsqueeze(1), incorrect_prob.unsqueeze(0))
        incorrect_incorrect_vals = self.get_out_tensor(incorrect_kernel, sampling_weights_incorrect)

        sampling_correct_incorrect = torch.mm((1.0 - correct_prob).unsqueeze(1), incorrect_prob.unsqueeze(0))
        correct_incorrect_vals = self.get_out_tensor(correct_incorrect_kernel, sampling_correct_incorrect)

        correct_denom = torch.sum(1.0 - correct_prob)
        incorrect_denom = torch.sum(incorrect_prob)

        m = torch.sum(correct_mask)
        n = torch.sum(1.0 - correct_mask)
        mmd_error = 1.0 / (m * m + 1e-5) * torch.sum(correct_correct_vals)
        mmd_error += 1.0 / (n * n + 1e-5) * torch.sum(incorrect_incorrect_vals)
        mmd_error -= 2.0 / (m * n + 1e-5) * torch.sum(correct_incorrect_vals)
        # ce
        ce = F.cross_entropy(input, target)
        return ce + self.lamda * torch.max(
            (cond_k * cond_k_p).type(torch.FloatTensor).to(self.device).detach() * torch.sqrt(mmd_error + 1e-10),
            torch.tensor(0.0).to(self.device)
        )
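Illustrative use of MMCE on a batch of random logits; the device argument mirrors the constructor and the tensors are stand-ins.

import torch
from calibrate.losses import MMCE

criterion = MMCE(torch.device("cpu"), lamda=1.0)
logits = torch.randn(32, 10)
targets = torch.randint(0, 10, (32,))
print(criterion(logits, targets))   # CE plus the lamda-weighted MMCE_m penalty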
6,815
Python
.py
111
49.414414
175
0.60522
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,920
label_smoothing.py
cvlab-yonsei_RankMixup/calibrate/losses/label_smoothing.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class LabelSmoothingCrossEntropy(nn.Module):
    def __init__(self, alpha=0.1, ignore_index=-100, reduction="mean"):
        super(LabelSmoothingCrossEntropy, self).__init__()
        self.alpha = alpha
        self.ignore_index = ignore_index
        self.reduction = reduction

    def forward(self, input, target):
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1)

        if self.ignore_index >= 0:
            index = torch.nonzero(target != self.ignore_index).squeeze()
            input = input[index, :]
            target = target[index]

        confidence = 1. - self.alpha
        logprobs = F.log_softmax(input, dim=-1)
        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
        nll_loss = nll_loss.squeeze(1)
        smooth_loss = -logprobs.mean(dim=-1)
        loss = confidence * nll_loss + self.alpha * smooth_loss

        if self.reduction == "mean":
            return loss.mean()
        elif self.reduction == "sum":
            return loss.sum()
        else:
            return loss
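With alpha=0 the loss above reduces to standard cross-entropy, which gives a cheap correctness check on random stand-in logits.

import torch
import torch.nn.functional as F
from calibrate.losses import LabelSmoothingCrossEntropy

logits = torch.randn(8, 5)
targets = torch.randint(0, 5, (8,))
ls = LabelSmoothingCrossEntropy(alpha=0.0)
print(torch.allclose(ls(logits, targets), F.cross_entropy(logits, targets)))  # True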
1,351
Python
.py
31
34.387097
86
0.581431
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,921
logit_margin_plus.py
cvlab-yonsei_RankMixup/calibrate/losses/logit_margin_plus.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed as dist

from ..utils import reduce_tensor


class LogitMarginPlus(nn.Module):
    """Add marginal penalty to logits:
        CE + alpha * max(0, max(l^n) - l^n - margin)
    """
    def __init__(self,
                 num_classes,
                 margin=10,
                 alpha=1.0,
                 ignore_index=-100,
                 gamma=1.1,
                 tao=1.1,
                 lambd_min: float = 1e-6,
                 lambd_max: float = 1e6,
                 step_size=100):
        super().__init__()
        self.num_classes = num_classes
        self.margin = margin
        self.alpha = alpha
        self.ignore_index = ignore_index
        self.gamma = gamma
        self.tao = tao
        self.lambd_min = lambd_min
        self.lambd_max = lambd_max
        self.step_size = step_size
        # alpha for each class
        self.lambd = self.alpha * torch.ones(self.num_classes, requires_grad=False).cuda()
        self.prev_score, self.curr_score = (
            torch.zeros(self.num_classes, requires_grad=False).cuda(),
            torch.zeros(self.num_classes, requires_grad=False).cuda()
        )
        self.cross_entropy = nn.CrossEntropyLoss()

    @property
    def names(self):
        return "loss", "loss_ce", "loss_margin_l1"

    def reset_update_lambd(self):
        self.prev_score, self.curr_score = self.curr_score, torch.zeros(self.num_classes).cuda()
        self.count = 0

    def get_diff(self, inputs):
        max_values = inputs.max(dim=1)
        max_values = max_values.values.unsqueeze(dim=1).repeat(1, inputs.shape[1])
        diff = max_values - inputs
        return diff

    def forward(self, inputs, targets):
        if inputs.dim() > 2:
            inputs = inputs.view(inputs.size(0), inputs.size(1), -1)  # N,C,H,W => N,C,H*W
            inputs = inputs.transpose(1, 2)                           # N,C,H*W => N,H*W,C
            inputs = inputs.contiguous().view(-1, inputs.size(2))     # N,H*W,C => N*H*W,C
        targets = targets.view(-1)

        if self.ignore_index >= 0:
            index = torch.nonzero(targets != self.ignore_index).squeeze()
            inputs = inputs[index, :]
            targets = targets[index]

        loss_ce = self.cross_entropy(inputs, targets)
        diff = self.get_diff(inputs)
        # loss_margin = torch.clamp(diff - self.margin, min=0).mean()
        loss_margin = F.relu(diff - self.margin)
        loss_margin = torch.einsum("ik,k->ik", loss_margin, self.lambd).mean()
        # loss = loss_ce + self.alpha * loss_margin
        loss = loss_ce + loss_margin
        return loss, loss_ce, loss_margin

    def update_lambd(self, logits):
        diff = self.get_diff(logits)
        loss_margin = F.relu(diff - self.margin)
        loss_margin = torch.einsum("ik,k->ik", loss_margin, self.lambd).sum(dim=0)
        self.curr_score += loss_margin
        self.count += logits.shape[0]

    def set_lambd(self, epoch):
        self.curr_score = self.curr_score / self.count
        if dist.is_initialized():
            self.curr_score = reduce_tensor(self.curr_score, dist.get_world_size())
        if (epoch + 1) % self.step_size == 0 and self.prev_score.sum() != 0:
            self.lambd = torch.where(
                self.curr_score > self.prev_score * self.tao,
                self.lambd * self.gamma,
                self.lambd
            )
            self.lambd = torch.where(
                self.curr_score < self.prev_score / self.tao,
                self.lambd / self.gamma,
                self.lambd
            )
            self.lambd = torch.clamp(self.lambd, min=self.lambd_min, max=self.lambd_max).detach()

    def get_lambd_metric(self):
        return self.lambd.mean().item(), self.lambd.max().item()
3,821
Python
.py
89
32.640449
97
0.572698
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,922
penalty_l1.py
cvlab-yonsei_RankMixup/calibrate/losses/penalty_l1.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from calibrate.utils.constants import EPS


class PenaltyL1(nn.Module):
    """Penalty L1 loss = CE + alpha * |s - 1/K|
    (s: softmax outputs, K: number of classes)
    """
    def __init__(self, num_classes, alpha=1.0):
        super().__init__()
        self.num_classes = num_classes
        self.alpha = alpha

    @property
    def names(self):
        return "loss", "loss_ce", "loss_l1"

    def forward(self, inputs, targets):
        # cross_entropy
        loss_ce = F.cross_entropy(inputs, targets)
        # l1
        s = F.log_softmax(inputs, dim=1).exp()
        loss_l1 = (s - 1.0 / self.num_classes).abs()
        loss_l1 = loss_l1.sum() / inputs.shape[0]
        # loss_l1 = loss_l1.mean()
        loss = loss_ce + self.alpha * loss_l1
        return loss, loss_ce, loss_l1
903
Python
.py
25
28.04
82
0.576566
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,923
__init__.py
cvlab-yonsei_RankMixup/calibrate/losses/__init__.py
from .label_smoothing import LabelSmoothingCrossEntropy
from .focal_loss import FocalLoss
from .focal_loss_adaptive_gamma import FocalLossAdaptive
from .mmce import MMCE, MMCE_weighted
from .brier_score import BrierScore
from .penalty_entropy import PenaltyEntropy
from .penalty_l1 import PenaltyL1
from .logit_margin_l1 import LogitMarginL1
from .logit_margin_plus import LogitMarginPlus
from .mdca import MDCA
from .rankmixup import RankMixup_MRL, RankMixup_MNDCG
468
Python
.py
11
41.363636
56
0.861538
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,924
rankmixup.py
cvlab-yonsei_RankMixup/calibrate/losses/rankmixup.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class RankMixup_MRL(nn.Module):
    def __init__(self,
                 num_classes: int = 10,
                 margin: float = 0.1,
                 alpha: float = 0.1,
                 ignore_index: int = -100):
        super().__init__()
        self.margin = margin
        self.alpha = alpha
        self.ignore_index = ignore_index
        self.cross_entropy = nn.CrossEntropyLoss()

    @property
    def names(self):
        return "loss", "loss_ce", "loss_mixup"

    def get_logit_diff(self, inputs, mixup):
        max_values, indices = inputs.max(dim=1)
        max_values = max_values.unsqueeze(dim=1)
        max_values_mixup, indices_mixup = mixup.max(dim=1)
        max_values_mixup = max_values_mixup.unsqueeze(dim=1)
        # diff = max_values - max_values_mixup
        diff = max_values_mixup - max_values
        return diff

    def get_conf_diff(self, inputs, mixup):
        inputs = F.softmax(inputs, dim=1)
        max_values, indices = inputs.max(dim=1)
        max_values = max_values.unsqueeze(dim=1)
        mixup = F.softmax(mixup, dim=1)
        max_values_mixup, indices_mixup = mixup.max(dim=1)
        max_values_mixup = max_values_mixup.unsqueeze(dim=1)
        # diff = max_values - max_values_mixup
        diff = max_values_mixup - max_values
        return diff

    def forward(self, inputs, targets, mixup, target_re, lam):
        if inputs.dim() > 2:
            inputs = inputs.view(inputs.size(0), inputs.size(1), -1)  # N,C,H,W => N,C,H*W
            inputs = inputs.transpose(1, 2)                           # N,C,H*W => N,H*W,C
            inputs = inputs.contiguous().view(-1, inputs.size(2))     # N,H*W,C => N*H*W,C
        targets = targets.view(-1)

        if self.ignore_index >= 0:
            index = torch.nonzero(targets != self.ignore_index).squeeze()
            inputs = inputs[index, :]
            targets = targets[index]

        loss_ce = self.cross_entropy(inputs, targets)

        self_mixup_mask = (target_re == 1.0).sum(dim=1).reshape(1, -1)
        self_mixup_mask = (self_mixup_mask.sum(dim=0) == 0.0)
        # diff = self.get_conf_diff(inputs, mixup)  # using probability
        diff = self.get_logit_diff(inputs, mixup)
        loss_mixup = (self_mixup_mask * F.relu(diff + self.margin)).mean()
        loss = loss_ce + self.alpha * loss_mixup
        return loss, loss_ce, loss_mixup


class RankMixup_MNDCG(nn.Module):
    def __init__(self,
                 num_classes: int = 10,
                 alpha: float = 0.1,
                 ignore_index: int = -100):
        super().__init__()
        self.alpha = alpha
        self.ignore_index = ignore_index
        self.num_classes = num_classes
        self.cross_entropy = nn.CrossEntropyLoss()

    @property
    def names(self):
        return "loss", "loss_ce", "loss_mixup"

    def get_indcg(self, inputs, mixup, lam, target_re):
        mixup = mixup.reshape(len(lam), -1, self.num_classes)          # mixup num x batch x num class
        target_re = target_re.reshape(len(lam), -1, self.num_classes)  # mixup num x batch x num class
        mixup = F.softmax(mixup, dim=2)
        inputs = F.softmax(inputs, dim=1)
        inputs_lam = torch.ones(inputs.size(0), 1, device=inputs.device)
        max_values = inputs.max(dim=1, keepdim=True)[0]
        max_mixup = mixup.max(dim=2)[0].t()    # batch x mixup num
        max_lam = target_re.max(dim=2)[0].t()  # batch x mixup num
        # compute dcg
        sort_index = torch.argsort(max_lam, descending=True)
        max_mixup_sorted = torch.gather(max_mixup, 1, sort_index)
        order = torch.arange(1, 2 + len(lam), device=max_mixup.device)
        dcg_order = torch.log2(order + 1)
        max_mixup_sorted = torch.cat((max_values, max_mixup_sorted), dim=1)
        dcg = (max_mixup_sorted / dcg_order).sum(dim=1)

        max_lam_sorted = torch.gather(max_lam, 1, sort_index)
        max_lam_sorted = torch.cat((inputs_lam, max_lam_sorted), dim=1)
        idcg = (max_lam_sorted / dcg_order).sum(dim=1)
        # compute ndcg
        ndcg = dcg / idcg
        inv_ndcg = idcg / dcg
        ndcg_mask = (idcg > dcg)
        ndcg = ndcg_mask * ndcg + (~ndcg_mask) * inv_ndcg
        return ndcg

    def forward(self, inputs, targets, mixup, target_re, lam):
        if inputs.dim() > 2:
            inputs = inputs.view(inputs.size(0), inputs.size(1), -1)  # N,C,H,W => N,C,H*W
            inputs = inputs.transpose(1, 2)                           # N,C,H*W => N,H*W,C
            inputs = inputs.contiguous().view(-1, inputs.size(2))     # N,H*W,C => N*H*W,C
        targets = targets.view(-1)

        if self.ignore_index >= 0:
            index = torch.nonzero(targets != self.ignore_index).squeeze()
            inputs = inputs[index, :]
            targets = targets[index]

        loss_ce = self.cross_entropy(inputs, targets)
        # NDCG loss
        loss_mixup = (1.0 - self.get_indcg(inputs, mixup, lam, target_re)).mean()
        loss = loss_ce + self.alpha * loss_mixup
        return loss, loss_ce, loss_mixup
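A hedged sketch of calling RankMixup_MNDCG. In the repository the mixed logits and soft targets come from model.forward_multimixup; here they are emulated with stand-in tensors for three mixup samples per image.

import torch
from calibrate.losses import RankMixup_MNDCG

num_classes, batch, lam = 10, 4, [0.9, 0.7, 0.5]       # lam: one weight per mixup sample
criterion = RankMixup_MNDCG(num_classes=num_classes, alpha=0.1)
inputs = torch.randn(batch, num_classes)               # logits of the clean images
targets = torch.randint(0, num_classes, (batch,))
mixup = torch.randn(len(lam) * batch, num_classes)     # stand-in mixed logits
target_re = torch.rand(len(lam) * batch, num_classes)  # stand-in soft mixup labels
loss, loss_ce, loss_mixup = criterion(inputs, targets, mixup, target_re, lam)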
5,114
Python
.py
104
38.807692
101
0.582894
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,925
penalty_entropy.py
cvlab-yonsei_RankMixup/calibrate/losses/penalty_entropy.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from calibrate.utils.constants import EPS


class PenaltyEntropy(nn.Module):
    """Regularizing neural networks by penalizing confident output distributions, 2017.
        <https://arxiv.org/pdf/1701.06548>
        loss = CE - alpha * Entropy(p)
    """
    def __init__(self, alpha=1.0, ignore_index=-100):
        super().__init__()
        self.alpha = alpha
        self.ignore_index = ignore_index

    @property
    def names(self):
        return "loss", "loss_ce", "loss_ent"

    def forward(self, inputs, targets):
        if inputs.dim() > 2:
            inputs = inputs.view(inputs.size(0), inputs.size(1), -1)  # N,C,H,W => N,C,H*W
            inputs = inputs.transpose(1, 2)                           # N,C,H*W => N,H*W,C
            inputs = inputs.contiguous().view(-1, inputs.size(2))     # N,H*W,C => N*H*W,C
        targets = targets.view(-1)

        if self.ignore_index >= 0:
            index = torch.nonzero(targets != self.ignore_index).squeeze()
            inputs = inputs[index, :]
            targets = targets[index]

        # cross entropy
        loss_ce = F.cross_entropy(inputs, targets)
        # entropy
        prob = F.log_softmax(inputs, dim=1).exp()
        prob = torch.clamp(prob, EPS, 1.0 - EPS)
        ent = - prob * torch.log(prob)
        loss_ent = ent.mean()

        loss = loss_ce - self.alpha * loss_ent
        return loss, loss_ce, loss_ent
1,439
Python
.py
34
34.058824
122
0.591105
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
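Editor's note: a quick smoke test for PenaltyEntropy above (shapes illustrative). Because the penalty subtracts the mean entropy, confident low-entropy predictions raise the total loss relative to plain cross-entropy.

import torch

criterion = PenaltyEntropy(alpha=0.1)  # class defined in the file above
logits = torch.randn(8, 10)
targets = torch.randint(0, 10, (8,))
loss, loss_ce, loss_ent = criterion(logits, targets)
print(f"total={loss.item():.4f} ce={loss_ce.item():.4f} ent={loss_ent.item():.4f}")
assert torch.isclose(loss, loss_ce - 0.1 * loss_ent)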
2,285,926
logit_margin_l1.py
cvlab-yonsei_RankMixup/calibrate/losses/logit_margin_l1.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class LogitMarginL1(nn.Module):
    """Add marginal penalty to logits:
        CE + alpha * max(0, max(l^n) - l^n - margin)

    Args:
        margin (float, optional): The margin value. Defaults to 10.
        alpha (float, optional): The balancing weight. Defaults to 0.1.
        ignore_index (int, optional):
            Specifies a target value that is ignored during training. Defaults to -100.

        The following args are related to balancing weight (alpha) scheduling.
        Note all the results presented in our paper are obtained without the scheduling strategy.
        So it's fine to ignore them if you don't want to try it.

        schedule (str, optional): Strategy used to schedule the balancing weight alpha (or not):
            "" | add | multiply | step. Defaults to "" (no scheduling).
            To activate scheduling, call the function `schedule_alpha` every epoch in your training code.
        mu (float, optional): scheduling weight. Defaults to 0.
        max_alpha (float, optional): Defaults to 100.0.
        step_size (int, optional): The step size for updating alpha. Defaults to 100.
    """
    def __init__(self,
                 margin: float = 10,
                 alpha: float = 0.1,
                 ignore_index: int = -100,
                 schedule: str = "",
                 mu: float = 0,
                 max_alpha: float = 100.0,
                 step_size: int = 100):
        super().__init__()
        assert schedule in ("", "add", "multiply", "step")
        self.margin = margin
        self.alpha = alpha
        self.ignore_index = ignore_index
        self.mu = mu
        self.schedule = schedule
        self.max_alpha = max_alpha
        self.step_size = step_size

        self.cross_entropy = nn.CrossEntropyLoss()

    @property
    def names(self):
        return "loss", "loss_ce", "loss_margin_l1"

    def schedule_alpha(self, epoch):
        """Should be called in the training pipeline if you want to schedule alpha."""
        if self.schedule == "add":
            self.alpha = min(self.alpha + self.mu, self.max_alpha)
        elif self.schedule == "multiply":
            self.alpha = min(self.alpha * self.mu, self.max_alpha)
        elif self.schedule == "step":
            if (epoch + 1) % self.step_size == 0:
                self.alpha = min(self.alpha * self.mu, self.max_alpha)

    def get_diff(self, inputs):
        max_values = inputs.max(dim=1)
        max_values = max_values.values.unsqueeze(dim=1).repeat(1, inputs.shape[1])
        diff = max_values - inputs
        return diff

    def forward(self, inputs, targets):
        if inputs.dim() > 2:
            inputs = inputs.view(inputs.size(0), inputs.size(1), -1)  # N,C,H,W => N,C,H*W
            inputs = inputs.transpose(1, 2)                           # N,C,H*W => N,H*W,C
            inputs = inputs.contiguous().view(-1, inputs.size(2))     # N,H*W,C => N*H*W,C
            targets = targets.view(-1)

        if self.ignore_index >= 0:
            index = torch.nonzero(targets != self.ignore_index).squeeze()
            inputs = inputs[index, :]
            targets = targets[index]

        loss_ce = self.cross_entropy(inputs, targets)
        # get logit distance
        diff = self.get_diff(inputs)
        # linear penalty where logit distances are larger than the margin
        loss_margin = F.relu(diff - self.margin).mean()
        loss = loss_ce + self.alpha * loss_margin

        return loss, loss_ce, loss_margin
3,580
Python
.py
77
36.454545
97
0.591404
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
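Editor's note: a toy check, computed by hand, of the margin term LogitMarginL1 above penalizes (values illustrative). Only distances to the max logit that exceed the margin contribute.

import torch

logits = torch.tensor([[12.0, 2.0, 1.0],   # peaked row: distances 0, 10, 11
                       [3.0, 2.5, 2.0]])   # flat row: distances 0, 0.5, 1.0
margin = 6.0
diff = logits.max(dim=1, keepdim=True).values - logits
penalty = torch.relu(diff - margin)
print(penalty)         # only the first row is penalized
print(penalty.mean())  # this is the loss_margin term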
2,285,927
focal_loss_adaptive_gamma.py
cvlab-yonsei_RankMixup/calibrate/losses/focal_loss_adaptive_gamma.py
'''
Implementation of Focal Loss with adaptive gamma.
Reference:
[1]  T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, Focal loss for dense object detection.
     arXiv preprint arXiv:1708.02002, 2017.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

from scipy.special import lambertw
import numpy as np


def get_gamma(p=0.2):
    '''
    Get the gamma for a given pt where the function g(p, gamma) = 1
    '''
    y = ((1-p)**(1-(1-p)/(p*np.log(p)))/(p*np.log(p)))*np.log(1-p)
    gamma_complex = (1-p)/(p*np.log(p)) + lambertw(-y + 1e-12, k=-1)/np.log(1-p)
    gamma = np.real(gamma_complex)  # gamma for which p_t > p results in g(p_t,gamma) < 1
    return gamma


ps = [0.2, 0.5]
gammas = [5.0, 3.0]
i = 0
gamma_dic = {}
for p in ps:
    gamma_dic[p] = gammas[i]
    i += 1


class FocalLossAdaptive(nn.Module):
    def __init__(self, gamma=0, ignore_index=-100, size_average=False, device=None):
        super(FocalLossAdaptive, self).__init__()
        self.size_average = size_average
        self.gamma = gamma
        self.ignore_index = ignore_index
        self.device = device

    def get_gamma_list(self, pt):
        gamma_list = []
        batch_size = pt.shape[0]
        for i in range(batch_size):
            pt_sample = pt[i].item()
            if (pt_sample >= 0.5):
                gamma_list.append(self.gamma)
                continue
            # Choosing the gamma for the sample
            for key in sorted(gamma_dic.keys()):
                if pt_sample < key:
                    gamma_list.append(gamma_dic[key])
                    break
        return torch.tensor(gamma_list).to(self.device)

    def forward(self, input, target):
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)

        if self.ignore_index >= 0:
            index = torch.nonzero(target.squeeze() != self.ignore_index).squeeze()
            input = input[index, :]
            target = target[index, :]

        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        pt = logpt.exp()
        gamma = self.get_gamma_list(pt)
        loss = -1 * (1 - pt)**gamma * logpt

        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
2,524
Python
.py
66
30.727273
99
0.580645
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
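Editor's note: a small sketch of how get_gamma_list above buckets confidences: pt below 0.2 maps to gamma 5.0, pt in [0.2, 0.5) to 3.0, and pt >= 0.5 falls back to the constructor's gamma. Values are illustrative.

import torch

criterion = FocalLossAdaptive(gamma=3.0, device="cpu")  # class from the file above
pt = torch.tensor([0.1, 0.3, 0.7])
print(criterion.get_gamma_list(pt))  # tensor([5., 3., 3.])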
2,285,928
focal_loss.py
cvlab-yonsei_RankMixup/calibrate/losses/focal_loss.py
'''
Implementation of Focal Loss.
Reference:
[1]  T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, Focal loss for dense object detection.
     arXiv preprint arXiv:1708.02002, 2017.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    def __init__(self, gamma=0, ignore_index=-100, size_average=False):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.ignore_index = ignore_index
        self.size_average = size_average

    def forward(self, input, target):
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)

        if self.ignore_index >= 0:
            index = torch.nonzero(target.squeeze() != self.ignore_index).squeeze()
            input = input[index, :]
            target = target[index, :]

        # dim=1 made explicit; the original implicit-dim log_softmax call is deprecated
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        pt = logpt.exp()

        loss = -1 * (1 - pt)**self.gamma * logpt
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
1,336
Python
.py
34
31.588235
99
0.580695
cvlab-yonsei/RankMixup
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
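Editor's note: with gamma=0 the modulating factor (1-pt)**gamma is 1, so FocalLoss above reduces to summed cross-entropy; this sketch verifies that (shapes illustrative).

import torch
import torch.nn.functional as F

criterion = FocalLoss(gamma=0, size_average=False)  # class from the file above
logits = torch.randn(8, 10)
targets = torch.randint(0, 10, (8,))
fl = criterion(logits, targets)
ce = F.cross_entropy(logits, targets, reduction="sum")
print(torch.allclose(fl, ce))  # True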
2,285,929
fruitshopgame.py
deepikamerlin_Fruitshop-Game/fruitshopgame.py
import random

# note: "Feijoa" appears twice in the original list
fruitbag = ["Apple", "Orange", "Banana", "Pear", "Grapes", "Strawberry", "Blueberry",
            "Watermelon", "Pineapple", "Mango", "Peach", "Cherry", "Kiwi", "Lemon",
            "Lime", "Plum", "Raspberry", "Blackberry", "Cantaloupe", "Fig", "Grapefruit",
            "Pomegranate", "Apricot", "Nectarine", "Papaya", "Coconut", "Passion Fruit",
            "Guava", "Lychee", "Persimmon", "Dragon Fruit", "Avocado", "Tangerine",
            "Cranberry", "Date", "Kumquat", "Star Fruit", "Honeydew Melon", "Jackfruit",
            "Mangosteen", "Quince", "Ugli Fruit", "Soursop", "Tamarind", "Plantain",
            "Feijoa", "Bilberry", "Salak", "Ackee", "Barbados Cherry", "Longan",
            "Kiwano (Horned Melon)", "Rambutan", "Sapodilla", "Breadfruit", "Cherimoya",
            "Durian", "Jujube", "Mulberry", "Pawpaw", "Prickly Pear", "Feijoa",
            "Surinam Cherry", "Sweetsop", "Tamarillo", "Yangmei (Yumberry)"]

print("Guess the fruits in the bag of fruits :")
print("\n", fruitbag)

shopkeeper = random.choice(fruitbag)
# print("\nShopkeepers Selection =", shopkeeper)
print("\nLet's move on to the game")

while True:
    player = input("\nEnter the fruitname :")
    if player not in fruitbag:
        # fixed: the original called Print(), which raises a NameError
        print("\nThere is no fruit like this")
    if player == shopkeeper:
        print("\nYou Won this game")
        print("Your Selection =", shopkeeper)
        print("\nDo you want to continue the game again ???[Yes(y/Y),No(n/N)]")
        play_again = input("Please Select anyone :")
        # fixed: `play_again == "y" or "Y"` is always truthy, so the "No" branch
        # could never run; a membership test expresses the intended check
        if play_again in ("y", "Y"):
            shopkeeper = random.choice(fruitbag)
            # print("\nShopkeepers Selection =", shopkeeper)
        elif play_again in ("n", "N"):
            print("\nThank you for your participation in this game")
            print("You are welcome again...")
            break
    else:
        print("\nYou lost the winning chance")
        print("Try again to Win this game")
        shopkeeper = random.choice(fruitbag)
1,848
Python
.py
32
50.375
147
0.645925
deepikamerlin/Fruitshop-Game
8
0
0
EPL-2.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
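Editor's note: a one-line demonstration of why the replay check in the game above was rewritten; `x == "y" or "Y"` parses as `(x == "y") or "Y"`, and a non-empty string is always truthy.

play_again = "n"
print(play_again == "y" or "Y")   # -> 'Y' (truthy): the buggy branch always ran
print(play_again in ("y", "Y"))   # -> False: the intended test, used in the fix above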
2,285,930
ui.py
andreafailla_pix2beats/ui.py
import json  # io
import tempfile

import streamlit as st  # UI
from PIL import Image  # image processing

from backend import resize_and_convert, trackmaker  # processing
from backend import rolling_title  # animation
from constants import SCALES, NOTES, HARMONIES, SAMPLE_IMAGES  # constants
from my_presets import PRESETS


def init_session_state():
    """
    Initialize the session state with the default parameters
    :return:
    """
    for k, v in PRESETS["None"].items():
        if k not in st.session_state:
            if k != "octave":
                st.session_state[k] = v
            else:
                octave_options = ["Low", "Mid", "High"]
                st.session_state[k] = octave_options[v - 1]


def update_session_state(preset):
    """
    Update the session state with the parameters of the selected preset
    :param preset:
    :return:
    """
    for k, v in preset.items():
        if k != "octave":
            st.session_state[k] = v
        else:
            octave_options = ["Low", "Mid", "High"]
            st.session_state[k] = octave_options[v - 1]


def write_intro():
    """
    Write the intro of the app and define settings
    :return:
    """
    st.set_page_config(
        page_title="Pix2Beats",
        page_icon=":musical_note:",
        layout="centered",
        initial_sidebar_state="expanded",
    )
    st.markdown(
        """
        <style>
        .stApp {
            background: url("https://images.unsplash.com/photo-1557695126-fa2ce36f6828?q=80&w=2670&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D");
            background-size: cover;
            background-opacity: 0;
        }
        </style>""",
        unsafe_allow_html=True,
    )
    st.title(":blue[Pix]2:red[Beats]")
    plh = st.empty()
    # Display the description
    st.markdown(
        """
        Welcome to :blue[Pix]2:red[Beats]—a web application at the intersection of
        visual art and musical expression. Harnessing the power of Artificial
        Intelligence, :blue[Pix]2:red[Beats] transforms your images into sounds,
        unlocking a fascinating synergy between the realms of visual and auditory
        creativity.

        At the heart of :blue[Pix]2:red[Beats] lies the intuition that both images
        and sound can be effortlessly represented as matrices of numbers. This unique
        foundation allows us to create a one-of-a-kind mapping between color spaces
        and musical scales.

        Choose an image, tinker with the parameters, and let :blue[Pix]2:red[Beats]
        do the rest :musical_note:
        """
    )
    return plh


def handle_presets():
    """
    Function to choose and/or upload presets
    :return:
    """
    presetsel, presetupl, _ = st.columns([1, 1, 2])
    with presetsel:
        preset_name = st.selectbox(
            "***Choose a preset***",
            PRESETS.keys(),
            key="preset_select",
            help="Tip: you can modify an existing preset by selecting it and then selecting "
            "*None* from this list.",
        )
        if preset_name is not None:
            if preset_name != "None":
                update_session_state(PRESETS[preset_name])

    with presetupl:
        uploaded_preset = st.file_uploader(
            "***...or upload your own!***", type=["json"]
        )
        css = """
        <style>
        [data-testid='stFileUploader'] {
            width: max-content;
        }
        [data-testid='stFileUploader'] section {
            padding: 0;
            float: left;
        }
        [data-testid='stFileUploader'] section > input + div {
            display: none;
        }
        [data-testid='stFileUploader'] section + div {
            float: right;
            padding-top: 0;
        }
        </style>
        """
        st.markdown(css, unsafe_allow_html=True)
        if uploaded_preset is not None:
            preset_name = uploaded_preset.name.split(".")[0]
            preset = json.load(uploaded_preset)
            PRESETS[preset_name] = preset
            update_session_state(preset)


def make_sidebar_and_select_file():
    """
    Create the sidebar for the app
    The sidebar lets the user select an image to use
    :return: the image filename
    """
    filename = None
    if (
        st.sidebar.radio(
            "Image to use",
            ("Use Example Image", "Upload Image"),
            label_visibility="hidden",
        )
        == "Use Example Image"
    ):
        filename = st.sidebar.selectbox("Choose a sample image", SAMPLE_IMAGES)
        img = Image.open(filename)
    else:
        img = st.sidebar.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
        if img is not None:
            filename = img.name
            img = Image.open(img)
            filename = tmpdir + "/" + filename
            img.save(filename)

    # Display the image
    if filename is not None:
        st.sidebar.image(img)
    return filename


def make_widgets_and_get_parameters():
    """
    UI to get the parameters required to generate the track
    :return: list of parameters
    """
    col1, col2, col3 = st.columns([1, 1, 2])
    with col1:
        scale_options = list(SCALES.keys())
        scale = st.selectbox("***Choose the scale***", scale_options, key="scale")
        key = st.selectbox("***Choose the key***", NOTES, key="key")
    with col2:
        octave_options = ["Low", "Mid", "High"]
        octave = st.selectbox("***Choose the octave***", octave_options, key="octave")
        octave = octave_options.index(octave) + 1
        harmony_options = list(HARMONIES.keys())
        harmony = st.selectbox(
            "*Choose how to harmonize*", harmony_options, key="harmony"
        )
    with col3:
        t_value = st.slider(
            "***Note duration (seconds)***",
            min_value=0.10,
            max_value=1.0,
            step=0.01,
            key="t_value",
        )
        n_pixels = st.slider(
            "***Pixels to sample***",
            min_value=64,
            max_value=320,
            step=1,
            key="n_pixels",
        )

    randomize_octaves = st.checkbox(
        "***Randomize octaves***",
        key="randomize_octaves",
        help="If checked, the octaves of the notes will be randomized. "
        "Otherwise, the notes will be played in the same octave.",
    )
    resize_to_n_pixels = st.checkbox(
        "***Resize image to N pixels***",
        key="resize_to_n_pixels",
        help="If checked, the image will be resized to N pixels. "
        "Otherwise, the image will be used as is. "
        "N is the number of pixels selected above.",
    )

    # ***Start Pedalboard Definitions***
    st.markdown("## Pedalboard")
    with st.expander("###### Click here to see the pedalboard"):
        col4, col5, col6, col7 = st.columns(4)
        # Chorus Parameters
        with col4:
            st.markdown("### Chorus")
            rate_hz_chorus = st.slider(
                "rate_hz",
                min_value=0.0,
                max_value=100.0,
                step=0.1,
                key="rate_hz_chorus",
                help="The rate_hz parameter controls the rate of the chorus effect. ",
            )
        # Delay Parameters
        with col5:
            st.markdown("### Delay")
            delay_seconds = st.slider(
                "delay_seconds",
                key="delay_seconds",
                min_value=0.0,
                max_value=2.0,
                step=0.1,
                help="The delay_seconds parameter controls the delay of the effect. ",
            )
        # Distortion Parameters
        with col6:
            st.markdown("### Distortion")
            drive_db = st.slider(
                "drive_db",
                min_value=0.0,
                max_value=100.0,
                step=1.0,
                key="drive_db",
                help="The drive_db parameter controls the amount of distortion. ",
            )
        # Gain Parameters
        with col7:
            st.markdown("### Gain")
            gain_db = st.slider(
                "gain_db",
                min_value=0.0,
                max_value=100.0,
                step=1.0,
                key="gain_db",
                help="The gain_db parameter controls the gain of the effect. ",
            )

        st.markdown("### Reverb")
        rev1, rev2, rev3, rev4, rev5 = st.columns(5)
        # Reverb Parameters
        with rev1:
            room_size = st.slider(
                "room_size",
                min_value=0.0,
                max_value=1.0,
                step=0.1,
                key="room_size",
                help="The room_size parameter controls the size of the reverbing room. ",
            )
        with rev2:
            damping = st.slider(
                "damping", min_value=0.0, max_value=1.0, step=0.1, key="damping"
            )
        with rev3:
            wet_level = st.slider(
                "wet_level",
                min_value=0.0,
                max_value=1.0,
                step=0.1,
                key="wet_level",
                help="The wet_level parameter controls the amount of wet signal. ",
            )
        with rev4:
            dry_level = st.slider(
                "dry_level",
                min_value=0.1,
                max_value=1.0,
                step=0.1,
                key="dry_level",
                help="The dry_level parameter controls the amount of dry signal. ",
            )
        with rev5:
            width = st.slider(
                "width",
                min_value=0.0,
                max_value=1.0,
                step=0.1,
                key="width",
                help="The width parameter controls the width of the stereo image. ",
            )

        st.markdown("### Ladder Filter")
        lf1, lf2, lf3 = st.columns(3)
        # Ladder Filter Parameters
        with lf1:
            cutoff_hz = st.slider(
                "cutoff_hz",
                min_value=0.0,
                max_value=1000.0,
                step=1.0,
                key="cutoff_hz",
                help="The cutoff_hz parameter controls the cutoff frequency of the filter. ",
            )
        with lf2:
            resonance_lad = st.slider(
                "resonance",
                min_value=0.0,
                max_value=1.0,
                step=0.1,
                key="resonance_lad",
                help="The resonance parameter controls the resonance of the filter. ",
            )
        with lf3:
            drive_lad = st.slider(
                "drive",
                min_value=1.0,
                max_value=100.0,
                step=0.1,
                key="drive_lad",
                help="The drive parameter controls the drive of the filter. ",
            )

    return {
        "scale": scale,
        "key": key,
        "octave": octave,
        "harmony": harmony,
        "randomize_octaves": randomize_octaves,
        "resize_to_n_pixels": resize_to_n_pixels,
        "t_value": t_value,
        "n_pixels": n_pixels,
        "gain_db": gain_db,
        "drive_db": drive_db,
        "cutoff_hz": cutoff_hz,
        "resonance_lad": resonance_lad,
        "drive_lad": drive_lad,
        "delay_seconds": delay_seconds,
        "room_size": room_size,
        "damping": damping,
        "wet_level": wet_level,
        "dry_level": dry_level,
        "width": width,
        "rate_hz_chorus": rate_hz_chorus,
    }


def export_buttons(filename, param_dict, track, tmpdir):
    """
    Create the buttons to download the track and the preset
    :param filename:
    :param param_dict:
    :param track:
    :param tmpdir:
    :return:
    """
    b0, b1, _ = st.columns([1, 1, 2], gap="small")
    with b0:
        exp_track_name = (
            filename[len(tmpdir) + 1 :] if filename.startswith(tmpdir) else filename
        )
        st.download_button(
            "Download Track",
            data=track,
            file_name=f"{exp_track_name}.wav",
            mime="audio/wav",
        )
    with b1:
        exp_preset_name = (
            filename.split("/")[-1] if filename.startswith(tmpdir) else filename
        )
        st.download_button(
            "Export Preset",
            data=json.dumps(param_dict),
            file_name=f"{exp_preset_name}.json",
            mime="application/json",
        )


if __name__ == "__main__":
    # all newly created files will be deleted when the context manager exits
    with tempfile.TemporaryDirectory() as tmpdir:
        init_session_state()  # tells to use the default parameters
        plh = write_intro()  # returns placeholder for the rolling title
        handle_presets()  # load/upload presets
        filename = make_sidebar_and_select_file()  # select an image
        param_dict = make_widgets_and_get_parameters()
        if filename is not None:
            # convert the image to RGB and resize it if necessary
            img = resize_and_convert(
                filename,
                tmpdir=tmpdir,
                n_pixels=param_dict["n_pixels"]
                if param_dict["resize_to_n_pixels"]
                else None,
            )
            del param_dict["resize_to_n_pixels"]
            # convert to HSV, obtain signal, apply effects, write to disk
            track = trackmaker(img, **param_dict)
            # Display the track
            st.audio(track, format="audio/wav")
            export_buttons(filename, param_dict, track, tmpdir)

        # footer at the bottom of the sidebar
        st.sidebar.markdown(
            """
            <style>
            .sidebar .sidebar-content {
                background: url("https://images.unsplash.com/photo-1557695126-fa2ce36f6828?q=80&w=2670&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D");
                background-size: cover;
                background-opacity: 0;
            }
            a:link , a:visited{
                color: white;
                background-color: transparent;
                text-decoration: underline;
            }
            a:hover, a:active {
                color: red;
                background-color: transparent;
                text-decoration: underline;
            }
            </style>
            Developed by <a href=https://linktr.ee/andreafailla>Andrea Failla</a><br>
            Powered by <img src="https://user-images.githubusercontent.com/7164864/217935870-c0bc60a3-6fc0-4047-b011-7b4c59488c91.png" style="width:20px;height:10px;"><br>
            Leave a :star: on <a href=https://github.com/andreafailla/pix2beats>GitHub</a>!
            """,
            unsafe_allow_html=True,
        )

        towrite = "Where every image tells a unique musical story"
        rolling_title(plh, towrite, 0.05)
14,877
Python
.py
412
25.40534
195
0.542353
andreafailla/pix2beats
8
1
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
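Editor's note: a tiny consistency check of the octave encoding that init_session_state and update_session_state above rely on (presets store 1..3, widgets store a label).

octave_options = ["Low", "Mid", "High"]
for v in (1, 2, 3):
    label = octave_options[v - 1]                 # preset value -> widget label
    assert octave_options.index(label) + 1 == v   # widget label -> preset value
print("octave label <-> index round-trip is consistent")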
2,285,931
my_presets.py
andreafailla_pix2beats/my_presets.py
PRESETS = {
    "None": {
        "scale": "Major",
        "key": "A",
        "octave": 2,
        "harmony": "None",
        "randomize_octaves": True,
        "resize_to_n_pixels": False,
        "t_value": 0.2,
        "n_pixels": 64,
        "gain_db": 0.0,
        "drive_db": 0.0,
        "cutoff_hz": 0.0,
        "resonance_lad": 0.0,
        "drive_lad": 1.0,
        "delay_seconds": 0.0,
        "room_size": 0.0,
        "damping": 0.0,
        "wet_level": 0.0,
        "dry_level": 0.1,
        "width": 0.0,
        "rate_hz_chorus": 0.0,
    },
    "Bitcrusher": {
        "scale": "Natural Minor",
        "key": "G",
        "octave": 2,
        "harmony": "Perfect fifth",
        "randomize_octaves": True,
        "resize_to_n_pixels": False,
        "t_value": 0.1,
        "n_pixels": 100,
        "gain_db": 9.0,
        "drive_db": 14.0,
        "cutoff_hz": 81.0,
        "resonance_lad": 0.4,
        "drive_lad": 5.8,
        "delay_seconds": 0.0,
        "room_size": 0.1,
        "damping": 0.0,
        "wet_level": 0.0,
        "dry_level": 0.3,
        "width": 0.0,
        "rate_hz_chorus": 0.0,
    },
    "Sleepy Silly Penguin": {
        "scale": "Dorian",
        "key": "F",
        "octave": 3,
        "harmony": "Major third",
        "randomize_octaves": False,
        "t_value": 0.22,
        "n_pixels": 143,
        "gain_db": 0.0,
        "drive_db": 0.0,
        "cutoff_hz": 0.0,
        "resonance_lad": 0.0,
        "drive_lad": 1.0,
        "delay_seconds": 0.0,
        "room_size": 0.0,
        "damping": 0.0,
        "wet_level": 0.0,
        "dry_level": 0.1,
        "width": 0.0,
        "rate_hz_chorus": 0.3,
    },
    "Underground Cave": {
        "scale": "Mixolydian",
        "key": "C",
        "octave": 2,
        "harmony": "Major sixth",
        "randomize_octaves": False,
        "t_value": 0.2,
        "n_pixels": 219,
        "gain_db": 0.0,
        "drive_db": 0.0,
        "cutoff_hz": 0.0,
        "resonance_lad": 0.2,
        "drive_lad": 1.0,
        "delay_seconds": 0.1,
        "room_size": 0.2,
        "damping": 0.3,
        "wet_level": 0.0,
        "dry_level": 0.1,
        "width": 0.0,
        "rate_hz_chorus": 1.4,
    },
    "Distorted Bass": {
        "scale": "Aeolian",
        "key": "A#",
        "octave": 1,
        "harmony": "None",
        "randomize_octaves": False,
        "t_value": 0.3,
        "n_pixels": 64,
        "gain_db": 12.0,
        "drive_db": 4.0,
        "cutoff_hz": 0.0,
        "resonance_lad": 0.2,
        "drive_lad": 1.0,
        "delay_seconds": 0.0,
        "room_size": 0.1,
        "damping": 0.0,
        "wet_level": 0.0,
        "dry_level": 0.6,
        "width": 0.0,
        "rate_hz_chorus": 0.0,
    },
    "Bitcrusher (re:)": {
        "scale": "Natural Minor",
        "key": "G",
        "octave": 3,
        "harmony": "Major seventh",
        "randomize_octaves": True,
        "t_value": 0.1,
        "n_pixels": 100,
        "gain_db": 9.0,
        "drive_db": 14.0,
        "cutoff_hz": 81.0,
        "resonance_lad": 0.4,
        "drive_lad": 5.8,
        "delay_seconds": 0.0,
        "room_size": 0.1,
        "damping": 0.0,
        "wet_level": 0.0,
        "dry_level": 0.3,
        "width": 0.0,
        "rate_hz_chorus": 0.0,
    },
}
3,298
Python
.py
130
16.861538
36
0.424874
andreafailla/pix2beats
8
1
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
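Editor's note: a hedged sketch of merging an uploaded preset over the defaults. ui.py above stores uploads into PRESETS as-is; this stricter variant, with an illustrative JSON string, also filters out unknown keys against PRESETS["None"].

import json

uploaded = json.loads('{"scale": "Dorian", "key": "C", "octave": 1, "not_a_knob": 42}')
default = PRESETS["None"]  # from the file above
unknown = set(uploaded) - set(default)
merged = {**default, **{k: v for k, v in uploaded.items() if k in default}}
print("ignored unknown keys:", unknown)  # {'not_a_knob'}
print(merged["scale"], merged["key"], merged["octave"])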
2,285,932
constants.py
andreafailla_pix2beats/constants.py
SAMPLE_IMAGES = ["mona_lisa.png", "pixel_art_landscape.png", "sunflower.png"]

SAMPLE_RATE = 22050  # standard audio sample rate
FADE_DURATION = 0.015  # duration of fade in and fade out

NOTES = ["A", "A#", "B", "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#"]

PIANO_NOTES = {
    "A0": 27.5, "A#0": 29.13523509488062, "B0": 30.86770632850775,
    "C0": 32.70319566257483, "C#0": 34.64782887210901, "D0": 36.70809598967594,
    "D#0": 38.890872965260115, "E0": 41.20344461410875, "F0": 43.653528929125486,
    "F#0": 46.2493028389543, "G0": 48.999429497718666, "G#0": 51.91308719749314,
    "A1": 55.0, "A#1": 58.27047018976124, "B1": 61.7354126570155,
    "C1": 65.40639132514966, "C#1": 69.29565774421802, "D1": 73.41619197935188,
    "D#1": 77.78174593052023, "E1": 82.4068892282175, "F1": 87.30705785825097,
    "F#1": 92.4986056779086, "G1": 97.99885899543733, "G#1": 103.82617439498628,
    "A2": 110.0, "A#2": 116.54094037952248, "B2": 123.47082531403103,
    "C2": 130.8127826502993, "C#2": 138.59131548843604, "D2": 146.8323839587038,
    "D#2": 155.56349186104046, "E2": 164.81377845643496, "F2": 174.61411571650194,
    "F#2": 184.9972113558172, "G2": 195.99771799087463, "G#2": 207.65234878997256,
    "A3": 220.0, "A#3": 233.08188075904496, "B3": 246.94165062806206,
    "C3": 261.6255653005986, "C#3": 277.1826309768721, "D3": 293.6647679174076,
    "D#3": 311.1269837220809, "E3": 329.6275569128699, "F3": 349.2282314330039,
    "F#3": 369.9944227116344, "G3": 391.99543598174927, "G#3": 415.3046975799451,
    "A4": 440.0, "A#4": 466.1637615180899, "B4": 493.8833012561241,
    "C4": 523.2511306011972, "C#4": 554.3652619537442, "D4": 587.3295358348151,
    "D#4": 622.2539674441618, "E4": 659.2551138257398, "F4": 698.4564628660078,
    "F#4": 739.9888454232688, "G4": 783.9908719634985, "G#4": 830.6093951598903,
    "A5": 880.0, "A#5": 932.3275230361799, "B5": 987.7666025122483,
    "C5": 1046.5022612023945, "C#5": 1108.7305239074883, "D5": 1174.6590716696303,
    "D#5": 1244.5079348883237, "E5": 1318.5102276514797, "F5": 1396.9129257320155,
    "F#5": 1479.9776908465376, "G5": 1567.981743926997, "G#5": 1661.2187903197805,
    "A6": 1760.0, "A#6": 1864.6550460723597, "B6": 1975.533205024496,
    "C6": 2093.004522404789, "C#6": 2217.4610478149766, "D6": 2349.31814333926,
    "D#6": 2489.0158697766474, "E6": 2637.02045530296, "F6": 2793.825851464031,
    "F#6": 2959.955381693075, "G6": 3135.9634878539946, "G#6": 3322.437580639561,
    "A7": 3520.0, "A#7": 3729.3100921447194, "B7": 3951.066410048992,
    "C7": 4186.009044809578, "C#7": 4434.922095629953, "D7": 4698.63628667852,
    "D#7": 4978.031739553295, "E7": 5274.04091060592, "F7": 5587.651702928062,
    "F#7": 5919.91076338615, "G7": 6271.926975707989, "G#7": 6644.875161279122,
    "A8": 7040.0, "A#8": 7458.620184289437, "B8": 7902.132820097988,
    "C8": 8372.018089619156,
    "": 0.0,
}

SCALES = {
    "Major": [0, 2, 4, 5, 7, 9, 11],
    "Natural Minor": [0, 2, 3, 5, 7, 8, 10],
    "Dorian": [0, 2, 3, 5, 7, 9, 10],
    "Mixolydian": [0, 2, 4, 5, 7, 9, 10],
    "Aeolian": [0, 2, 3, 5, 7, 8, 10],
    "Phrygian": [0, 1, 3, 5, 7, 8, 10],
    "Lydian": [0, 2, 4, 6, 7, 9, 11],
    "Harmonic Minor": [0, 2, 3, 5, 7, 8, 11],
    "Melodic Minor": [0, 2, 3, 5, 7, 8, 9, 10, 11],
    "Locrian": [0, 1, 3, 5, 6, 8, 10],
    "Blues": [0, 2, 3, 4, 5, 7, 9, 10, 11],
    "Chromatic": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
}

HARMONIES = {
    "None": 1,
    "Major second": 9 / 8,
    "Minor third": 6 / 5,
    "Major third": 5 / 4,
    "Perfect fourth": 4 / 3,
    "Diatonic tritone": 45 / 32,
    "Perfect fifth": 3 / 2,
    "Minor sixth": 8 / 5,
    "Major sixth": 5 / 3,
    "Minor seventh": 9 / 5,
    "Major seventh": 15 / 8,
}

HSV_THRESHOLDS = [26, 52, 78, 104, 128, 154, 180]
4,040
Python
.py
135
25.2
77
0.58312
andreafailla/pix2beats
8
1
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
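Editor's note: a short sketch that turns the tables above into a playable scale, the same lookup backend.py's get_scale() performs (key, octave, and scale choice illustrative).

key, octave, scale_name = "C", 3, "Major"
idx = NOTES.index(key)
rotated = NOTES[idx:] + NOTES[:idx]  # scale degrees start at the chosen key
freqs = [PIANO_NOTES[rotated[step] + str(octave)] for step in SCALES[scale_name]]
print(freqs)  # seven frequencies for C major in that octave slice of PIANO_NOTES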
2,285,933
backend.py
andreafailla_pix2beats/backend.py
""" Loosely based on https://github.com/victormurcia/Making-Music-From-Images/blob/main/music_to_images.py """ import os import random import time import numpy as np # image from PIL import Image # audio from pedalboard import Pedalboard, Chorus, Reverb, Gain, LadderFilter, Delay, Distortion from pedalboard.io import AudioFile from scipy.io import wavfile from constants import * # reproducibility random.seed(42) def rolling_title(placeholder, text, delay=0.05): """ Displays title with rolling effect Placeholder is the container where the title will be displayed """ while True: for i in range(len(text)): time.sleep(delay) placeholder.markdown(f"#### {text[:i + 1]}") time.sleep(1) for i in range(len(text)): time.sleep(delay) placeholder.markdown(f"#### {text[:len(text) - i]}") def resize_and_convert(filename, tmpdir, n_pixels=None): """ Resize the image, convert to hsv, and save as png :param filename: :param tmpdir: :param n_pixels: :return: """ # Saves img = Image.open(filename).convert("RGB") if n_pixels is not None: # Calculate the aspect ratio aspect_ratio = img.width / img.height # Calculate the new width based on the desired number of pixels new_width = int((n_pixels * aspect_ratio) ** 0.5) # Resize the image while maintaining the aspect ratio img = img.resize((new_width, int(new_width / aspect_ratio))) if not filename.startswith(tmpdir): img.save(f"{tmpdir}/{filename.split('.')[0]}_resized.png", "PNG") return img def get_scale(octave, key, scale_name): """ returns the scale as a list of frequencies :param octave: :param key: :param scale_name: :return: """ # Find index of desired key idx = NOTES.index(key) # Redefine scale interval so that scale intervals begin with whichKey new_scale = NOTES[idx:12] + NOTES[:idx] # Choose scale scale = SCALES.get(scale_name) if scale is None: print("Invalid scale name") return # Initialize arrays freqs = [] for i in range(len(scale)): note = new_scale[scale[i]] + str(octave) freqs.append(PIANO_NOTES[note]) return freqs def hue2freq(h, scale_freqs): """ convert hue to frequency :param h: :param scale_freqs: :return: """ # hue to note for i in range(len(HSV_THRESHOLDS)): if i == len(HSV_THRESHOLDS) - 1 or ( HSV_THRESHOLDS[i] <= h < HSV_THRESHOLDS[i + 1] ): note = scale_freqs[i] break else: # Handle the case when hue is greater than the last threshold note = scale_freqs[0] return note def get_track_layers(img, scale, t, n_pixels, randomize_octaves, harmonize): """ Get the main track and the harmony layers as numpy arrays :param img: image :param scale: list of frequencies :param t: duration of each note in seconds :param n_pixels: number of pixels to sample and convert to notes :param randomize_octaves: whether to randomize the octaves of the notes :param harmonize: :return: """ # Get shape of image width, height = img.size # Initialize array that will contain Hues for every pixel in image hues = [] img = np.array(img) # Convert image to numpy array for val in range(n_pixels): i = random.randint(0, height - 1) j = random.randint(0, width - 1) hue = abs(img[i][j][0]) # This is the hue value at pixel coordinate (i,j) hues.append(hue) # Make dataframe containing hues and frequencies frequencies = [hue2freq(hue, scale) for hue in hues] track_layer = np.array([]) # This array will contain the track signal harmony_layer = np.array([]) # This array will contain the track harmony harmony_val = HARMONIES.get( harmonize ) # This will select the ratio for the desired harmony octaves = np.array( [0.5, 1, 2] ) # Go an octave below, same note, or go an octave above 
t = np.linspace(0, t, int(t * SAMPLE_RATE), endpoint=False) # To avoid clicking sounds, apply fade in and fade out fade_samples = int(FADE_DURATION * SAMPLE_RATE) fade_in = np.linspace(0, 1, fade_samples, endpoint=False) fade_out = np.linspace(1, 0, fade_samples, endpoint=False) for k in range(n_pixels): if randomize_octaves: octave = random.choice(octaves) else: octave = 1 val = octave * random.choice(frequencies) # Make note and harmony note note = 0.5 * np.sin(2 * np.pi * val * t) h_note = 0.5 * np.sin(2 * np.pi * val * t * harmony_val) note[:fade_samples] *= fade_in note[-fade_samples:] *= fade_out h_note[:fade_samples] *= fade_in h_note[-fade_samples:] *= fade_out # Place notes into corresponding arrays track_layer = np.concatenate([track_layer, note]) harmony_layer = np.concatenate([harmony_layer, h_note]) return track_layer, harmony_layer def apply_pb_effects( gain_db, drive_db, cutoff_hz, resonance_lad, drive_lad, delay_seconds, damping, room_size, wet_level, dry_level, width, rate_hz_chorus, audio, sr, ): board = Pedalboard( [ Gain(gain_db=gain_db), Distortion(drive_db=drive_db), LadderFilter( mode=LadderFilter.Mode.HPF12, cutoff_hz=cutoff_hz, resonance=resonance_lad, drive=drive_lad, ), Delay(delay_seconds=delay_seconds), Reverb( damping=damping, room_size=room_size, wet_level=wet_level, dry_level=dry_level, width=width, ), Chorus(rate_hz=rate_hz_chorus), ] ) return board(audio, sr) def trackmaker( img, scale, key, octave, harmony, randomize_octaves, t_value, n_pixels, gain_db, drive_db, cutoff_hz, resonance_lad, drive_lad, delay_seconds, room_size, damping, wet_level, dry_level, width, rate_hz_chorus, ): # Make the scale from parameters above scale_to_use = get_scale(octave, key, scale) # Make the track! track, harmony = get_track_layers( img, scale=scale_to_use, t=t_value, n_pixels=n_pixels, randomize_octaves=randomize_octaves, harmonize=harmony, ) # Write the track into a file track_combined = np.vstack((track, harmony)) wavfile.write( "track.wav", rate=SAMPLE_RATE, data=track_combined.T.astype(np.float32) ) # Read the track try: with AudioFile("track.wav", "r") as f: audio = f.read(f.frames) # Apply the pedalboard effects effected = apply_pb_effects( gain_db, drive_db, cutoff_hz, resonance_lad, drive_lad, delay_seconds, damping, room_size, wet_level, dry_level, width, rate_hz_chorus, audio, SAMPLE_RATE, ) # Write the audio back as a wav file: with AudioFile("track.wav", "w", SAMPLE_RATE, effected.shape[0]) as f: f.write(effected) # Read the processed track with open("track.wav", "rb") as f: audio_bytes = f.read() # Remove the track if os.path.exists("track.wav"): os.remove("track.wav") return audio_bytes except ValueError: return None
7,836
Python
.py
253
23.731225
88
0.605815
andreafailla/pix2beats
8
1
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
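Editor's note: a standalone replay of the per-note synthesis inside get_track_layers above; a sine at the note frequency with short linear fades so concatenated notes don't click. Frequency and duration are illustrative.

import numpy as np

SAMPLE_RATE = 22050
FADE_DURATION = 0.015
freq, dur = 440.0, 0.2
t = np.linspace(0, dur, int(dur * SAMPLE_RATE), endpoint=False)
note = 0.5 * np.sin(2 * np.pi * freq * t)
fade = int(FADE_DURATION * SAMPLE_RATE)
note[:fade] *= np.linspace(0, 1, fade, endpoint=False)
note[-fade:] *= np.linspace(1, 0, fade, endpoint=False)
print(note.shape, float(note[0]), float(note[-1]))  # end points faded toward 0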
2,285,934
ttt_download_datasets.py
ttt-matching-based-vos_ttt_matching_vos/ttt_download_datasets.py
from argparse import ArgumentParser
import gdown
import os
import tarfile
import urllib.request
import zipfile

"""
Arguments loading
"""
parser = ArgumentParser()
parser.add_argument('--root_dir', default='datasets/video_segmentation')
args = parser.parse_args()

##### DAVIS 2017 #########
davis_dir = os.path.join(args.root_dir, 'DAVIS/2017')
os.makedirs(davis_dir, exist_ok=True)

print('Downloading DAVIS 2017 trainval...')
url = 'https://drive.google.com/uc?id=1kiaxrX_4GuW6NmiVuKGSGVoKGWjOdp6d'
zip_filename = os.path.join(davis_dir, 'DAVIS-2017-trainval-480p.zip')
gdown.download(url, output=zip_filename, quiet=False)
with zipfile.ZipFile(zip_filename, 'r') as zip_file:
    zip_file.extractall(davis_dir)
os.rename(os.path.join(davis_dir, 'DAVIS'), os.path.join(davis_dir, 'trainval'))
os.remove(zip_filename)

print('Downloading DAVIS 2017 testdev...')
url = 'https://drive.google.com/uc?id=1fmkxU2v9cQwyb62Tj1xFDdh2p4kDsUzD'
zip_filename = os.path.join(davis_dir, 'DAVIS-2017-test-dev-480p.zip')
gdown.download(url, output=zip_filename, quiet=False)
with zipfile.ZipFile(zip_filename, 'r') as zip_file:
    zip_file.extractall(davis_dir)
os.rename(os.path.join(davis_dir, 'DAVIS'), os.path.join(davis_dir, 'test-dev'))
os.remove(zip_filename)

##### YouTube VOS 2018 #########
print('Downloading YouTubeVOS2018 val...')
youtube_dir = os.path.join(args.root_dir, 'YouTube2018')
os.makedirs(youtube_dir, exist_ok=True)
url = 'https://drive.google.com/uc?id=1-QrceIl5sUNTKz7Iq0UsWC6NLZq7girr'
zip_filename = os.path.join(youtube_dir, 'valid.zip')
gdown.download(url, output=zip_filename, quiet=False)
with zipfile.ZipFile(zip_filename, 'r') as zip_file:
    zip_file.extractall(youtube_dir)
os.remove(zip_filename)

print('Downloading YouTubeVOS2018 all frames valid...')
youtube_all_frames_dir = os.path.join(args.root_dir, 'YouTube2018/all_frames')
os.makedirs(youtube_all_frames_dir, exist_ok=True)
url = 'https://drive.google.com/uc?id=1yVoHM6zgdcL348cFpolFcEl4IC1gorbV'
zip_filename = os.path.join(youtube_all_frames_dir, 'valid.zip')
gdown.download(url, output=zip_filename, quiet=False)
with zipfile.ZipFile(zip_filename, 'r') as zip_file:
    zip_file.extractall(youtube_all_frames_dir)
os.remove(zip_filename)

##### DAVIS-C #########
print('Downloading DAVIS-C ...')
davisc_dir = os.path.join(args.root_dir, 'DAVIS-C')
os.makedirs(davisc_dir, exist_ok=True)
url = "http://ptak.felk.cvut.cz/personal/toliageo/share/davisc/davisc.tar.gz"
tar_filename = os.path.join(davisc_dir, "davisc.tar.gz")  # Name of the output file
urllib.request.urlretrieve(url, tar_filename)
with tarfile.open(tar_filename, 'r:gz') as tar_file:
    tar_file.extractall(davisc_dir)
os.remove(tar_filename)

##### MOSE #########
mose_dir = os.path.join(args.root_dir, 'MOSE')
os.makedirs(mose_dir, exist_ok=True)

print('Downloading MOSE valid ...')
url = "https://drive.google.com/uc?id=1yFoacQ0i3J5q6LmnTVVNTTgGocuPB_hR"
tar_filename = os.path.join(mose_dir, 'valid.tar.gz')
gdown.download(url, tar_filename, quiet=False)
with tarfile.open(tar_filename, 'r:gz') as tar_file:
    tar_file.extractall(mose_dir)
os.remove(tar_filename)

print('Downloading MOSE train ...')
url = "https://drive.google.com/uc?id=16Ns7a_frLaCo2ug18UIUkzVYFQqyd4N0"
tar_filename = os.path.join(mose_dir, 'train.tar.gz')
gdown.download(url, tar_filename, quiet=False)
with tarfile.open(tar_filename, 'r:gz') as tar_file:
    tar_file.extractall(mose_dir)
os.remove(tar_filename)
3,447
Python
.py
77
43.233766
83
0.757819
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
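Editor's note: the script above repeats one download-extract-cleanup pattern several times; a hedged sketch of the same steps factored into a helper (URL and paths are placeholders, not real dataset links).

import os
import zipfile

import gdown


def fetch_and_extract(url, out_dir, zip_name):
    """Download a Google Drive zip with gdown, unpack it, delete the archive."""
    os.makedirs(out_dir, exist_ok=True)
    zip_path = os.path.join(out_dir, zip_name)
    gdown.download(url, output=zip_path, quiet=False)
    with zipfile.ZipFile(zip_path, 'r') as zf:
        zf.extractall(out_dir)
    os.remove(zip_path)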
2,285,935
inference_core.py
ttt-matching-based-vos_ttt_matching_vos/STCN/inference_core.py
import torch

from inference_memory_bank import MemoryBank
from model.eval_network import STCN
from model.aggregate import aggregate

from util.tensor_util import pad_divide_by


class InferenceCore:
    def __init__(self, prop_net:STCN, images, num_objects, top_k=20,
                 mem_every=5, include_last=False):
        self.prop_net = prop_net
        self.mem_every = mem_every
        self.include_last = include_last

        # True dimensions
        t = images.shape[1]
        h, w = images.shape[-2:]

        # Pad each side to multiple of 16
        images, self.pad = pad_divide_by(images, 16)
        # Padded dimensions
        nh, nw = images.shape[-2:]

        self.images = images
        self.device = 'cuda'

        self.k = num_objects

        # Background included, not always consistent (i.e. sum up to 1)
        self.prob = torch.zeros((self.k+1, t, 1, nh, nw), dtype=torch.float32, device=self.device)
        self.prob[0] = 1e-7

        self.t, self.h, self.w = t, h, w
        self.nh, self.nw = nh, nw
        self.kh = self.nh//16
        self.kw = self.nw//16

        self.mem_bank = MemoryBank(k=self.k, top_k=top_k)

    def encode_key(self, idx):
        result = self.prop_net.encode_key(self.images[:,idx].cuda())
        return result

    def do_pass(self, key_k, key_v, idx, end_idx):
        self.mem_bank.add_memory(key_k, key_v)
        closest_ti = end_idx

        # Note that we never reach closest_ti, just the frame before it
        this_range = range(idx+1, closest_ti)
        end = closest_ti - 1

        for ti in this_range:
            k16, qv16, qf16, qf8, qf4 = self.encode_key(ti)
            out_mask = self.prop_net.segment_with_query(self.mem_bank, qf8, qf4, k16, qv16)

            out_mask = aggregate(out_mask, keep_bg=True)
            self.prob[:,ti] = out_mask

            if ti != end:
                is_mem_frame = ((ti % self.mem_every) == 0)
                if self.include_last or is_mem_frame:
                    prev_value = self.prop_net.encode_value(self.images[:,ti].cuda(), qf16, out_mask[1:])
                    prev_key = k16.unsqueeze(2)
                    self.mem_bank.add_memory(prev_key, prev_value, is_temp=not is_mem_frame)

        return closest_ti

    def interact(self, mask, frame_idx, end_idx):
        """
        mask represents the ground-truth masks associated to the frame idx,
        one mask per single object (pixel values are 0 or 1 depending if the
        pixel belongs to the object).
        """
        mask, _ = pad_divide_by(mask.cuda(), 16)

        # soft the masks for the first frame + add background mask
        self.prob[:, frame_idx] = aggregate(mask, keep_bg=True)

        # KV pair for the interacting frame
        key_k, _, qf16, _, _ = self.encode_key(frame_idx)
        key_v = self.prop_net.encode_value(self.images[:,frame_idx].cuda(), qf16, self.prob[1:,frame_idx].cuda())
        key_k = key_k.unsqueeze(2)

        # Propagate
        self.do_pass(key_k, key_v, frame_idx, end_idx)
3,003
Python
.py
63
38.206349
113
0.602604
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
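Editor's note: InferenceCore above pads H and W to multiples of 16 via util.tensor_util.pad_divide_by, which is not included in this dump; the sketch below is an assumed equivalent for illustration, not the repository's implementation.

import torch
import torch.nn.functional as F


def pad_divide_by_sketch(x, d=16):
    h, w = x.shape[-2:]
    new_h, new_w = -(-h // d) * d, -(-w // d) * d    # round up to multiples of d
    lh, lw = (new_h - h) // 2, (new_w - w) // 2
    pad = (lw, new_w - w - lw, lh, new_h - h - lh)   # left, right, top, bottom
    return F.pad(x, pad), pad


x = torch.zeros(1, 3, 3, 480, 854)
padded, pad = pad_divide_by_sketch(x)
print(padded.shape[-2:], pad)  # (480, 864): width rounded up, pad split left/right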
2,285,936
inference_memory_bank.py
ttt-matching-based-vos_ttt_matching_vos/STCN/inference_memory_bank.py
import math
import torch


def softmax_w_top(x, top):
    """
    Selects the top k memory bank correspondences for each query feature
    (and apply softmax on the top k correspondences).
    Every other correspondence is set to 0.
    """
    values, indices = torch.topk(x, k=top, dim=1)
    x_exp = values.exp_()

    x_exp /= torch.sum(x_exp, dim=1, keepdim=True)
    # The types should be the same already
    # some people report an error here so an additional guard is added
    x.zero_().scatter_(1, indices, x_exp.type(x.dtype))  # B * THW * HW

    return x


class MemoryBank:
    def __init__(self, k, top_k=20):
        self.top_k = top_k

        self.CK = None
        self.CV = None

        self.mem_k = None
        self.mem_v = None

        # fixed: initialize the temporary ("last frame") memory here as well, so
        # match_memory() cannot hit an undefined attribute before add_memory() runs
        self.temp_k = None
        self.temp_v = None

        self.num_objects = k

    def _global_matching(self, mk, qk):
        # NE means number of elements -- typically T*H*W
        B, CK, NE = mk.shape

        # See supplementary material
        a_sq = mk.pow(2).sum(1).unsqueeze(2)
        ab = mk.transpose(1, 2) @ qk

        # L2 affinity matrix
        affinity = (2*ab-a_sq) / math.sqrt(CK)  # B, NE, HW

        # keep only the top k, and apply a softmax on it
        affinity = softmax_w_top(affinity, top=self.top_k)  # B, NE, HW

        return affinity

    def _readout(self, affinity, mv):
        return torch.bmm(mv, affinity)

    def match_memory(self, qk):
        k = self.num_objects
        a, t, h, w = qk.shape

        qk = qk.flatten(start_dim=2)  # batch_size=1 , Ck, h*w

        if self.temp_k is not None:
            mk = torch.cat([self.mem_k, self.temp_k], 2)
            mv = torch.cat([self.mem_v, self.temp_v], 2)
        else:
            mk = self.mem_k
            mv = self.mem_v

        affinity = self._global_matching(mk, qk)  # (h * w * nb images in memory) x (h * w)

        # One affinity for all
        readout_mem = self._readout(affinity.expand(k, -1, -1), mv)

        return readout_mem.view(k, self.CV, h, w)

    def add_memory(self, key, value, is_temp=False):
        # Temp is for "last frame"
        # Not always used
        # But can always be flushed
        self.temp_k = None
        self.temp_v = None

        key = key.flatten(start_dim=2)
        value = value.flatten(start_dim=2)

        if self.mem_k is None:
            # First frame, just shove it in
            self.mem_k = key
            self.mem_v = value
            self.CK = key.shape[1]
            self.CV = value.shape[1]
        else:
            if is_temp:
                self.temp_k = key
                self.temp_v = value
            else:
                self.mem_k = torch.cat([self.mem_k, key], 2)
                self.mem_v = torch.cat([self.mem_v, value], 2)
2,725
Python
.py
70
29.742857
95
0.560168
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
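Editor's note: a toy run of softmax_w_top above (B=1, THW=4, HW=1). Only the top-k entries along dim=1 survive, renormalized to sum to 1; every other correspondence becomes exactly 0.

import torch

x = torch.tensor([[[3.0], [1.0], [2.0], [0.0]]])
out = softmax_w_top(x.clone(), top=2)  # function from the file above
print(out.squeeze())  # ~tensor([0.7311, 0.0000, 0.2689, 0.0000])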
2,285,937
train.py
ttt-matching-based-vos_ttt_matching_vos/STCN/train.py
import datetime
from os import path
import math
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, ConcatDataset
import torch.distributed as distributed

from model.model import STCNModel
from dataset.static_dataset import StaticTransformDataset
from dataset.vos_dataset import VOSDataset

from util.logger import TensorboardLogger
from util.hyper_para import HyperParameters
from util.load_subset import load_sub_davis, load_sub_yv


"""
Initial setup
"""
# Init distributed environment
distributed.init_process_group(backend="nccl")

# Set seed to ensure the same initialization
torch.manual_seed(14159265)
np.random.seed(14159265)
random.seed(14159265)

print('CUDA Device count: ', torch.cuda.device_count())

# Parse command line arguments
para = HyperParameters()
para.parse()

if para['benchmark']:
    torch.backends.cudnn.benchmark = True

local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(local_rank)

print('I am rank %d in this world of size %d!' % (local_rank, world_size))

"""
Model related
"""
if local_rank == 0:
    # Logging
    if para['id'].lower() != 'null':
        print('I will take the role of logging!')
        long_id = '%s_%s' % (datetime.datetime.now().strftime('%b%d_%H.%M.%S'), para['id'])
    else:
        long_id = None
    logger = TensorboardLogger(para['id'], long_id)
    logger.log_string('hyperpara', str(para))

    # Construct the rank 0 model
    model = STCNModel(para, logger=logger,
                      save_path=path.join('saves', long_id, long_id) if long_id is not None else None,
                      local_rank=local_rank, world_size=world_size).train()
else:
    # Construct model for other ranks
    model = STCNModel(para, local_rank=local_rank, world_size=world_size).train()

# Load pretrained model if needed
if para['load_model'] is not None:
    total_iter = model.load_model(para['load_model'])
    print('Previously trained model loaded!')
else:
    total_iter = 0

if para['load_network'] is not None:
    model.load_network(para['load_network'])
    print('Previously trained network loaded!')

"""
Dataloader related
"""
# To re-seed the randomness every time we start a worker
def worker_init_fn(worker_id):
    return np.random.seed(torch.initial_seed() % (2**31) + worker_id + local_rank * 100)


def construct_loader(dataset):
    train_sampler = torch.utils.data.distributed.DistributedSampler(dataset, rank=local_rank, shuffle=True)
    train_loader = DataLoader(dataset, para['batch_size'], sampler=train_sampler,
                              num_workers=para['num_workers'],
                              worker_init_fn=worker_init_fn, drop_last=True, pin_memory=True)
    return train_sampler, train_loader


def renew_vos_loader(max_skip):
    # //5 because we only have annotation for every five frames
    yv_dataset = VOSDataset(path.join(yv_root, 'JPEGImages'),
                            path.join(yv_root, 'Annotations'), max_skip//5, is_bl=False, subset=load_sub_yv())
    davis_dataset = VOSDataset(path.join(davis_root, 'JPEGImages', '480p'),
                               path.join(davis_root, 'Annotations', '480p'), max_skip, is_bl=False, subset=load_sub_davis())
    train_dataset = ConcatDataset([davis_dataset]*5 + [yv_dataset])

    print('YouTube dataset size: ', len(yv_dataset))
    print('DAVIS dataset size: ', len(davis_dataset))
    print('Concat dataset size: ', len(train_dataset))
    print('Renewed with skip: ', max_skip)

    return construct_loader(train_dataset)


def renew_bl_loader(max_skip):
    train_dataset = VOSDataset(path.join(bl_root, 'JPEGImages'),
                               path.join(bl_root, 'Annotations'), max_skip, is_bl=True)

    print('Blender dataset size: ', len(train_dataset))
    print('Renewed with skip: ', max_skip)

    return construct_loader(train_dataset)


"""
Dataset related
"""

"""
These define the training schedule of the distance between frames
We will switch to skip_values[i] once we pass the percentage specified by increase_skip_fraction[i]
Not effective for stage 0 training
"""
skip_values = [10, 15, 20, 25, 5]

if para['stage'] == 0:
    static_root = path.expanduser(para['static_root'])
    fss_dataset = StaticTransformDataset(path.join(static_root, 'fss'), method=0)
    duts_tr_dataset = StaticTransformDataset(path.join(static_root, 'DUTS-TR'), method=1)
    duts_te_dataset = StaticTransformDataset(path.join(static_root, 'DUTS-TE'), method=1)
    ecssd_dataset = StaticTransformDataset(path.join(static_root, 'ecssd'), method=1)

    big_dataset = StaticTransformDataset(path.join(static_root, 'BIG_small'), method=1)
    hrsod_dataset = StaticTransformDataset(path.join(static_root, 'HRSOD_small'), method=1)

    # BIG and HRSOD have higher quality, use them more
    train_dataset = ConcatDataset([fss_dataset, duts_tr_dataset, duts_te_dataset, ecssd_dataset]
                                  + [big_dataset, hrsod_dataset]*5)
    train_sampler, train_loader = construct_loader(train_dataset)

    print('Static dataset size: ', len(train_dataset))
elif para['stage'] == 1:
    increase_skip_fraction = [0.1, 0.2, 0.3, 0.4, 0.8, 1.0]
    bl_root = path.join(path.expanduser(para['bl_root']))

    train_sampler, train_loader = renew_bl_loader(5)
    renew_loader = renew_bl_loader
else:
    # stage 2 or 3
    increase_skip_fraction = [0.1, 0.2, 0.3, 0.4, 0.9, 1.0]
    # VOS dataset, 480p is used for both datasets
    yv_root = path.join(path.expanduser(para['yv_root']), 'train_480p')
    davis_root = path.join(path.expanduser(para['davis_root']), '2017', 'trainval')

    train_sampler, train_loader = renew_vos_loader(5)
    renew_loader = renew_vos_loader


"""
Determine current/max epoch
"""
total_epoch = math.ceil(para['iterations']/len(train_loader))
current_epoch = total_iter // len(train_loader)
print('Number of training epochs (the last epoch might not complete): ', total_epoch)
if para['stage'] != 0:
    increase_skip_epoch = [round(total_epoch*f) for f in increase_skip_fraction]
    # Skip will only change after an epoch, not in the middle
    print('The skip value will increase approximately at the following epochs: ', increase_skip_epoch[:-1])

"""
Starts training
"""
# Need this to select random bases in different workers
np.random.seed(np.random.randint(2**30-1) + local_rank*100)
try:
    for e in range(current_epoch, total_epoch):
        print('Epoch %d/%d' % (e, total_epoch))
        if para['stage'] != 0 and e != total_epoch and e >= increase_skip_epoch[0]:
            while e >= increase_skip_epoch[0]:
                cur_skip = skip_values[0]
                skip_values = skip_values[1:]
                increase_skip_epoch = increase_skip_epoch[1:]
            print('Increasing skip to: ', cur_skip)
            train_sampler, train_loader = renew_loader(cur_skip)

        # Crucial for randomness!
        train_sampler.set_epoch(e)

        # Train loop
        model.train()
        for data in train_loader:
            model.do_pass(data, total_iter)
            total_iter += 1

            if total_iter >= para['iterations']:
                break
finally:
    if not para['debug'] and model.logger is not None and total_iter > 5000:
        model.save(total_iter)

    # Clean up
    distributed.destroy_process_group()
7,258
Python
.py
164
39.256098
117
0.694433
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
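Editor's note: a small replay of how the curriculum in train.py above converts skip fractions into the epochs where the frame-skip increases (iteration count and loader length illustrative).

import math

iterations, loader_len = 150000, 1000
total_epoch = math.ceil(iterations / loader_len)
increase_skip_fraction = [0.1, 0.2, 0.3, 0.4, 0.9, 1.0]
increase_skip_epoch = [round(total_epoch * f) for f in increase_skip_fraction]
print(total_epoch, increase_skip_epoch)  # 150 [15, 30, 45, 60, 135, 150]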
2,285,938
inference_core_yv.py
ttt-matching-based-vos_ttt_matching_vos/STCN/inference_core_yv.py
import torch

from inference_memory_bank import MemoryBank
from model.eval_network import STCN
from model.aggregate import aggregate

from util.tensor_util import pad_divide_by


class InferenceCore:
    def __init__(self, prop_net:STCN, images, num_objects, top_k=20,
                 mem_every=5, include_last=False, req_frames=None):
        self.prop_net = prop_net
        self.mem_every = mem_every
        self.include_last = include_last

        # We HAVE to get the output for these frames
        # None if all frames are required
        self.req_frames = req_frames

        self.top_k = top_k

        # True dimensions
        t = images.shape[1]
        h, w = images.shape[-2:]

        # Pad each side to multiple of 16
        images, self.pad = pad_divide_by(images, 16)
        # Padded dimensions
        nh, nw = images.shape[-2:]

        self.images = images
        self.device = 'cuda'

        self.k = num_objects

        # Background included, not always consistent (i.e. sum up to 1)
        self.prob = torch.zeros((self.k+1, t, 1, nh, nw), dtype=torch.float32, device=self.device)
        self.prob[0] = 1e-7

        self.t, self.h, self.w = t, h, w
        self.nh, self.nw = nh, nw
        self.kh = self.nh//16
        self.kw = self.nw//16

        # list of objects with usable memory
        self.enabled_obj = []

        self.mem_banks = dict()

    def encode_key(self, idx):
        result = self.prop_net.encode_key(self.images[:,idx].cuda())
        return result

    def do_pass(self, key_k, key_v, idx, end_idx):
        closest_ti = end_idx

        K, CK, _, H, W = key_k.shape
        _, CV, _, _, _ = key_v.shape

        for i, oi in enumerate(self.enabled_obj):
            if oi not in self.mem_banks:
                self.mem_banks[oi] = MemoryBank(k=1, top_k=self.top_k)
            self.mem_banks[oi].add_memory(key_k, key_v[i:i+1])

        last_ti = idx

        # Note that we never reach closest_ti, just the frame before it
        this_range = range(idx+1, closest_ti)
        step = +1
        end = closest_ti - 1

        for ti in this_range:
            is_mem_frame = (abs(ti-last_ti) >= self.mem_every)
            # Why even work on it if it is not required for memory/output
            if (not is_mem_frame) and (not self.include_last) and (self.req_frames is not None) and (ti not in self.req_frames):
                continue

            k16, qv16, qf16, qf8, qf4 = self.encode_key(ti)

            # After this step all keys will have the same size
            out_mask = torch.cat([
                self.prop_net.segment_with_query(self.mem_banks[oi], qf8, qf4, k16, qv16)
            for oi in self.enabled_obj], 0)

            out_mask = aggregate(out_mask, keep_bg=True)

            self.prob[0,ti] = out_mask[0]
            for i, oi in enumerate(self.enabled_obj):
                self.prob[oi,ti] = out_mask[i+1]

            if ti != end:
                if self.include_last or is_mem_frame:
                    prev_value = self.prop_net.encode_value(self.images[:,ti].cuda(), qf16, out_mask[1:])
                    prev_key = k16.unsqueeze(2)
                    for i, oi in enumerate(self.enabled_obj):
                        self.mem_banks[oi].add_memory(prev_key, prev_value[i:i+1], is_temp=not is_mem_frame)

                    if is_mem_frame:
                        last_ti = ti

        return closest_ti

    def interact(self, mask, frame_idx, end_idx, obj_idx):
        # In youtube mode, we interact with a subset of object id at a time
        mask, _ = pad_divide_by(mask.cuda(), 16)

        # update objects that have been labeled
        self.enabled_obj.extend(obj_idx)

        # Set other prob of mask regions to zero
        mask_regions = (mask[1:].sum(0) > 0.5)
        self.prob[:, frame_idx, mask_regions] = 0
        self.prob[obj_idx, frame_idx] = mask[obj_idx]

        self.prob[:, frame_idx] = aggregate(self.prob[1:, frame_idx], keep_bg=True)

        # KV pair for the interacting frame
        key_k, _, qf16, _, _ = self.encode_key(frame_idx)
        key_v = self.prop_net.encode_value(self.images[:,frame_idx].cuda(), qf16, self.prob[self.enabled_obj,frame_idx].cuda())
        key_k = key_k.unsqueeze(2)

        # Propagate
        self.do_pass(key_k, key_v, frame_idx, end_idx)
4,317
Python
.py
90
37.244444
128
0.579638
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,939
eval_all_datasets_ttt.py
ttt-matching-based-vos_ttt_matching_vos/STCN/eval_all_datasets_ttt.py
import random
import warnings

warnings.filterwarnings("ignore")

import torch.nn as nn
from torch.utils.data import DataLoader

from model.eval_network import STCN
from model.losses import EntropyLoss
from ttt.config.load_config import load_config
from ttt.model.model_ttt import STCN_TTT
from ttt.utils.meter import AverageMeterDict
from ttt.utils.helper import *
from ttt.dataset.vos_dataset_ttt import VOSDataset


def test_time_train_and_evaluate_one_video(args, video_data, pretrained_model):
    """
    For a given video, runs the test time training process which updates the weights
    of the pre-trained model. Then evaluates the updated model on the given video.
    """
    video_name = video_data['info']['name'][0]

    # Fix the seed for this video
    seed = args.seed
    output_seed_dir = os.path.join(args.output_dir, str(seed))
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Initialize the training and inference models
    test_time_training_model = STCN_TTT().cuda().eval()
    test_time_inference_model = STCN().cuda().eval()
    weights = pretrained_model.state_dict()

    print('\nvideo:', video_name, 'shape', video_data['rgb'][0].shape,
          'objects:', len(video_data['info']['labels'][0]))

    if args.ttt_number_iterations_per_jump_step > 0:
        test_time_training_model.copy_weights_from(weights)
        test_time_training_model.freeze_parse(args.ttt_frozen_layers)

    result_dir = os.path.join(output_seed_dir, 'final', video_name)
    if not os.path.exists(result_dir) or not len(os.listdir(result_dir)) or args.overwrite:
        # Run test time training
        logs, ttt_models = test_time_train_one_video(
            test_time_training_model, video_name, video_data, output_seed_dir, args)

        # Run test time inference
        test_time_evaluate_one_video(ttt_models, output_seed_dir, test_time_inference_model, video_data)

        # Save in logs
        if logs is not None:
            log_dir = os.path.join(output_seed_dir, 'logs')
            os.makedirs(log_dir, exist_ok=True)
            dump_logs(os.path.join(log_dir, video_name + '.txt'), logs)


def test_time_train_one_video(model, video_name, vid_reader, result_dir, args):
    """
    For a given video, runs the test time training process which updates the weights
    of the pre-trained model.
    """
    ce_criterion = nn.CrossEntropyLoss()
    ent_criterion = EntropyLoss(dim=1)

    val_model = STCN().cuda().eval()
    model.copy_weights_to(val_model)
    video_inference(vid_reader, val_model, os.path.join(result_dir, 'temp'), args)

    # Parameters for VOSDataset
    frames_with_gt = sorted(list(vid_reader['info']['gt_obj'].keys())) if args.dataset_name == "youtube" else [0]
    max_obj = 6
    all_objects = len(frames_with_gt) == 1
    frame_dir, all_frames_dir = get_frame_dirs(args)

    iteration, ttt_models, logs = 0, dict(), []
    for _ in range(args.ttt_number_jump_steps):
        for max_jump, num_frames in zip(args.ttt_max_jump_step, args.ttt_sequence_length):
            # Evaluate the current model and save the results in the temp folder
            if args.ttt_loss == "tt-mcc":
                model.copy_weights_to(val_model)
                video_inference(vid_reader, val_model, os.path.join(result_dir, 'temp'), args)

            dataset = VOSDataset(frame_dir,
                                 os.path.join(result_dir, 'temp'),
                                 video_name,
                                 max_jump,
                                 num_frames,
                                 total_sequences=args.ttt_number_iterations_per_jump_step * args.ttt_batch_size,
                                 resolution=args.ttt_resolution,
                                 # scale=args.ttt_scale,
                                 # ratio=args.ttt_ratio,
                                 augmentations=args.ttt_augmentations,
                                 check_last=args.ttt_loss == "tt-mcc",
                                 all_objects=all_objects,
                                 max_obj=max_obj,
                                 frames_with_gt=frames_with_gt,
                                 im_root_all_frames=all_frames_dir)
            train_loader = DataLoader(dataset, args.ttt_batch_size, num_workers=16,
                                      pin_memory=True, worker_init_fn=worker_init_fn)

            optimizer = torch.optim.Adam(filter(
                lambda p: p.requires_grad, model.parameters()), lr=args.ttt_lr, weight_decay=1e-7)
            scaler = torch.cuda.amp.GradScaler()
            meters = AverageMeterDict()

            for data in train_loader:
                optimizer.zero_grad()
                for k, v in data.items():
                    if type(v) != list and type(v) != dict and type(v) != int:
                        data[k] = v.cuda(non_blocking=True)

                with torch.cuda.amp.autocast(enabled=args.amp):
                    if args.ttt_loss == "tt-ae":
                        logits_f, masks_f = model.do_single_pass(data)
                    else:
                        logits_f, logits_b, masks_f, masks_b = model.do_cycle_pass(
                            data, backwards=args.ttt_loss == "tt-mcc", encode_first=False)

                    # Loss
                    if args.ttt_loss == "tt-mcc":  # Mask Cycle Consistency
                        loss = ce_criterion(logits_b[-1], data['cls_gt'][:, 0])
                    elif args.ttt_loss == "tt-ae":  # Auto Encoder
                        loss = ce_criterion(logits_f[0], data['cls_gt'][:, 0])
                    elif args.ttt_loss == "tt-ent":  # Entropy
                        loss = ent_criterion(torch.cat(logits_f, 0))
                    meters.update('loss', loss)

                if args.amp:
                    scaler.scale(loss).backward()
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    loss.backward()
                    optimizer.step()
                iteration += 1

    ttt_models['final'] = model.state_dict()
    model.copy_weights_to(val_model)
    return logs, ttt_models


def test_time_evaluate_one_video(ttt_models, output_seed_dir, test_time_inference_model, data):
    for k, v in ttt_models.items():
        result_dir = os.path.join(output_seed_dir, k)
        os.makedirs(result_dir, exist_ok=True)
        test_time_inference_model.load_state_dict(v)
        video_inference(data, test_time_inference_model, result_dir, args)


def get_parameters():
    args = load_config()

    if len(args.ttt_max_jump_step) > 1 and len(args.ttt_sequence_length) == 1:
        args.ttt_sequence_length = args.ttt_sequence_length * len(args.ttt_max_jump_step)
    elif len(args.ttt_sequence_length) > 1 and len(args.ttt_max_jump_step) == 1:
        args.ttt_max_jump_step = len(args.ttt_sequence_length) * args.ttt_max_jump_step
    elif len(args.ttt_max_jump_step) != len(args.ttt_sequence_length):
        raise Exception('ttt_max_jump_step and ttt_sequence_length should be of equal size or 1.')

    args.palette = get_palette(args)  # load palette
    os.makedirs(args.output_dir, exist_ok=True)  # create the output dir

    print('\nInput Arguments')
    print('---------------')
    for k, v in sorted(dict(vars(args)).items()):
        print('%s: %s' % (k, str(v)))
    print()
    return args


if __name__ == '__main__':
    """
    Arguments loading
    """
    args = get_parameters()

    # Setup Dataset
    test_dataset = get_test_dataset(args)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)

    # Load our checkpoint
    pretrained_model = get_stcn_model(args)

    for _, video_data in enumerate(test_loader):
        test_time_train_and_evaluate_one_video(args, video_data, pretrained_model)
7,930
Python
.py
149
40.946309
120
0.593463
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
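The tt-mcc branch above trains through a mask cycle: predictions are propagated forward over the sampled frames, then propagated back to the first frame, where a cross-entropy loss against the known first-frame mask drives the update. Below is a minimal, self-contained sketch of that step; the hypothetical ToyProp module stands in for STCN_TTT and random tensors stand in for real frames, so this illustrates only the loss structure, not the real model.

import torch
import torch.nn as nn

class ToyProp(nn.Module):
    """Stand-in for STCN_TTT: propagates per-frame logits with one conv."""
    def __init__(self, num_classes=2):
        super().__init__()
        self.head = nn.Conv2d(num_classes, num_classes, 3, padding=1)

    def forward(self, seq):
        out, x = [], seq[0]
        for _ in range(len(seq) - 1):
            x = self.head(x)          # propagate to the next frame
            out.append(x)
        return out

model = ToyProp()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5, weight_decay=1e-7)
frames = [torch.randn(1, 2, 32, 32) for _ in range(3)]   # toy frame logits
cls_gt = torch.randint(0, 2, (1, 32, 32))                # first-frame labels

logits_f = model(frames)                             # forward pass
logits_b = model(logits_f[::-1])                     # backward pass
loss = nn.CrossEntropyLoss()(logits_b[-1], cls_gt)   # cycle consistency
optimizer.zero_grad()
loss.backward()
optimizer.step()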
2,285,940
load_config.py
ttt-matching-based-vos_ttt_matching_vos/STCN/ttt/config/load_config.py
import argparse
import yaml
import os

""" Inspired from https://gist.github.com/robcowie/7c39e2afb140c905fdf2661b93ff3df9 """


class ConfigAction(argparse.Action):
    """Add configuration file to current defaults."""

    def __init__(self, *args, **kwargs):
        """Config action is a search path, so a list, so one or more argument."""
        super().__init__(*args, nargs='+', **kwargs)

    def __call__(self, parser, ns, values, option):
        """Change defaults for the namespace; still allows overriding from commandline options."""
        for path in values:
            parser.set_defaults(**self.parse_config(path))

    def parse_config(self, path):
        """Abstract implementation of config file parsing."""
        raise NotImplementedError()


class YamlConfigAction(ConfigAction):
    """YAML config file parser action."""

    def parse_config(self, path):
        try:
            with open(os.path.expanduser(path), 'r') as handle:
                return self.reformat(yaml.safe_load(handle))
        except (FileNotFoundError, yaml.parser.ParserError) as e:
            raise argparse.ArgumentError(self, e)

    def reformat(self, yaml_dict):
        """Flattens the config sections and lower-cases the keys."""
        reformatted = {}
        for section_key in yaml_dict:
            for key in yaml_dict[section_key]:
                reformatted[key.lower()] = yaml_dict[section_key][key]
        return reformatted


class ConfigArgumentParser(argparse.ArgumentParser):
    """Argument parser which supports parsing extra config files.

    Config files specified on the commandline through the YamlConfigAction
    arguments modify the default values on the spot. If a default is
    specified when adding an argument, it also gets immediately loaded.

    This will typically be used in a subclass, like this:

        self.add_argument('--config', action=YamlConfigAction, default=self.default_config())
    """

    def _add_action(self, action):
        # This overrides the add_argument() routine, which is where actions
        # get registered. It is done so we can properly load the default
        # config file before the action actually gets fired. Ideally, we'd
        # load the default config only if the action *never* gets fired
        # (but still setting defaults for the namespace) but argparse
        # doesn't give us that opportunity (and even if it would, it
        # wouldn't retroactively change the Namespace object in
        # parse_args() so it wouldn't work).
        action = super()._add_action(action)
        if isinstance(action, ConfigAction) and action.default is not None:
            # fire the action, later calls can override defaults
            try:
                action(self, None, action.default, None)
            except argparse.ArgumentError:
                # ignore errors from missing default
                pass
        return action


def load_config():
    parser = ConfigArgumentParser('test')
    parser.add_argument('--config_name', default='stcn_s01_mcc')

    # DATASET PARAMETERS
    parser.add_argument('--dataset_name', default='davis', choices=['davis', 'youtube', 'mose', 'davis-c'])
    parser.add_argument('--dataset_dir', default='prout', help='Path to the dataset')
    parser.add_argument('--corrupted_image_dir', default=None, help='Path to DAVIS-C corruption images')
    parser.add_argument('--split', default='val', choices=['val', 'testdev', 'valid', 'train', 'test'],
                        help='Split of the dataset to be processed')
    parser.add_argument('--palette_video_name', default=None, type=str, help='Mask to get the palette colours')
    parser.add_argument('--video_set_filename', default=None, type=str,
                        help='Filename containing a subset of videos to process')
    parser.add_argument('--output_dir', help='Path to output folder')
    parser.add_argument('--seed', default=1, type=int, help='Seed used for the random number generators')

    namespace, _ = parser.parse_known_args()
    config_filename = os.path.join("ttt_configs", f'{namespace.config_name}.yaml')
    parser.add_argument('--config', action=YamlConfigAction, default=[config_filename])
    return parser.parse_args()


if __name__ == "__main__":
    args = load_config()
    for arg in vars(args):
        print(f"arg {arg}, {getattr(args, arg)}")
    print(args.dataset_name)
4,415
Python
.py
87
42.701149
107
0.662027
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
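The machinery in load_config.py ultimately reduces to one argparse feature: parser.set_defaults. The YAML mapping is flattened, lower-cased, and folded into the parser defaults, so values given explicitly on the command line still win. A minimal sketch of that mechanism follows; YAML_TEXT stands in for a config file on disk, and the real code wires this through a custom Action fired at parser-construction time.

import argparse
import yaml

YAML_TEXT = "ttt_lr: 0.0001\nttt_batch_size: 4\n"   # stand-in for ttt_configs/<name>.yaml

parser = argparse.ArgumentParser()
parser.add_argument('--ttt_lr', type=float, default=1e-5)
parser.add_argument('--ttt_batch_size', type=int, default=1)

# Fold the (already flattened, lower-cased) mapping into the defaults
parser.set_defaults(**yaml.safe_load(YAML_TEXT))

print(parser.parse_args([]))                    # config values used as defaults
print(parser.parse_args(['--ttt_lr', '1e-3']))  # an explicit flag still overrides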
2,285,941
save_config.py
ttt-matching-based-vos_ttt_matching_vos/STCN/ttt/config/save_config.py
import argparse import os import yaml def parse_arguments(): formatter = lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog, max_help_position=80) parser = argparse.ArgumentParser( description='This is the training code of a video similarity network based on self-supervision', formatter_class=formatter) parser.add_argument('--config_name') parser.add_argument('--model_filename', default='saves/stcn_s01.pth', help='Path to the pretrained model weights') # CHANGING parser.add_argument('--ttt_lr', default=1e-5, type=float, help='Learning rate use for TTT') # CHANGING # Global parameters parser.add_argument('--overwrite', action='store_true', help='Flag for overwriting the result outputs') # Parameters for the dataloader parser.add_argument('--ttt_sequence_length', default=[3], type=lambda x: list(map(int, x.split(','))), help='Number(s) of frames used in frame sequences used for TTT. It can be one number or ' 'comma-separated e.g. 3,4,5') parser.add_argument('--ttt_resolution', default=480, type=int, help='Frame resolution of the frames in the frame sequence') parser.add_argument('--ttt_batch_size', default=1, type=int, help='Number of sequences used for each batch for TTT') # Parameters for the loss parser.add_argument('--ttt_loss', default="tt-mcc", type=str, choices=['tt-mcc', 'tt-ae', 'tt-ent'], help='Loss function used') # CHANGING parser.add_argument('--ttt_frozen_layers', default=None, type=str, help='Parts of the network that remain frozen during training') parser.add_argument('--ttt_max_jump_step', default=[10], type=lambda x: list(map(int, x.split(','))), help='Maximum jump step between two frames in the frame sequence. It can be one number or ' 'comma-separated e.g. 5,10,15') parser.add_argument('--ttt_number_jump_steps', default=10, type=int, help='Number of different jump step sampled for a video. 
For each jump step sampled') parser.add_argument('--ttt_number_iterations_per_jump_step', default=10, type=int, help='Number of iterations run for one --ttt_jump_steps') # Parameters for augmentations parser.add_argument('--ttt_augmentations', default='none', choices=['none', 'colour', 'geometric', 'geometric,colour', 'colour,geometric'], help='Type of augmentations used on the frames of the training sequences') parser.add_argument('--ttt_scale', default=[1., 1.], type=lambda x: list(map(float, x.split(','))), help='Range of scale used for the frame augmentation') parser.add_argument('--ttt_ratio', default=[1., 1.], type=lambda x: list(map(float, x.split(','))), help='Range of ratio used for the frame augmentation') # Parameters of the original STCN method parser.add_argument('--top', type=int, default=20, help='Top-k agencies used for inference') parser.add_argument('--mem_every', default=5, type=int, help='Interval for adding frames in the memory bank') parser.add_argument('--amp', action='store_true', help='Flag for mixed precision processing') parser.add_argument('--include_last', action='store_true', help='include last frame as temporary memory?') args = parser.parse_args() return args def save_config(config_filename, args): model_parameters = { "MODEL_FILENAME": args.model_filename, "TOP": args.top, "MEM_EVERY": args.mem_every, "INCLUDE_LAST": args.include_last, "AMP": args.amp, } train_parameters = { "TTT_LR": args.ttt_lr, "TTT_BATCH_SIZE": args.ttt_batch_size, } loss_parameters = { "TTT_LOSS": args.ttt_loss, "TTT_FROZEN_LAYERS": args.ttt_frozen_layers, "TTT_NUMBER_JUMP_STEPS": args.ttt_number_jump_steps, "TTT_NUMBER_ITERATIONS_PER_JUMP_STEP": args.ttt_number_iterations_per_jump_step, } sampling_parameters = { "TTT_RESOLUTION": args.ttt_resolution, "TTT_SEQUENCE_LENGTH": args.ttt_sequence_length, "TTT_MAX_JUMP_STEP": args.ttt_max_jump_step, } augmentation_parameters = { "TTT_AUGMENTATIONS": args.ttt_augmentations, "TTT_SCALE": args.ttt_scale, "TTT_RATIO": args.ttt_ratio, } global_parameters = { "CONFIG_NAME": args.config_name, "OVERWRITE": args.overwrite, } parameters = { "STCN_MODEL": model_parameters, "TEST_TIME_TRAIN": train_parameters, "TEST_TIME_LOSS": loss_parameters, "SEQUENCE_SAMPLING": sampling_parameters, "AUGMENTATIONS": augmentation_parameters, "GLOBAL": global_parameters, } with open(config_filename, 'w') as file: yaml.dump(parameters, file) if __name__ == "__main__": args = parse_arguments() config_filename = os.path.join("configs", f'{args.config_name}.yaml') os.makedirs(os.path.dirname(config_filename), exist_ok=True) save_config(config_filename, args) print(f"saved config {config_filename}")
5,394
Python
.py
100
44.11
130
0.631619
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
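save_config.py writes the parameters grouped into upper-case sections, and load_config.py's reformat() undoes exactly that grouping. A small round-trip sketch of the two conventions:

import yaml

sections = {"TEST_TIME_TRAIN": {"TTT_LR": 1e-5}, "GLOBAL": {"OVERWRITE": False}}
text = yaml.dump(sections)                      # what save_config writes

# What load_config's reformat() recovers: flat keys, lower-cased
flat = {k.lower(): v for sec in yaml.safe_load(text).values() for k, v in sec.items()}
print(flat)   # {'overwrite': False, 'ttt_lr': 1e-05}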
2,285,942
helper.py
ttt-matching-based-vos_ttt_matching_vos/STCN/ttt/utils/helper.py
import os import time import torch import numpy as np import torch.nn.functional as F from os import path from PIL import Image from inference_core import InferenceCore as InferenceCoreDavis from inference_core_yv import InferenceCore as InferenceCoreYT from util.tensor_util import unpad from dataset.davis_test_dataset import DAVISTestDataset from dataset.yv_test_dataset import YouTubeVOSTestDataset from dataset.mose_test_dataset import MOSETestDataset from model.eval_network import STCN def worker_init_fn(worker_id): return np.random.seed(torch.initial_seed() % (2**31) + worker_id) @torch.no_grad() def video_inference(data, prop_model, output_dir, args): if args.dataset_name == "youtube": video_inference_youtube(data, prop_model, output_dir, args) else: video_inference_not_youtube(data, prop_model, output_dir, args) @torch.no_grad() def video_inference_youtube(data, prop_model, output_dir, args): with torch.cuda.amp.autocast(enabled=args.amp): rgb = data['rgb'] msk = data['gt'][0] info = data['info'] name = info['name'][0] num_objects = len(info['labels'][0]) # (k in davis) gt_obj = info['gt_obj'] # not in davis size = info['size'] # Frames with labels, but they are not exhaustively labeled frames_with_gt = sorted(list(gt_obj.keys())) # torch.cuda.synchronize() process_begin = time.time() processor = InferenceCoreYT(prop_model, rgb, num_objects, top_k=args.top, mem_every=args.mem_every, include_last=args.include_last) # min_idx tells us the starting point of propagation # Propagating before there are labels is not useful min_idx = 99999 for i, frame_idx in enumerate(frames_with_gt): min_idx = min(frame_idx, min_idx) # Note that there might be more than one label per frame obj_idx = gt_obj[frame_idx][0].tolist() # Map the possibly non-continuous labels into a continuous scheme obj_idx = [info['label_convert'][o].item() for o in obj_idx] # Append the background label with_bg_msk = torch.cat([ 1 - torch.sum(msk[:, frame_idx], dim=0, keepdim=True), msk[:, frame_idx], ], 0).cuda() # We perform propagation from the current frame to the next frame with label if i == len(frames_with_gt) - 1: processor.interact(with_bg_msk, frame_idx, rgb.shape[1], obj_idx) else: processor.interact(with_bg_msk, frame_idx, frames_with_gt[i + 1] + 1, obj_idx) # Do unpad -> upsample to original size (we made it 480p) out_masks = torch.zeros((processor.t, 1, *size), dtype=torch.uint8, device='cuda') for ti in range(processor.t): prob = unpad(processor.prob[:, ti], processor.pad) prob = F.interpolate(prob, size, mode='bilinear', align_corners=False) out_masks[ti] = torch.argmax(prob, dim=0) out_masks = (out_masks.detach().cpu().numpy()[:,0]).astype(np.uint8) # Remap the indices to the original domain idx_masks = np.zeros_like(out_masks) for i in range(1, num_objects+1): backward_idx = info['label_backward'][i].item() idx_masks[out_masks==i] = backward_idx stats = { 'process_time': time.time() - process_begin, 'frame_num': out_masks.shape[0] } # Save the results video_output_dir = path.join(output_dir, name) os.makedirs(video_output_dir, exist_ok=True) for f in range(idx_masks.shape[0]): if f >= min_idx: # if args.output_all or (f in req_frames): img_E = Image.fromarray(idx_masks[f]) img_E.putpalette(args.palette) img_E.save(os.path.join(video_output_dir, info['frames'][f][0].replace('.jpg','.png'))) return processor.prob, stats @torch.no_grad() def video_inference_not_youtube(data, prop_model, output_dir, args): with torch.cuda.amp.autocast(enabled=args.amp): rgb = data['rgb'].cuda() msk = data['gt'][0].cuda() info = 
data['info'] name = info['name'][0] k = len(info['labels'][0]) size = info['size_480p'] # torch.cuda.synchronize() process_begin = time.time() processor = InferenceCoreDavis( prop_model, rgb, k, top_k=args.top, mem_every=args.mem_every, include_last=args.include_last) processor.interact(msk[:, 0], 0, rgb.shape[1]) # Do unpad -> upsample to original size out_masks = torch.zeros((processor.t, 1, *size), dtype=torch.uint8, device='cuda') for ti in range(processor.t): prob = unpad(processor.prob[:, ti], processor.pad) prob = F.interpolate(prob, size, mode='bilinear', align_corners=False) out_masks[ti] = torch.argmax(prob, dim=0) out_masks = (out_masks.detach().cpu().numpy()[:, 0]).astype(np.uint8) stats = { 'process_time': time.time() - process_begin, 'frame_num': out_masks.shape[0] } if output_dir is not None: # Save the results video_output_dir = path.join(output_dir, name) os.makedirs(video_output_dir, exist_ok=True) for f in range(out_masks.shape[0]): img_E = Image.fromarray(out_masks[f]) img_E.putpalette(args.palette) img_E.save(os.path.join(video_output_dir, '{:05d}.png'.format(f))) return processor.prob, stats def get_frame_dirs(args): if args.corrupted_image_dir is None: if args.dataset_name == "youtube": frame_dir = path.join(args.dataset_dir, args.split, "JPEGImages") all_frames_dir = path.join(args.dataset_dir, 'all_frames', 'valid_all_frames', 'JPEGImages') elif args.dataset_name == "davis": frame_dir = path.join(args.dataset_dir, 'trainval', 'JPEGImages', '480p') all_frames_dir = None elif args.dataset_name == "mose": frame_dir = path.join(args.dataset_dir, args.split, 'JPEGImages') all_frames_dir = None else: frame_dir = args.corrupted_image_dir all_frames_dir = frame_dir if args.dataset_name == "youtube" else None return frame_dir, all_frames_dir def dump_logs(output_file, logs): with open(output_file, "w") as writer: for l in logs: writer.write(';'.join(list(map(str, l))) + "\n") def get_palette(args): if args.dataset_name == "youtube": palette_video_name = "0a49f5265b" if args.palette_video_name is None else args.palette_video_name palette_filename = args.dataset_dir + f'/valid/Annotations/{palette_video_name}/00000.png' elif args.dataset_name.startswith("davis"): palette_video_name = "blackswan" if args.palette_video_name is None else args.palette_video_name palette_filename = args.dataset_dir + f'/trainval/Annotations/480p/{palette_video_name}/00000.png' elif args.dataset_name == "mose": palette_video_name = "009ddff6" if args.palette_video_name is None else args.palette_video_name palette_filename = args.dataset_dir + f'/{args.split}/Annotations/{palette_video_name}/00000.png' palette = Image.open(path.expanduser(palette_filename)).getpalette() return palette def get_test_dataset(args): # Filter video names video_names = None if args.video_set_filename is not None: video_names = sorted(np.loadtxt(args.video_set_filename, dtype=str)) # Get test dataset if args.dataset_name == "youtube": video_names = video_names[:25] test_dataset = YouTubeVOSTestDataset(data_root=args.dataset_dir, split=args.split, video_ids=video_names) elif args.dataset_name.startswith("davis"): if args.split == 'val': test_dataset = DAVISTestDataset(args.dataset_dir + '/trainval', imset='2017/val.txt', corrupt_dir=args.corrupted_image_dir) elif args.split == 'testdev': test_dataset = DAVISTestDataset(args.dataset_dir + '/test-dev', imset='2017/test-dev.txt') else: raise NotImplementedError elif args.dataset_name == "mose": test_dataset = MOSETestDataset(path.join(args.dataset_dir, args.split), 
video_ids=video_names) return test_dataset def get_stcn_model(args): stcn_model = STCN().cuda().eval() # Performs input mapping such that stage 0 model can be loaded prop_saved = torch.load(args.model_filename) for k in list(prop_saved.keys()): if k == 'value_encoder.conv1.weight': if prop_saved[k].shape[1] == 4: pads = torch.zeros((64, 1, 7, 7), device=prop_saved[k].device) prop_saved[k] = torch.cat([prop_saved[k], pads], 1) stcn_model.load_state_dict(prop_saved) return stcn_model
9,061
Python
.py
179
41.374302
113
0.627657
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
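Both inference paths in helper.py read out their masks the same way: per-frame class probabilities are resized back to the original resolution with bilinear interpolation, then argmax produces the integer label map. A minimal sketch of that read-out; the unpad step is omitted and the shapes are illustrative only.

import torch
import torch.nn.functional as F

prob = torch.softmax(torch.randn(3, 1, 120, 213), dim=0)   # (K+1) x 1 x h x w
size = (480, 854)                                           # original frame size
prob = F.interpolate(prob, size, mode='bilinear', align_corners=False)
mask = torch.argmax(prob, dim=0)[0].to(torch.uint8)         # H x W label map
print(mask.shape)   # torch.Size([480, 854])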
2,285,943
meter.py
ttt-matching-based-vos_ttt_matching_vos/STCN/ttt/utils/meter.py
import torch
import numpy as np
from collections import OrderedDict


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':.3f'):
        self.name = name
        self.fmt = fmt
        self.values = []

    def reset(self):
        self.values = []

    def update(self, val):
        self.values.append(val)

    def avg(self, n=None):
        arr = self.values[-n:] if n is not None else self.values
        return np.mean(arr)

    def std(self, n=None):
        arr = self.values[-n:] if n is not None else self.values
        return np.std(arr)

    def get_stats(self, n=None):
        return self.avg(n), self.std(n)

    def last(self):
        return self.values[-1]

    def __len__(self):
        return len(self.values)

    def __str__(self):
        fmtstr = '{val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(val=self.last(), avg=self.avg())


class AverageMeterDict(object):
    def __init__(self):
        self.meter_dict = OrderedDict()

    def reset(self):
        for k, v in self.meter_dict.items():
            v.reset()

    def add(self, name, fmt=':.3f'):
        self.meter_dict[name] = AverageMeter(name, fmt)

    def get(self, name):
        return self.meter_dict[name]

    def update(self, name, val, fmt=':.3f'):
        if isinstance(val, torch.Tensor):
            val = val.clone().detach().cpu().numpy()
        if name not in self.meter_dict:
            self.add(name, fmt)
        self.meter_dict[name].update(val)

    def avg(self, n=None):
        return {k: v.avg(n) for k, v in self.meter_dict.items()}

    def last(self):
        return {k: v.last() for k, v in self.meter_dict.items()}

    def items(self):
        return self.meter_dict.items()

    def keys(self):
        return self.meter_dict.keys()

    def values(self):
        return self.meter_dict.values()

    def values_avg(self):
        return [v.avg() for v in self.meter_dict.values()]

    def to_str(self):
        return {k: str(v) for k, v in self.meter_dict.items()}

    def print(self, delimiter=' '):
        return delimiter.join(['{}: {}'.format(k, str(v)) for k, v in self.meter_dict.items()])

    def __len__(self):
        return min([len(v) for v in self.meter_dict.values()]) if len(self.meter_dict) else 0
2,339
Python
.py
62
30.306452
95
0.593514
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
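A short usage sketch for AverageMeterDict as the TTT loop uses it: meters are created lazily on the first update, tensors are detached automatically, and avg(n) averages over the last n updates (assuming the module is importable as below).

from ttt.utils.meter import AverageMeterDict

meters = AverageMeterDict()
for step in range(5):
    meters.update('loss', 1.0 / (step + 1))
print(meters.print())    # loss: 0.200 (0.457) -- last value and running average
print(meters.avg(n=2))   # {'loss': 0.225} -- average over the last two updates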
2,285,944
vos_dataset_ttt.py
ttt-matching-based-vos_ttt_matching_vos/STCN/ttt/dataset/vos_dataset_ttt.py
import copy import os from os import path import torch from torch.utils.data.dataset import Dataset from torchvision import transforms from torchvision.transforms import InterpolationMode from PIL import Image import numpy as np from dataset.range_transform import im_normalization, im_mean from dataset.reseed import reseed from dataset.util import all_to_onehot from util.tensor_util import pad_divide_by class VOSDataset(Dataset): """ Works for DAVIS/YouTubeVOS/BL30K training For each sequence: - Pick three frames - Pick two objects - Apply some random transforms that are the same for all frames - Apply random transform to each of the frame - The distance between frames is controlled """ def __init__(self, im_root, gt_root, video_name, max_jump, num_frames, total_sequences=1, video_percentage=1., mem_every=None, resolution=480, scale=(1., 1.), ratio=(1., 1.), augmentations='none', coverage=0., all_objects=False, check_last=True, max_obj=6, frames_with_gt=[0], im_root_all_frames=None # needed for yt ): self.im_root = path.join(im_root, video_name) self.gt_root = path.join(gt_root, video_name) self.video = video_name if im_root_all_frames is None: self.im_root_all_frames = self.im_root self.frames = sorted(os.listdir(self.im_root)) mask = Image.open(path.join(self.gt_root, self.frames[0][:-4] + '.png')).convert('P') self.all_labels = np.unique(np.array(mask)) self.relative_position_first_frame = 0 else: self.im_root_all_frames = path.join(im_root_all_frames, video_name) subsampled_frames = sorted(os.listdir(self.im_root)) self.all_frames = sorted(os.listdir(self.im_root_all_frames)) first_subsampled_frame_id = int(subsampled_frames[0].split(".")[0]) self.frames = [(i_frame, frame) for i_frame, frame in enumerate(self.all_frames) if int(frame.split(".")[0]) >= first_subsampled_frame_id] self.relative_position_first_frame = int(self.frames[0][0]) self.frames = [frame for i_frame, frame in self.frames] self.frames_with_gt = frames_with_gt # this is needed for the yt dataset self.total_sequences = total_sequences self.len_frames = int(len(self.frames) * video_percentage) self.check_last = check_last self.max_obj = max_obj self.max_jump = max_jump self.num_frames = num_frames self.mem_every = mem_every self.all_objects = all_objects self.coverage = coverage # These set of transform is the same for im/gt pairs, but different among the 3 sampled frames self.pair_im_lone_transform = transforms.Compose([ transforms.ColorJitter(0.01, 0.01, 0.01, 0), ]) if 'colour' in augmentations else lambda x: x self.pair_im_dual_transform = transforms.Compose([ transforms.RandomAffine(degrees=15, shear=10, interpolation=InterpolationMode.BICUBIC, fill=im_mean), ]) if 'geometric' in augmentations else lambda x: x self.pair_gt_dual_transform = transforms.Compose([ transforms.RandomAffine(degrees=15, shear=10, interpolation=InterpolationMode.NEAREST, fill=0), ]) if 'geometric' in augmentations else lambda x: x # These transforms are the same for all pairs in the sampled sequence self.all_im_lone_transform = transforms.Compose([ transforms.ColorJitter(0.1, 0.03, 0.03, 0), transforms.RandomGrayscale(0.05), ]) if 'colour' in augmentations else lambda x: x self.all_im_dual_transform = transforms.Compose([ transforms.RandomHorizontalFlip() if 'geometric' in augmentations else transforms.Lambda(lambda x: x), transforms.RandomResizedCrop((resolution, resolution), scale=scale, ratio=ratio, interpolation=Image.BICUBIC) if ratio != (1., 1.) or scale != (1., 1.) 
else transforms.Resize(resolution, interpolation=Image.BICUBIC) ]) self.all_gt_dual_transform = transforms.Compose([ transforms.RandomHorizontalFlip() if 'geometric' in augmentations else transforms.Lambda(lambda x: x), transforms.RandomResizedCrop((resolution, resolution), scale=scale, ratio=ratio, interpolation=Image.NEAREST) if ratio != (1., 1.) or scale != (1., 1.) else transforms.Resize(resolution, interpolation=Image.NEAREST) ]) # Final transform without randomness self.final_im_transform = transforms.Compose([ transforms.ToTensor(), im_normalization, ]) def select_first_frame_id(self): """ Selects the id of the first frame of the triplet. In DAVIS, DAVIS-C and Mose, it is always the first frame but for the YouTube dataset, it might be a later frame. Additionally, in the youtube dataset, each object may be annotated for the first time in a different frame (represented in self.frames_with_gt). """ frame_with_gt = np.random.choice(self.frames_with_gt) first_frame_id = (frame_with_gt - self.relative_position_first_frame) filename = path.join(self.gt_root, self.frames[first_frame_id][:-4] + '.png') mask = Image.open(filename).convert('P') all_labels = np.unique(np.array(mask)) return first_frame_id, all_labels def get_sequence(self, first_frame_id): if self.num_frames == 1: return [first_frame_id] # Don't want to bias towards beginning/end if isinstance(self.max_jump, tuple): this_max_jump = np.random.randint(*self.max_jump) else: this_max_jump = self.max_jump this_max_jump = min(self.len_frames - first_frame_id - 1, this_max_jump) frames_idx = [first_frame_id] for i in range(self.num_frames - 2): if self.mem_every is not None: r = np.arange(self.mem_every, self.len_frames, self.mem_every) r = r[np.logical_and(r > frames_idx[-1], r <= frames_idx[-1] + this_max_jump)] f_idx = np.random.choice(r) if len(r) else frames_idx[-1] + self.mem_every else: f_idx = frames_idx[-1] + np.random.randint(this_max_jump) + 1 f_idx = min(f_idx, self.len_frames - this_max_jump, self.len_frames - 1) frames_idx.append(f_idx) f_idx = frames_idx[-1] + np.random.randint(this_max_jump) + 1 f_idx = min(f_idx, self.len_frames - 1) frames_idx.append(f_idx) return frames_idx def __getitem__(self, idx): info = {'name': self.video} trials, limit = 0, 100 while trials < limit: while True: first_frame_id, all_labels = self.select_first_frame_id() frames_idx = self.get_sequence(first_frame_id) if len(frames_idx) == 1: # for the tt-AE baseline, no triplet is used break if frames_idx[-1] != frames_idx[-2]: break sequence_seed = np.random.randint(2147483647) images = [] masks = [] target_object = None for f_idx in frames_idx: jpg_name = self.frames[f_idx][:-4] + '.jpg' png_name = self.frames[f_idx][:-4] + '.png' reseed(sequence_seed) filename_img = path.join(self.im_root_all_frames, jpg_name) this_im = Image.open(filename_img).convert('RGB') this_im = self.all_im_dual_transform(this_im) this_im = self.all_im_lone_transform(this_im) reseed(sequence_seed) mask_name = path.join(self.gt_root, png_name) this_gt = Image.open(mask_name).convert('P') this_gt = self.all_gt_dual_transform(this_gt) pairwise_seed = np.random.randint(2147483647) reseed(pairwise_seed) this_im = self.pair_im_dual_transform(this_im) this_im = self.pair_im_lone_transform(this_im) reseed(pairwise_seed) this_gt = self.pair_gt_dual_transform(this_gt) this_im = self.final_im_transform(this_im) this_gt = np.array(this_gt) images.append(this_im) masks.append(this_gt) labels = np.unique(masks[0]) # Remove background labels = labels[labels != 0] if len(labels) == 0: 
target_object = -1 # all black if no objects has_second_object = False else: if self.all_objects: target_objects = np.random.choice(labels, np.minimum(len(labels), self.max_obj), replace=False) if not self.check_last or len(set(target_objects) - set(np.unique(masks[-1]))) == 0: break if len(all_labels) == len(np.unique(masks[0])) == len(np.unique(masks[-1])): break if trials > limit // 2 and len(all_labels) == len(np.unique(masks[0])): break else: target_object = np.random.choice(labels) has_second_object = (len(labels) > 1) if has_second_object: second_object = np.random.choice(labels[labels != target_object]) ratio = (masks[-1] == target_object).mean() / (masks[0] == target_object).mean() if self.coverage <= 0. or ratio > self.coverage: break trials += 1 if self.check_last and ( self.all_objects and len(np.unique(masks[0])) != len(np.unique(masks[-1]))) or trials >= limit: images[-1] = copy.deepcopy(images[0]) masks[-1] = copy.deepcopy(masks[0]) frames_idx[-1] = first_frame_id info['frames_idx'] = frames_idx images = torch.stack(images, 0) masks = np.stack(masks, 0) images = pad_divide_by(images, 16)[0] masks = pad_divide_by(torch.from_numpy(masks), 16)[0].numpy() if self.all_objects: labels = np.unique(masks[0]) labels = labels[labels != 0] cls_gt = np.zeros(masks.shape, dtype=np.int64) for i, l in enumerate(labels): cls_gt[masks == l] = i + 1 obj_masks = torch.from_numpy(all_to_onehot(cls_gt, labels)).float() obj_masks = obj_masks.unsqueeze(2) # O x T x 1 x H x W object_count = obj_masks.shape[0] if object_count > 1: other_mask = torch.sum(obj_masks, dim=0, keepdim=True) - obj_masks selector = torch.FloatTensor([1 for _ in range(object_count)]) if len(target_objects) < len(labels): obj_masks = obj_masks[(target_objects - 1).tolist()] other_mask = other_mask[(target_objects - 1).tolist()] selector = selector[(target_objects - 1).tolist()] cls_gt = np.zeros(masks.shape, dtype=np.int64) for i, l in enumerate(target_objects): cls_gt[masks == l] = i + 1 else: other_mask = torch.cat([torch.zeros_like(obj_masks), obj_masks], 0) obj_masks = torch.cat([obj_masks, torch.zeros_like(obj_masks)], 0) selector = torch.FloatTensor([1, 0]) else: tar_masks = (masks == target_object).astype(np.float32)[:, None, :, :] if has_second_object: sec_masks = (masks == second_object).astype(np.float32)[:, None, :, :] selector = torch.FloatTensor([1, 1]) else: sec_masks = np.zeros_like(tar_masks) selector = torch.FloatTensor([1, 0]) obj_masks = np.stack([tar_masks, sec_masks]) other_mask = np.stack([sec_masks, tar_masks]) cls_gt = np.zeros(masks.shape, dtype=np.int64) cls_gt[tar_masks[:, 0] > 0.5] = 1 cls_gt[sec_masks[:, 0] > 0.5] = 2 labels = np.unique(masks[0]) labels = labels[labels != 0] info['labels'] = labels data = { 'rgb': images, 'gt': obj_masks, 'cls_gt': cls_gt, 'sec_gt': other_mask, 'selector': selector, 'info': info, } return data def __len__(self): return self.total_sequences
13,099
Python
.py
259
37.471042
115
0.564429
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
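The paired transforms above rely on a shared-seed trick: seeding the RNGs with the same value before transforming the image and before transforming its mask makes the two random affine draws identical, so the pair stays geometrically aligned. A sketch of the idea with direct seeding; the repository wraps this in reseed() from dataset/reseed.py, which is not shown in this dump.

import random
import torch
from PIL import Image
from torchvision import transforms
from torchvision.transforms import InterpolationMode

im = Image.new('RGB', (64, 64))
gt = Image.new('P', (64, 64))
im_t = transforms.RandomAffine(15, shear=10, interpolation=InterpolationMode.BICUBIC)
gt_t = transforms.RandomAffine(15, shear=10, interpolation=InterpolationMode.NEAREST)

seed = random.randint(0, 2**31)
torch.manual_seed(seed); random.seed(seed)
im_out = im_t(im)            # image and mask receive the same
torch.manual_seed(seed); random.seed(seed)
gt_out = gt_t(gt)            # affine parameters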
2,285,945
model_ttt.py
ttt-matching-based-vos_ttt_matching_vos/STCN/ttt/model/model_ttt.py
""" model.py - warpper and utility functions for network training Compute loss, back-prop, update parameters, logging, etc. """ import torch import torch.nn as nn from model.network import STCN class STCN_TTT(STCN): def __init__(self,): super().__init__(False) def propagate(self, frames, mask_first, mask_second, selector, k16, kf16_thin, kf16, kf8, kf4, encode_first=False, mem_frame=None): ref_v = torch.stack([ self.encode_value(frames[:, 0], kf16[:, 0], mask_first[:, j, 0], mask_second[:, j, 0]) for j in range(mask_first.shape[1]) ], 1) ref_k = k16[:, :, 0].unsqueeze(2) logits, masks = [], [] if encode_first: first_logits, first_mask = self.decode(ref_v, kf16_thin[:, 0], kf8[:, 0], kf4[:, 0], selector) logits.append(first_logits) masks.append(first_mask) # Segment frame 1 with frame 0 l = frames.shape[1] - 1 mem_loop = range(1, l) if mem_frame is None else [mem_frame] for i in mem_loop: prev_logits, prev_mask = self.segment(k16[:, :, i], kf16_thin[:, i], kf8[:, i], kf4[:, i], ref_k, ref_v, selector) prev_other = torch.sum(prev_mask, dim=1, keepdim=True) - prev_mask prev_v = torch.stack([ self.encode_value(frames[:, i].clone(), kf16[:, i].clone(), prev_mask[:, j, None], prev_other[:, j, None]) for j in range(prev_mask.shape[1]) ], 1) ref_v = torch.cat([ref_v, prev_v], 3) ref_k = torch.cat([ref_k, k16[:, :, i].unsqueeze(2)], 2) logits.append(prev_logits) masks.append(prev_mask) # Segment frame 2 with frame 0 and 1 last_logits, last_mask = self.segment(k16[:, :, l], kf16_thin[:, l], kf8[:, l], kf4[:, l], ref_k, ref_v, selector) logits.append(last_logits) masks.append(last_mask) return logits, masks def do_cycle_pass(self, data, backwards=True, encode_first=True): Fs = data['rgb'] Ms = data['gt'] sec_Ms = data['sec_gt'] selector = data['selector'] # key features never change, compute once k16, kf16_thin, kf16, kf8, kf4 = self.encode_key(Fs) # forward pass logits_f, masks_f = self.propagate(Fs, Ms, sec_Ms, selector, k16, kf16_thin, kf16, kf8, kf4, encode_first) # backward pass logits_b, masks_b = None, None if backwards: Ms_b = masks_f[-1][:, :, None, None] sec_Ms_b = torch.sum(Ms_b, dim=1, keepdim=True) - Ms_b logits_b, masks_b = self.propagate(Fs.flip(dims=(1,)), Ms_b, sec_Ms_b, selector, k16.flip(dims=(2,)), kf16_thin.flip(dims=(1,)), kf16.flip(dims=(1,)), kf8.flip(dims=(1,)), kf4.flip(dims=(1,)), encode_first) return logits_f, logits_b, masks_f, masks_b def do_single_pass(self, data): Fs = data['rgb'] Ms = data['gt'] sec_Ms = data['sec_gt'] selector = data['selector'] # key features never change, compute once k16, kf16_thin, kf16, kf8, kf4 = self.encode_key(Fs[:, :1]) ref_v = torch.stack([ self.encode_value(Fs[:, 0], kf16[:, 0], Ms[:, j, 0], sec_Ms[:, j, 0]) for j in range(Ms.shape[1]) ], 1) logits, masks = self.decode(ref_v, kf16_thin[:, 0], kf8[:, 0], kf4[:, 0], selector) return [logits], [masks] def forward(self, data): return self.do_cycle_pass(data) def copy_weights_from(self, model): if isinstance(model, nn.Module): self.load_state_dict(model.state_dict()) else: self.load_state_dict(model) def copy_weights_to(self, model): model.load_state_dict(self.state_dict()) def freeze_encoders(self): self.freeze_key_encoder() self.freeze_value_encoder() def freeze_decoder(self): for param in self.decoder.parameters(): param.requires_grad = False def freeze_all_keys(self): self.freeze_key_encoder() self.freeze_key_proj() self.freeze_key_comp() def freeze_key_encoder(self): for param in self.key_encoder.parameters(): param.requires_grad = False def freeze_value_encoder(self): for param in 
self.value_encoder.parameters(): param.requires_grad = False def freeze_key_proj(self): for param in self.key_proj.parameters(): param.requires_grad = False def freeze_key_comp(self): for param in self.key_comp.parameters(): param.requires_grad = False def freeze_network(self): for param in self.parameters(): param.requires_grad = False def freeze_batch_norms(self): for module in self.modules(): if isinstance(module, nn.BatchNorm2d): if hasattr(module, 'weight'): module.weight.requires_grad_(False) if hasattr(module, 'bias'): module.bias.requires_grad_(False) module.eval() def unfreeze_batch_norms(self): for module in self.modules(): if isinstance(module, nn.BatchNorm2d): if hasattr(module, 'weight'): module.weight.requires_grad_(True) if hasattr(module, 'bias'): module.bias.requires_grad_(True) module.eval() def freeze_parse(self, freeze_str): if freeze_str is not None: for fm in freeze_str.lower().split(','): if 'enc' == fm: self.freeze_encoders() if 'dec' == fm: self.freeze_decoder() if 'all_keys' == fm: self.freeze_all_keys() if 'key_enc' == fm: self.freeze_key_encoder() if 'val_enc' == fm: self.freeze_value_encoder() if 'key_proj' == fm: self.freeze_key_proj() if 'key_comp' == fm: self.freeze_key_comp() if 'net' == fm: self.freeze_network() if 'bn' == fm: self.freeze_batch_norms() if 'ubn' == fm: self.unfreeze_batch_norms() # for name, param in self.named_parameters(): # if param.requires_grad: # print(name)
6,738
Python
.py
151
32.086093
122
0.524878
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
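freeze_parse pairs with the optimizer construction in the TTT loop: frozen modules get requires_grad=False on their parameters, and the optimizer is then built only over what remains trainable. A minimal sketch of that interaction with a toy two-layer network:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Conv2d(8, 2, 3))
for p in net[0].parameters():     # e.g. freeze the "encoder" part
    p.requires_grad = False

optimizer = torch.optim.Adam(
    filter(lambda p: p.requires_grad, net.parameters()), lr=1e-5)
print(sum(p.numel() for p in net.parameters() if p.requires_grad))  # trainable count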
2,285,946
util.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/util.py
import numpy as np


def all_to_onehot(masks, labels):
    if len(masks.shape) == 3:
        Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np.uint8)
    else:
        Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1]), dtype=np.uint8)
    for k, l in enumerate(labels):
        Ms[k] = (masks == l).astype(np.uint8)
    return Ms
386
Python
.py
9
36.111111
100
0.621918
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
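Usage sketch for all_to_onehot: a label map with arbitrary (possibly non-contiguous) object ids becomes a stack with one binary channel per requested label.

import numpy as np
from dataset.util import all_to_onehot

masks = np.array([[0, 3], [5, 3]], dtype=np.uint8)    # H x W label map
onehot = all_to_onehot(masks[None], labels=[3, 5])    # add a T axis in front
print(onehot.shape)   # (2, 1, 2, 2): one channel per label
print(onehot[0, 0])   # binary mask for label 3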
2,285,947
generic_test_dataset.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/generic_test_dataset.py
import os from os import path import torch from torch.utils.data.dataset import Dataset from torchvision import transforms from torchvision.transforms import InterpolationMode from PIL import Image import numpy as np from dataset.range_transform import im_normalization from dataset.util import all_to_onehot class GenericTestDataset(Dataset): def __init__(self, data_root, res=480): self.image_dir = path.join(data_root, 'JPEGImages') self.mask_dir = path.join(data_root, 'Annotations') self.videos = [] self.shape = {} self.frames = {} vid_list = sorted(os.listdir(self.image_dir)) # Pre-reading for vid in vid_list: frames = sorted(os.listdir(os.path.join(self.image_dir, vid))) self.frames[vid] = frames self.videos.append(vid) first_mask = os.listdir(path.join(self.mask_dir, vid))[0] _mask = np.array(Image.open(path.join(self.mask_dir, vid, first_mask)).convert("P")) self.shape[vid] = np.shape(_mask) if res != -1: self.im_transform = transforms.Compose([ transforms.ToTensor(), im_normalization, transforms.Resize(res, interpolation=InterpolationMode.BICUBIC), ]) self.mask_transform = transforms.Compose([ transforms.Resize(res, interpolation=InterpolationMode.NEAREST), ]) else: self.im_transform = transforms.Compose([ transforms.ToTensor(), im_normalization, ]) self.mask_transform = transforms.Compose([ ]) def __getitem__(self, idx): video = self.videos[idx] info = {} info['name'] = video info['frames'] = self.frames[video] info['size'] = self.shape[video] # Real sizes info['gt_obj'] = {} # Frames with labelled objects vid_im_path = path.join(self.image_dir, video) vid_gt_path = path.join(self.mask_dir, video) frames = self.frames[video] images = [] masks = [] for i, f in enumerate(frames): img = Image.open(path.join(vid_im_path, f)).convert('RGB') images.append(self.im_transform(img)) mask_file = path.join(vid_gt_path, f.replace('.jpg','.png')) if path.exists(mask_file): mask = Image.open(mask_file).convert('P') palette = mask.getpalette() masks.append(np.array(mask, dtype=np.uint8)) this_labels = np.unique(masks[-1]) this_labels = this_labels[this_labels!=0] info['gt_obj'][i] = this_labels else: # Mask not exists -> nothing in it masks.append(np.zeros(self.shape[video])) images = torch.stack(images, 0) masks = np.stack(masks, 0) # Construct the forward and backward mapping table for labels # this is because YouTubeVOS's labels are sometimes not continuous # while we want continuous ones (for one-hot) # so we need to maintain a backward mapping table labels = np.unique(masks).astype(np.uint8) labels = labels[labels!=0] info['label_convert'] = {} info['label_backward'] = {} idx = 1 for l in labels: info['label_convert'][l] = idx info['label_backward'][idx] = l idx += 1 masks = torch.from_numpy(all_to_onehot(masks, labels)).float() # Resize to 480p masks = self.mask_transform(masks) masks = masks.unsqueeze(2) info['labels'] = labels data = { 'rgb': images, 'gt': masks, 'info': info, 'palette': np.array(palette), } return data def __len__(self): return len(self.videos)
3,957
Python
.py
97
30.082474
96
0.575702
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
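The label_convert / label_backward tables built in __getitem__ handle YouTubeVOS-style ids, which are not guaranteed to be contiguous: processing happens in a compact 1..N space and the raw ids are restored when writing outputs. A small sketch of the two mappings:

import numpy as np

labels = np.array([2, 7, 9], dtype=np.uint8)                    # raw object ids
label_convert = {int(l): i + 1 for i, l in enumerate(labels)}   # raw -> 1..N
label_backward = {i + 1: int(l) for i, l in enumerate(labels)}  # 1..N -> raw

out = np.array([[0, 1], [2, 3]])            # network output, compact ids
restored = np.zeros_like(out)
for i, raw in label_backward.items():
    restored[out == i] = raw
print(restored)                             # [[0 2] [7 9]]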
2,285,948
vos_dataset.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/vos_dataset.py
import os
from os import path
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from torchvision.transforms import InterpolationMode
from PIL import Image
import numpy as np
from dataset.range_transform import im_normalization, im_mean
from dataset.reseed import reseed
from dataset.util import all_to_onehot


class VOSDataset(Dataset):
    """
    Works for DAVIS/YouTubeVOS/BL30K training
    For each sequence:
    - Pick three frames
    - Pick two objects
    - Apply some random transforms that are the same for all frames
    - Apply a random transform to each of the frames
    - The distance between frames is controlled
    """
    def __init__(self, im_root, gt_root, max_jump, is_bl, subset=None):
        self.im_root = im_root
        self.gt_root = gt_root
        self.max_jump = max_jump
        self.is_bl = is_bl

        self.videos = []
        self.frames = {}

        vid_list = sorted(os.listdir(self.im_root))
        # Pre-filtering
        for vid in vid_list:
            if subset is not None:
                if vid not in subset:
                    continue
            frames = sorted(os.listdir(os.path.join(self.im_root, vid)))
            if len(frames) < 3:
                continue
            self.frames[vid] = frames
            self.videos.append(vid)

        print('%d out of %d videos accepted in %s.' % (len(self.videos), len(vid_list), im_root))

        # This set of transforms is the same for im/gt pairs, but different among the 3 sampled frames
        self.pair_im_lone_transform = transforms.Compose([
            transforms.ColorJitter(0.01, 0.01, 0.01, 0),
        ])

        self.pair_im_dual_transform = transforms.Compose([
            transforms.RandomAffine(degrees=15, shear=10, interpolation=InterpolationMode.BICUBIC, fill=im_mean),
        ])

        self.pair_gt_dual_transform = transforms.Compose([
            transforms.RandomAffine(degrees=15, shear=10, interpolation=InterpolationMode.NEAREST, fill=0),
        ])

        # These transforms are the same for all pairs in the sampled sequence
        self.all_im_lone_transform = transforms.Compose([
            transforms.ColorJitter(0.1, 0.03, 0.03, 0),
            transforms.RandomGrayscale(0.05),
        ])

        if self.is_bl:
            # Use a different cropping scheme for the blender dataset because the image size is different
            self.all_im_dual_transform = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomResizedCrop((384, 384), scale=(0.25, 1.00), interpolation=InterpolationMode.BICUBIC)
            ])

            self.all_gt_dual_transform = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomResizedCrop((384, 384), scale=(0.25, 1.00), interpolation=InterpolationMode.NEAREST)
            ])
        else:
            self.all_im_dual_transform = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomResizedCrop((384, 384), scale=(0.36,1.00), interpolation=InterpolationMode.BICUBIC)
            ])

            self.all_gt_dual_transform = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomResizedCrop((384, 384), scale=(0.36,1.00), interpolation=InterpolationMode.NEAREST)
            ])

        # Final transform without randomness
        self.final_im_transform = transforms.Compose([
            transforms.ToTensor(),
            im_normalization,
        ])

    def __getitem__(self, idx):
        video = self.videos[idx]
        info = {}
        info['name'] = video

        vid_im_path = path.join(self.im_root, video)
        vid_gt_path = path.join(self.gt_root, video)
        frames = self.frames[video]

        trials = 0
        while trials < 5:
            info['frames'] = [] # Appended with actual frames

            # Don't want to bias towards beginning/end
            this_max_jump = min(len(frames), self.max_jump)
            start_idx = np.random.randint(len(frames)-this_max_jump+1)
            f1_idx = start_idx + np.random.randint(this_max_jump+1) + 1
            f1_idx = min(f1_idx, len(frames)-this_max_jump, len(frames)-1)

            f2_idx = f1_idx + np.random.randint(this_max_jump+1) + 1
            f2_idx = min(f2_idx, len(frames)-this_max_jump//2, len(frames)-1)

            frames_idx = [start_idx, f1_idx, f2_idx]
            if np.random.rand() < 0.5:
                # Reverse time
                frames_idx = frames_idx[::-1]

            sequence_seed = np.random.randint(2147483647)
            images = []
            masks = []
            target_object = None
            for f_idx in frames_idx:
                jpg_name = frames[f_idx][:-4] + '.jpg'
                png_name = frames[f_idx][:-4] + '.png'
                info['frames'].append(jpg_name)

                reseed(sequence_seed)
                this_im = Image.open(path.join(vid_im_path, jpg_name)).convert('RGB')
                this_im = self.all_im_dual_transform(this_im)
                this_im = self.all_im_lone_transform(this_im)
                reseed(sequence_seed)
                this_gt = Image.open(path.join(vid_gt_path, png_name)).convert('P')
                this_gt = self.all_gt_dual_transform(this_gt)

                pairwise_seed = np.random.randint(2147483647)
                reseed(pairwise_seed)
                this_im = self.pair_im_dual_transform(this_im)
                this_im = self.pair_im_lone_transform(this_im)
                reseed(pairwise_seed)
                this_gt = self.pair_gt_dual_transform(this_gt)

                this_im = self.final_im_transform(this_im)
                this_gt = np.array(this_gt)

                images.append(this_im)
                masks.append(this_gt)

            images = torch.stack(images, 0)

            labels = np.unique(masks[0])
            # Remove background
            labels = labels[labels!=0]

            if self.is_bl:
                # Find large enough labels
                good_labels = []
                for l in labels:
                    pixel_sum = (masks[0]==l).sum()
                    if pixel_sum > 10*10:
                        # OK if the object is always this small
                        # Not OK if it is actually much bigger
                        if pixel_sum > 30*30:
                            good_labels.append(l)
                        elif max((masks[1]==l).sum(), (masks[2]==l).sum()) < 20*20:
                            good_labels.append(l)
                labels = np.array(good_labels, dtype=np.uint8)

            if len(labels) == 0:
                target_object = -1 # all black if no objects
                has_second_object = False
                trials += 1
            else:
                target_object = np.random.choice(labels)
                has_second_object = (len(labels) > 1)
                if has_second_object:
                    labels = labels[labels!=target_object]
                    second_object = np.random.choice(labels)
                break

        masks = np.stack(masks, 0)
        tar_masks = (masks==target_object).astype(np.float32)[:,np.newaxis,:,:]
        if has_second_object:
            sec_masks = (masks==second_object).astype(np.float32)[:,np.newaxis,:,:]
            selector = torch.FloatTensor([1, 1])
        else:
            sec_masks = np.zeros_like(tar_masks)
            selector = torch.FloatTensor([1, 0])

        cls_gt = np.zeros((3, 384, 384), dtype=np.int8)
        cls_gt[tar_masks[:,0] > 0.5] = 1
        cls_gt[sec_masks[:,0] > 0.5] = 2

        data = {
            'rgb': images,
            'gt': tar_masks,
            'cls_gt': cls_gt,
            'sec_gt': sec_masks,
            'selector': selector,
            'info': info,
        }

        return data

    def __len__(self):
        return len(self.videos)

    def __getitem__test(self, index):
        video = self.videos[index]
        info = {}
        info['name'] = video

        vid_im_path = path.join(self.im_root, video)
        vid_gt_path = path.join(self.gt_root, video)
        frames = self.frames[video]

        images = []
        masks = []
        for f in range(10): # self.num_frames[video]):
            jpg_name = frames[f][:-4] + '.jpg'
            png_name = frames[f][:-4] + '.png'

            img_file = path.join(vid_im_path, jpg_name)
            images.append(self.final_im_transform(Image.open(img_file).convert('RGB')))

            mask_file = path.join(vid_gt_path, png_name)
            if path.exists(mask_file):
                masks.append(np.array(Image.open(mask_file).convert('P'), dtype=np.uint8))
            else:
                # Test-set maybe?
                masks.append(np.zeros_like(masks[0]))

        images = torch.stack(images, 0)
        masks = np.stack(masks, 0)

        labels = np.unique(masks[0])
        labels = labels[labels != 0]
        masks = torch.from_numpy(all_to_onehot(masks, labels)).float()
        masks = masks.unsqueeze(2)

        info['labels'] = labels

        data = {
            'rgb': images,
            'gt': masks,
            'info': info,
        }

        return data
10,113
Python
.py
229
32.10917
117
0.554424
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
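The triplet sampling in __getitem__ draws three frame indices with bounded random jumps, clamps them to the clip length, and reverses time half of the time. A standalone sketch of just that index logic, with an illustrative clip length:

import numpy as np

num_frames, max_jump = 40, 10
this_max_jump = min(num_frames, max_jump)
start_idx = np.random.randint(num_frames - this_max_jump + 1)
f1_idx = min(start_idx + np.random.randint(this_max_jump + 1) + 1,
             num_frames - this_max_jump, num_frames - 1)
f2_idx = min(f1_idx + np.random.randint(this_max_jump + 1) + 1,
             num_frames - this_max_jump // 2, num_frames - 1)
frames_idx = [start_idx, f1_idx, f2_idx]
if np.random.rand() < 0.5:        # reverse time
    frames_idx = frames_idx[::-1]
print(frames_idx)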
2,285,949
mose_test_dataset.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/mose_test_dataset.py
""" Modified from https://github.com/seoungwugoh/STM/blob/master/dataset.py """ import os from os import path import numpy as np from PIL import Image import torch from torchvision import transforms from torchvision.transforms import InterpolationMode from torch.utils.data.dataset import Dataset from dataset.range_transform import im_normalization from dataset.util import all_to_onehot class MOSETestDataset(Dataset): def __init__(self, root, resolution=480, single_object=False, target_name=None, video_ids=None): self.root = root self.mask_dir = path.join(root, 'Annotations') self.image_dir = path.join(root, 'JPEGImages') self.resolution = resolution self.videos = [] self.num_frames = {} self.num_objects = {} self.shape = {} for _video in sorted(os.listdir(self.image_dir)): if target_name is not None and target_name != _video: continue if video_ids is not None and _video not in video_ids: continue self.videos.append(_video) self.num_frames[_video] = len(os.listdir(path.join(self.image_dir, _video))) _mask = np.array(Image.open(path.join(self.mask_dir, _video, '00000.png')).convert("P")) self.num_objects[_video] = np.max(_mask) self.shape[_video] = np.shape(_mask) self.single_object = single_object self.im_transform = transforms.Compose([ transforms.ToTensor(), im_normalization, transforms.Resize(resolution, interpolation=InterpolationMode.BICUBIC), ]) self.mask_transform = transforms.Compose([ transforms.Resize(resolution, interpolation=InterpolationMode.NEAREST), ]) def __len__(self): return len(self.videos) def __getitem__(self, index): video = self.videos[index] info = {} info['name'] = video info['frames'] = [] info['num_frames'] = self.num_frames[video] images = [] masks = [] for f in range(self.num_frames[video]): img_file = path.join(self.image_dir, video, '{:05d}.jpg'.format(f)) images.append(self.im_transform(Image.open(img_file).convert('RGB'))) info['frames'].append('{:05d}.jpg'.format(f)) mask_file = path.join(self.mask_dir, video, '{:05d}.png'.format(f)) if path.exists(mask_file): masks.append(np.array(Image.open(mask_file).convert('P'), dtype=np.uint8)) else: # Test-set maybe? masks.append(np.zeros_like(masks[0])) info['size_480p'] = masks[0].shape images = torch.stack(images, 0) masks = np.stack(masks, 0) gt = masks.copy() if self.single_object: labels = [1] masks = (masks > 0.5).astype(np.uint8) masks = torch.from_numpy(all_to_onehot(masks, labels)).float() else: labels = np.unique(masks[0]) labels = labels[labels!=0] masks = torch.from_numpy(all_to_onehot(masks, labels)).float() masks = self.mask_transform(masks) masks = masks.unsqueeze(2) info['size'] = images.shape[2:] info['labels'] = labels data = { 'rgb': images, 'gt': masks, 'cls_gt': gt, 'info': info, } return data
3,472
Python
.py
85
31.2
100
0.59267
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,950
davis_test_dataset.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/davis_test_dataset.py
""" Modified from https://github.com/seoungwugoh/STM/blob/master/dataset.py """ import os from os import path import numpy as np from PIL import Image import torch from torchvision import transforms from torchvision.transforms import InterpolationMode from torch.utils.data.dataset import Dataset from dataset.range_transform import im_normalization from dataset.util import all_to_onehot class DAVISTestDataset(Dataset): def __init__(self, root, imset='2017/val.txt', resolution=480, single_object=False, target_name=None, dataset_name="davis", jpeg_path="", corrupt_dir=None, video_set=None): self.root = root if resolution == 480: res_tag = '480p' else: res_tag = 'Full-Resolution' self.resolution = resolution if dataset_name == "davis": self.mask_dir = path.join(root, 'Annotations', res_tag) self.mask480_dir = path.join(root, 'Annotations', '480p') if jpeg_path != "": self.image_dir = jpeg_path elif corrupt_dir is not None: self.image_dir = corrupt_dir else: self.image_dir = path.join(root, 'JPEGImages', res_tag) _imset_dir = path.join(root) #, 'ImageSets') if 'ImageSets' not in imset: _imset_dir = path.join(root, 'ImageSets') else: self.mask_dir = path.join(root, 'Annotations') self.mask480_dir = path.join(root, 'Annotations') self.image_dir = path.join(root, 'JPEGImages') _imset_dir = path.join(root) _imset_f = path.join(_imset_dir, imset) self.videos = [] self.num_frames = {} self.num_objects = {} self.shape = {} self.size_480p = {} # target_name = "dogs-jump" with open(path.join(_imset_f), "r") as lines: for line in lines: _video = line.rstrip('\n').split("/")[-1] if target_name is not None and target_name != _video: continue self.videos.append(_video) self.num_frames[_video] = len(os.listdir(path.join(self.image_dir, _video))) _mask = np.array( Image.open(path.join(self.mask_dir, _video, '00000.png')).convert("P")) self.num_objects[_video] = np.max(_mask) self.shape[_video] = np.shape(_mask) _mask480 = np.array( Image.open(path.join(self.mask480_dir, _video, '00000.png')).convert("P")) self.size_480p[_video] = np.shape(_mask480) if video_set is not None: self.videos = [k for k in self.videos if k in video_set] self.videos = list(sorted(self.videos)) print(f"self.videos {self.videos}") self.single_object = single_object if resolution == -1: self.im_transform = transforms.Compose([ transforms.ToTensor(), im_normalization, ]) else: self.im_transform = transforms.Compose([ transforms.ToTensor(), im_normalization, transforms.Resize(resolution, interpolation=InterpolationMode.BICUBIC), ]) self.mask_transform = transforms.Compose([ transforms.Resize(resolution, interpolation=InterpolationMode.NEAREST), ]) def __len__(self): return len(self.videos) def __getitem__(self, index): video = self.videos[index] info = {} info['name'] = video info['frames'] = [] info['num_frames'] = self.num_frames[video] info['size_480p'] = self.size_480p[video] images = [] masks = [] for f in range(self.num_frames[video]): img_file = path.join(self.image_dir, video, '{:05d}.jpg'.format(f)) images.append(self.im_transform(Image.open(img_file).convert('RGB'))) info['frames'].append('{:05d}.jpg'.format(f)) mask_file = path.join(self.mask_dir, video, '{:05d}.png'.format(f)) if path.exists(mask_file): masks.append(np.array(Image.open(mask_file).convert('P'), dtype=np.uint8)) else: # Test-set maybe? 
masks.append(np.zeros_like(masks[0])) images = torch.stack(images, 0) masks = np.stack(masks, 0) gt = masks.copy() if self.single_object: labels = [1] masks = (masks > 0.5).astype(np.uint8) masks = torch.from_numpy(all_to_onehot(masks, labels)).float() else: labels = np.unique(masks[0]) labels = labels[labels!=0] masks = torch.from_numpy(all_to_onehot(masks, labels)).float() if self.resolution != -1: masks = self.mask_transform(masks) masks = masks.unsqueeze(2) info['labels'] = labels data = { 'rgb': images, 'gt': masks, 'cls_gt': gt, 'info': info, } return data
5,166
Python
.py
122
30.909836
105
0.557446
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,951
static_dataset.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/static_dataset.py
import os
from os import path

import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from torchvision.transforms import InterpolationMode
from PIL import Image
import numpy as np

from dataset.range_transform import im_normalization, im_mean
from dataset.tps import random_tps_warp
from dataset.reseed import reseed


class StaticTransformDataset(Dataset):
    """
    Generate pseudo VOS data by applying random transforms on static images.
    Single-object only.

    Method 0 - FSS style (class/1.jpg class/1.png)
    Method 1 - Others style (XXX.jpg XXX.png)
    """
    def __init__(self, root, method=0):
        self.root = root
        self.method = method

        if method == 0:
            # Get images
            self.im_list = []
            classes = os.listdir(self.root)
            for c in classes:
                imgs = os.listdir(path.join(root, c))
                jpg_list = [im for im in imgs if 'jpg' in im[-3:].lower()]

                joint_list = [path.join(root, c, im) for im in jpg_list]
                self.im_list.extend(joint_list)

        elif method == 1:
            self.im_list = [path.join(self.root, im) for im in os.listdir(self.root) if '.jpg' in im]

        print('%d images found in %s' % (len(self.im_list), root))

        # This set of transforms is the same for im/gt pairs, but differs among the 3 sampled frames
        self.pair_im_lone_transform = transforms.Compose([
            transforms.ColorJitter(0.1, 0.05, 0.05, 0), # No hue change here as that's not realistic
        ])

        self.pair_im_dual_transform = transforms.Compose([
            transforms.RandomAffine(degrees=20, scale=(0.9,1.1), shear=10, interpolation=InterpolationMode.BICUBIC, fill=im_mean),
            transforms.Resize(384, InterpolationMode.BICUBIC),
            transforms.RandomCrop((384, 384), pad_if_needed=True, fill=im_mean),
        ])

        self.pair_gt_dual_transform = transforms.Compose([
            transforms.RandomAffine(degrees=20, scale=(0.9,1.1), shear=10, interpolation=InterpolationMode.BICUBIC, fill=0),
            transforms.Resize(384, InterpolationMode.NEAREST),
            transforms.RandomCrop((384, 384), pad_if_needed=True, fill=0),
        ])

        # These transforms are the same for all pairs in the sampled sequence
        self.all_im_lone_transform = transforms.Compose([
            transforms.ColorJitter(0.1, 0.05, 0.05, 0.05),
            transforms.RandomGrayscale(0.05),
        ])

        self.all_im_dual_transform = transforms.Compose([
            transforms.RandomAffine(degrees=0, scale=(0.8, 1.5), fill=im_mean),
            transforms.RandomHorizontalFlip(),
        ])

        self.all_gt_dual_transform = transforms.Compose([
            transforms.RandomAffine(degrees=0, scale=(0.8, 1.5), fill=0),
            transforms.RandomHorizontalFlip(),
        ])

        # Final transform without randomness
        self.final_im_transform = transforms.Compose([
            transforms.ToTensor(),
            im_normalization,
        ])

        self.final_gt_transform = transforms.Compose([
            transforms.ToTensor(),
        ])

    def __getitem__(self, idx):
        im = Image.open(self.im_list[idx]).convert('RGB')

        if self.method == 0:
            gt = Image.open(self.im_list[idx][:-3]+'png').convert('L')
        else:
            gt = Image.open(self.im_list[idx].replace('.jpg','.png')).convert('L')

        sequence_seed = np.random.randint(2147483647)

        images = []
        masks = []
        for _ in range(3):
            reseed(sequence_seed)
            this_im = self.all_im_dual_transform(im)
            this_im = self.all_im_lone_transform(this_im)
            reseed(sequence_seed)
            this_gt = self.all_gt_dual_transform(gt)

            pairwise_seed = np.random.randint(2147483647)
            reseed(pairwise_seed)
            this_im = self.pair_im_dual_transform(this_im)
            this_im = self.pair_im_lone_transform(this_im)
            reseed(pairwise_seed)
            this_gt = self.pair_gt_dual_transform(this_gt)

            # Use TPS only some of the time
            # Not because TPS is bad -- just that it is too slow and I need to speed up data loading
            if np.random.rand() < 0.33:
                this_im, this_gt = random_tps_warp(this_im, this_gt, scale=0.02)

            this_im = self.final_im_transform(this_im)
            this_gt = self.final_gt_transform(this_gt)

            images.append(this_im)
            masks.append(this_gt)

        images = torch.stack(images, 0)
        masks = torch.stack(masks, 0)

        info = {}
        info['name'] = self.im_list[idx]

        cls_gt = np.zeros((3, 384, 384), dtype=np.int64)  # np.int was removed in NumPy >= 1.24
        cls_gt[masks[:,0] > 0.5] = 1

        data = {
            'rgb': images,
            'gt': masks,
            'cls_gt': cls_gt,
            'info': info
        }

        return data

    def __len__(self):
        return len(self.im_list)
5,023
Python
.py
112
34.776786
130
0.602787
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
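The pairing trick in static_dataset.py above relies on re-seeding the global RNGs so the image and the mask draw identical random transform parameters. A minimal standalone sketch of that mechanism; the reseed helper below is an assumed equivalent of dataset/reseed.py, which is not included in this dump:

import random
import torch
import torchvision.transforms as T
from PIL import Image

def reseed(seed):
    # Assumed equivalent of dataset/reseed.py: reset every RNG that
    # torchvision's random transforms may draw from.
    random.seed(seed)
    torch.manual_seed(seed)

# The same RandomAffine instance yields the same warp for the image and
# the mask when the RNG state is identical before each call.
affine = T.RandomAffine(degrees=20, scale=(0.9, 1.1), shear=10)

img = Image.new('RGB', (64, 64), (255, 0, 0))
mask = Image.new('L', (64, 64), 255)

seed = 1234
reseed(seed)
img_warped = affine(img)
reseed(seed)
mask_warped = affine(mask)
# img_warped and mask_warped have now undergone the same geometric transform.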
2,285,952
range_transform.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/range_transform.py
import torchvision.transforms as transforms

im_mean = (124, 116, 104)

im_normalization = transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]
                )

inv_im_trans = transforms.Normalize(
                mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
                std=[1/0.229, 1/0.224, 1/0.225])
377
Python
.py
9
30.777778
64
0.531507
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
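As a quick sanity check (not part of the repository), the inv_im_trans constants follow from inverting x_norm = (x - mean)/std per channel: applying Normalize again with mean' = -mean/std and std' = 1/std yields (x_norm + mean/std) * std = x. A minimal round-trip verification:

import torch
import torchvision.transforms as transforms

im_normalization = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225])
inv_im_trans = transforms.Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
                                    std=[1/0.229, 1/0.224, 1/0.225])

x = torch.rand(3, 8, 8)                     # an image in [0, 1]
x_rec = inv_im_trans(im_normalization(x))   # normalize, then invert
assert torch.allclose(x, x_rec, atol=1e-5)  # the round trip recovers the input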
2,285,953
yv_test_dataset.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/yv_test_dataset.py
import os
from os import path

import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from torchvision.transforms import InterpolationMode
from PIL import Image
import numpy as np

from dataset.range_transform import im_normalization
from dataset.util import all_to_onehot


class YouTubeVOSTestDataset(Dataset):
    def __init__(self, data_root, split, res=480, video_ids=None):
        self.image_dir = path.join(data_root, 'all_frames', split+'_all_frames', 'JPEGImages')
        self.mask_dir = path.join(data_root, split, 'Annotations')

        self.videos = []
        self.shape = {}
        self.frames = {}

        vid_list = sorted(os.listdir(self.image_dir))
        # Pre-reading
        for vid in vid_list:
            if video_ids is not None and vid not in video_ids:
                continue
            frames = sorted(os.listdir(os.path.join(self.image_dir, vid)))
            self.frames[vid] = frames
            self.videos.append(vid)

            first_mask = os.listdir(path.join(self.mask_dir, vid))[0]
            _mask = np.array(Image.open(path.join(self.mask_dir, vid, first_mask)).convert("P"))
            self.shape[vid] = np.shape(_mask)

        if res != -1:
            self.im_transform = transforms.Compose([
                transforms.ToTensor(),
                im_normalization,
                transforms.Resize(res, interpolation=InterpolationMode.BICUBIC),
            ])
            self.mask_transform = transforms.Compose([
                transforms.Resize(res, interpolation=InterpolationMode.NEAREST),
            ])
        else:
            self.im_transform = transforms.Compose([
                transforms.ToTensor(),
                im_normalization,
            ])
            self.mask_transform = transforms.Compose([
            ])

    def __getitem__(self, idx):
        video = self.videos[idx]
        info = {}
        info['name'] = video
        info['frames'] = self.frames[video]
        info['size'] = self.shape[video] # Real sizes
        info['gt_obj'] = {} # Frames with labelled objects

        vid_im_path = path.join(self.image_dir, video)
        vid_gt_path = path.join(self.mask_dir, video)

        frames = self.frames[video]

        images = []
        masks = []
        for i, f in enumerate(frames):
            img = Image.open(path.join(vid_im_path, f)).convert('RGB')
            images.append(self.im_transform(img))

            mask_file = path.join(vid_gt_path, f.replace('.jpg','.png'))
            if path.exists(mask_file):
                masks.append(np.array(Image.open(mask_file).convert('P'), dtype=np.uint8))
                this_labels = np.unique(masks[-1])
                this_labels = this_labels[this_labels!=0]
                info['gt_obj'][i] = this_labels
            else:
                # Mask does not exist -> nothing in it
                masks.append(np.zeros(self.shape[video]))

        images = torch.stack(images, 0)
        masks = np.stack(masks, 0)

        # Construct the forward and backward mapping tables for labels.
        # This is needed because YouTubeVOS's labels are sometimes not continuous,
        # while we want continuous ones (for one-hot encoding),
        # so we maintain a backward mapping table as well.
        labels = np.unique(masks).astype(np.uint8)
        labels = labels[labels!=0]
        info['label_convert'] = {}
        info['label_backward'] = {}
        idx = 1
        for l in labels:
            info['label_convert'][l] = idx
            info['label_backward'][idx] = l
            idx += 1
        masks = torch.from_numpy(all_to_onehot(masks, labels)).float()

        # Resize to 480p
        masks = self.mask_transform(masks)
        masks = masks.unsqueeze(2)

        info['labels'] = labels

        data = {
            'rgb': images,
            'gt': masks,
            'info': info,
        }

        return data

    def __len__(self):
        return len(self.videos)
3,999
Python
.py
96
31.010417
96
0.580695
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
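The forward/backward label tables built in __getitem__ above exist because YouTubeVOS palette indices can be non-contiguous. A tiny standalone illustration of the remapping (the label values here are made up):

import numpy as np

masks = np.array([[0, 3, 3], [0, 7, 0]], dtype=np.uint8)   # objects labelled 3 and 7

labels = np.unique(masks)
labels = labels[labels != 0]                               # [3, 7]

label_convert  = {l: i + 1 for i, l in enumerate(labels)}  # {3: 1, 7: 2}
label_backward = {i + 1: l for i, l in enumerate(labels)}  # {1: 3, 2: 7}

# Contiguous ids are used internally (e.g. for one-hot encoding); the
# backward table restores the original palette values when saving results.
remapped = np.zeros_like(masks)
for l, i in label_convert.items():
    remapped[masks == l] = i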
2,285,954
tps.py
ttt-matching-based-vos_ttt_matching_vos/STCN/dataset/tps.py
import numpy as np
from PIL import Image
import cv2
import thinplate as tps

cv2.setNumThreads(0)

def pick_random_points(h, w, n_samples):
    y_idx = np.random.choice(np.arange(h), size=n_samples, replace=False)
    x_idx = np.random.choice(np.arange(w), size=n_samples, replace=False)
    return y_idx/h, x_idx/w


def warp_dual_cv(img, mask, c_src, c_dst):
    dshape = img.shape
    theta = tps.tps_theta_from_points(c_src, c_dst, reduced=True)
    grid = tps.tps_grid(theta, c_dst, dshape)
    mapx, mapy = tps.tps_grid_to_remap(grid, img.shape)
    return cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR), cv2.remap(mask, mapx, mapy, cv2.INTER_NEAREST)


def random_tps_warp(img, mask, scale, n_ctrl_pts=12):
    """
    Apply a random TPS warp to the input image and mask.
    Uses randomness from numpy.
    """
    img = np.asarray(img)
    mask = np.asarray(mask)

    h, w = mask.shape
    points = pick_random_points(h, w, n_ctrl_pts)
    c_src = np.stack(points, 1)
    c_dst = c_src + np.random.normal(scale=scale, size=c_src.shape)
    warp_im, warp_gt = warp_dual_cv(img, mask, c_src, c_dst)
    return Image.fromarray(warp_im), Image.fromarray(warp_gt)
1,167
Python
.py
28
37.5
103
0.686726
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
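For intuition, a standalone sketch (not repository code) of the control-point setup that drives the warp in tps.py above: thinplate fits a smooth deformation that moves each source point onto its jittered copy, and the image is resampled accordingly.

import numpy as np

h, w, n_ctrl_pts, scale = 384, 384, 12, 0.02

# Normalized (y, x) control points, as in pick_random_points
y_idx = np.random.choice(np.arange(h), size=n_ctrl_pts, replace=False) / h
x_idx = np.random.choice(np.arange(w), size=n_ctrl_pts, replace=False) / w
c_src = np.stack([y_idx, x_idx], 1)        # source points, shape (12, 2)

# Destination points: small Gaussian jitter around the sources
c_dst = c_src + np.random.normal(scale=scale, size=c_src.shape)

# With scale=0.02 the displacement is on the order of 2% of the image side,
# so the resulting TPS warp is a mild, smooth deformation.
print(np.abs(c_dst - c_src).max())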
2,285,955
hyper_para.py
ttt-matching-based-vos_ttt_matching_vos/STCN/util/hyper_para.py
from argparse import ArgumentParser


def none_or_default(x, default):
    return x if x is not None else default

class HyperParameters():
    def parse(self, unknown_arg_ok=False):
        parser = ArgumentParser()

        # Enable torch.backends.cudnn.benchmark -- faster in some cases, test in your own environment
        parser.add_argument('--benchmark', action='store_true')
        parser.add_argument('--no_amp', action='store_true')

        # Data parameters
        parser.add_argument('--static_root', help='Static training data root', default='../static')
        parser.add_argument('--bl_root', help='Blender training data root', default='../BL30K')
        parser.add_argument('--yv_root', help='YouTubeVOS data root', default='../YouTube')
        parser.add_argument('--davis_root', help='DAVIS data root', default='../DAVIS')

        parser.add_argument('--stage', help='Training stage (0-static images, 1-Blender dataset, 2-DAVIS+YouTubeVOS (300K), 3-DAVIS+YouTubeVOS (150K))', type=int, default=0)
        parser.add_argument('--num_workers', help='Number of dataloader workers per process', type=int, default=8)

        # Generic learning parameters
        parser.add_argument('-b', '--batch_size', help='Default is dependent on the training stage, see below', default=None, type=int)
        parser.add_argument('-i', '--iterations', help='Default is dependent on the training stage, see below', default=None, type=int)
        parser.add_argument('--steps', help='Default is dependent on the training stage, see below', nargs="*", default=None, type=int)

        parser.add_argument('--lr', help='Initial learning rate', type=float)
        parser.add_argument('--gamma', help='LR := LR*gamma at every decay step', default=0.1, type=float)

        # Loading
        parser.add_argument('--load_network', help='Path to pretrained network weight only')
        parser.add_argument('--load_model', help='Path to the model file, including network, optimizer and such')

        # Logging information
        parser.add_argument('--id', help='Experiment UNIQUE id, use NULL to disable logging to tensorboard', default='NULL')
        parser.add_argument('--debug', help='Debug mode, which logs information more often', action='store_true')

        # Multiprocessing parameters, not set by users
        parser.add_argument('--local_rank', default=0, type=int, help='Local rank of this process')

        if unknown_arg_ok:
            args, _ = parser.parse_known_args()
            self.args = vars(args)
        else:
            self.args = vars(parser.parse_args())

        self.args['amp'] = not self.args['no_amp']

        # Stage-dependent hyperparameters
        # Assign defaults if not given
        if self.args['stage'] == 0:
            # Static image pretraining
            self.args['lr'] = none_or_default(self.args['lr'], 1e-5)
            self.args['batch_size'] = none_or_default(self.args['batch_size'], 8)
            self.args['iterations'] = none_or_default(self.args['iterations'], 300000)
            self.args['steps'] = none_or_default(self.args['steps'], [150000])
            self.args['single_object'] = True
        elif self.args['stage'] == 1:
            # BL30K pretraining
            self.args['lr'] = none_or_default(self.args['lr'], 1e-5)
            self.args['batch_size'] = none_or_default(self.args['batch_size'], 4)
            self.args['iterations'] = none_or_default(self.args['iterations'], 500000)
            self.args['steps'] = none_or_default(self.args['steps'], [400000])
            self.args['single_object'] = False
        elif self.args['stage'] == 2:
            # 300K main training after BL30K
            self.args['lr'] = none_or_default(self.args['lr'], 1e-5)
            self.args['batch_size'] = none_or_default(self.args['batch_size'], 4)
            self.args['iterations'] = none_or_default(self.args['iterations'], 300000)
            self.args['steps'] = none_or_default(self.args['steps'], [250000])
            self.args['single_object'] = False
        elif self.args['stage'] == 3:
            # 150K main training after static image pretraining
            self.args['lr'] = none_or_default(self.args['lr'], 1e-5)
            self.args['batch_size'] = none_or_default(self.args['batch_size'], 4)
            self.args['iterations'] = none_or_default(self.args['iterations'], 150000)
            self.args['steps'] = none_or_default(self.args['steps'], [125000])
            self.args['single_object'] = False
        else:
            raise NotImplementedError

    def __getitem__(self, key):
        return self.args[key]

    def __setitem__(self, key, value):
        self.args[key] = value

    def __str__(self):
        return str(self.args)
4,763
Python
.py
74
54.216216
173
0.629709
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
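A minimal usage sketch, inferred from the class interface above (the flag values are illustrative):

from util.hyper_para import HyperParameters

para = HyperParameters()
para.parse(unknown_arg_ok=True)  # e.g. invoked as: python train.py --stage 0 --id my_run

# Stage-dependent defaults have been filled in by parse()
print(para['lr'], para['batch_size'], para['single_object'])
para['debug'] = True             # __setitem__ allows overrides after parsing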
2,285,956
tensor_util.py
ttt-matching-based-vos_ttt_matching_vos/STCN/util/tensor_util.py
import torch.nn.functional as F

def compute_tensor_iu(seg, gt):
    intersection = (seg & gt).float().sum()
    union = (seg | gt).float().sum()

    return intersection, union

def compute_tensor_iou(seg, gt):
    intersection, union = compute_tensor_iu(seg, gt)
    iou = (intersection + 1e-6) / (union + 1e-6)

    return iou

# STM
def pad_divide_by(in_img, d, in_size=None):
    if in_size is None:
        h, w = in_img.shape[-2:]
    else:
        h, w = in_size

    if h % d > 0:
        new_h = h + d - h % d
    else:
        new_h = h
    if w % d > 0:
        new_w = w + d - w % d
    else:
        new_w = w
    lh, uh = int((new_h-h) / 2), int(new_h-h) - int((new_h-h) / 2)
    lw, uw = int((new_w-w) / 2), int(new_w-w) - int((new_w-w) / 2)
    pad_array = (int(lw), int(uw), int(lh), int(uh))
    out = F.pad(in_img, pad_array)
    return out, pad_array

def unpad(img, pad):
    if pad[2]+pad[3] > 0:
        img = img[:,:,pad[2]:-pad[3],:]
    if pad[0]+pad[1] > 0:
        img = img[:,:,:,pad[0]:-pad[1]]
    return img
1,045
Python
.py
34
25.176471
66
0.524476
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
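A short round-trip sketch (not in the repository) showing the intended use of pad_divide_by/unpad: pad a tensor so both spatial sides are divisible by 16, then crop the padding back off.

import torch
from util.tensor_util import pad_divide_by, unpad

x = torch.randn(1, 3, 100, 170)   # H=100, W=170 are not multiples of 16
padded, pads = pad_divide_by(x, 16)
print(padded.shape)               # torch.Size([1, 3, 112, 176])

restored = unpad(padded, pads)    # crop the symmetric padding back off
assert restored.shape == x.shape
assert torch.equal(restored, x)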
2,285,957
logger.py
ttt-matching-based-vos_ttt_matching_vos/STCN/util/logger.py
""" Dumps things to tensorboard and console """ import os import warnings import git import torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter def tensor_to_numpy(image): image_np = (image.numpy() * 255).astype('uint8') return image_np def detach_to_cpu(x): return x.detach().cpu() def fix_width_trunc(x): return ('{:.9s}'.format('{:0.9f}'.format(x))) class TensorboardLogger: def __init__(self, short_id, id): self.short_id = short_id if self.short_id == 'NULL': self.short_id = 'DEBUG' if id is None: self.no_log = True warnings.warn('Logging has been disbaled.') else: self.no_log = False self.inv_im_trans = transforms.Normalize( mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], std=[1/0.229, 1/0.224, 1/0.225]) self.inv_seg_trans = transforms.Normalize( mean=[-0.5/0.5], std=[1/0.5]) log_path = os.path.join('.', 'log', '%s' % id) self.logger = SummaryWriter(log_path) repo = git.Repo(".") self.log_string('git', str(repo.active_branch) + ' ' + str(repo.head.commit.hexsha)) def log_scalar(self, tag, x, step): if self.no_log: warnings.warn('Logging has been disabled.') return self.logger.add_scalar(tag, x, step) def log_metrics(self, l1_tag, l2_tag, val, step, f=None): tag = l1_tag + '/' + l2_tag text = '{:s} - It {:6d} [{:5s}] [{:13}]: {:s}'.format(self.short_id, step, l1_tag.upper(), l2_tag, fix_width_trunc(val)) print(text) if f is not None: f.write(text + '\n') f.flush() self.log_scalar(tag, val, step) def log_im(self, tag, x, step): if self.no_log: warnings.warn('Logging has been disabled.') return x = detach_to_cpu(x) x = self.inv_im_trans(x) x = tensor_to_numpy(x) self.logger.add_image(tag, x, step) def log_cv2(self, tag, x, step): if self.no_log: warnings.warn('Logging has been disabled.') return x = x.transpose((2, 0, 1)) self.logger.add_image(tag, x, step) def log_seg(self, tag, x, step): if self.no_log: warnings.warn('Logging has been disabled.') return x = detach_to_cpu(x) x = self.inv_seg_trans(x) x = tensor_to_numpy(x) self.logger.add_image(tag, x, step) def log_gray(self, tag, x, step): if self.no_log: warnings.warn('Logging has been disabled.') return x = detach_to_cpu(x) x = tensor_to_numpy(x) self.logger.add_image(tag, x, step) def log_string(self, tag, x): print(tag, x) if self.no_log: warnings.warn('Logging has been disabled.') return self.logger.add_text(tag, x)
3,017
Python
.py
83
27.313253
128
0.552804
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,958
log_integrator.py
ttt-matching-based-vos_ttt_matching_vos/STCN/util/log_integrator.py
""" Integrate numerical values for some iterations Typically used for loss computation / logging to tensorboard Call finalize and create a new Integrator when you want to display/log """ import torch class Integrator: def __init__(self, logger, distributed=True, local_rank=0, world_size=1): self.values = {} self.counts = {} self.hooks = [] # List is used here to maintain insertion order self.logger = logger self.distributed = distributed self.local_rank = local_rank self.world_size = world_size def add_tensor(self, key, tensor): if key not in self.values: self.counts[key] = 1 if type(tensor) == float or type(tensor) == int: self.values[key] = tensor else: self.values[key] = tensor.mean().item() else: self.counts[key] += 1 if type(tensor) == float or type(tensor) == int: self.values[key] += tensor else: self.values[key] += tensor.mean().item() def add_dict(self, tensor_dict): for k, v in tensor_dict.items(): self.add_tensor(k, v) def add_hook(self, hook): """ Adds a custom hook, i.e. compute new metrics using values in the dict The hook takes the dict as argument, and returns a (k, v) tuple e.g. for computing IoU """ if type(hook) == list: self.hooks.extend(hook) else: self.hooks.append(hook) def reset_except_hooks(self): self.values = {} self.counts = {} # Average and output the metrics def finalize(self, prefix, it, f=None): for hook in self.hooks: k, v = hook(self.values) self.add_tensor(k, v) for k, v in self.values.items(): if k[:4] == 'hide': continue avg = v / self.counts[k] if self.distributed: # Inplace operation avg = torch.tensor(avg).cuda() torch.distributed.reduce(avg, dst=0) if self.local_rank == 0: avg = (avg/self.world_size).cpu().item() self.logger.log_metrics(prefix, k, avg, it, f) else: # Simple does it self.logger.log_metrics(prefix, k, avg, it, f)
2,408
Python
.py
63
27.555556
77
0.550258
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
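A minimal non-distributed usage sketch of the Integrator; the stub logger is illustrative and stands in for util.logger.TensorboardLogger:

from util.log_integrator import Integrator

class PrintLogger:
    # Stands in for TensorboardLogger in this sketch
    def log_metrics(self, prefix, k, avg, it, f=None):
        print(f'{prefix}/{k} @ {it}: {avg:.4f}')

integrator = Integrator(PrintLogger(), distributed=False)
# Hook computing IoU from the accumulated (hidden) sums
integrator.add_hook(lambda values: ('iou', (values['hide_iou/i']+1)/(values['hide_iou/u']+1)))

for it in range(3):
    integrator.add_dict({'loss': 0.5, 'hide_iou/i': 80.0, 'hide_iou/u': 100.0})

integrator.finalize('train', 3)   # averages the 3 additions; keys starting with 'hide' are skipped
integrator.reset_except_hooks()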
2,285,959
load_subset.py
ttt-matching-based-vos_ttt_matching_vos/STCN/util/load_subset.py
""" load_subset.py - Presents a subset of data DAVIS - only the training set YouTubeVOS - I manually filtered some erroneous ones out but I haven't checked all """ def load_sub_davis(path='util/davis_subset.txt'): with open(path, mode='r') as f: subset = set(f.read().splitlines()) return subset def load_sub_yv(path='util/yv_subset.txt'): with open(path, mode='r') as f: subset = set(f.read().splitlines()) return subset
457
Python
.py
13
31.461538
82
0.687075
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,960
image_saver.py
ttt-matching-based-vos_ttt_matching_vos/STCN/util/image_saver.py
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms

from dataset.range_transform import inv_im_trans
from collections import defaultdict

# inv_seg_trans is referenced by tensor_to_seg below but was not defined in
# this file; the definition here mirrors the one in util/logger.py.
inv_seg_trans = transforms.Normalize(
    mean=[-0.5/0.5],
    std=[1/0.5])

def tensor_to_numpy(image):
    image_np = (image.numpy() * 255).astype('uint8')
    return image_np

def tensor_to_np_float(image):
    image_np = image.numpy().astype('float32')
    return image_np

def detach_to_cpu(x):
    return x.detach().cpu()

def transpose_np(x):
    return np.transpose(x, [1,2,0])

def tensor_to_gray_im(x):
    x = detach_to_cpu(x)
    x = tensor_to_numpy(x)
    x = transpose_np(x)
    return x

def tensor_to_im(x):
    x = detach_to_cpu(x)
    x = inv_im_trans(x).clamp(0, 1)
    x = tensor_to_numpy(x)
    x = transpose_np(x)
    return x

def tensor_to_seg(x):
    x = detach_to_cpu(x)
    x = inv_seg_trans(x).clamp(0, 1)
    x = tensor_to_numpy(x)
    x = transpose_np(x)
    return x

# Predefined key <-> caption dict
key_captions = {
    'im': 'Image',
    'gt': 'GT',
}

"""
Return an image array with captions
keys in the dictionary will be used as captions if not provided
values should contain lists of cv2 images
"""
def get_image_array(images, grid_shape, captions={}):
    h, w = grid_shape
    cate_counts = len(images)
    rows_counts = len(next(iter(images.values())))

    font = cv2.FONT_HERSHEY_SIMPLEX

    output_image = np.zeros([w*cate_counts, h*(rows_counts+1), 3], dtype=np.uint8)
    col_cnt = 0
    for k, v in images.items():

        # Default to the key value itself
        caption = captions.get(k, k)

        # Handles newline characters
        dy = 40
        for i, line in enumerate(caption.split('\n')):
            cv2.putText(output_image, line, (10, col_cnt*w+100+i*dy),
                    font, 0.8, (255,255,255), 2, cv2.LINE_AA)

        # Put images
        for row_cnt, img in enumerate(v):
            im_shape = img.shape
            if len(im_shape) == 2:
                img = img[..., np.newaxis]

            img = (img * 255).astype('uint8')

            output_image[(col_cnt+0)*w:(col_cnt+1)*w,
                         (row_cnt+1)*h:(row_cnt+2)*h, :] = img

        col_cnt += 1

    return output_image

def base_transform(im, size):
    im = tensor_to_np_float(im)
    if len(im.shape) == 3:
        im = im.transpose((1, 2, 0))
    else:
        im = im[:, :, None]

    # Resize
    if im.shape[1] != size:
        im = cv2.resize(im, size, interpolation=cv2.INTER_NEAREST)

    return im.clip(0, 1)

def im_transform(im, size):
    return base_transform(inv_im_trans(detach_to_cpu(im)), size=size)

def mask_transform(mask, size):
    return base_transform(detach_to_cpu(mask), size=size)

def out_transform(mask, size):
    return base_transform(detach_to_cpu(torch.sigmoid(mask)), size=size)

def pool_pairs(images, size, so):
    req_images = defaultdict(list)

    b, s, _, _, _ = images['gt'].shape

    # limit the number of images to save disk space
    # (min, not max: cap the batch dimension at 2)
    b = min(2, b)

    GT_name = 'GT'
    for b_idx in range(b):
        GT_name += ' %s\n' % images['info']['name'][b_idx]

    for b_idx in range(b):
        for s_idx in range(s):
            req_images['RGB'].append(im_transform(images['rgb'][b_idx,s_idx], size))
            if s_idx == 0:
                req_images['Mask'].append(np.zeros((size[1], size[0], 3)))
                if not so:
                    req_images['Mask 2'].append(np.zeros((size[1], size[0], 3)))
            else:
                req_images['Mask'].append(mask_transform(images['mask_%d'%s_idx][b_idx], size))
                if not so:
                    req_images['Mask 2'].append(mask_transform(images['sec_mask_%d'%s_idx][b_idx], size))
            req_images[GT_name].append(mask_transform(images['gt'][b_idx,s_idx], size))
            if not so:
                req_images[GT_name + '_2'].append(mask_transform(images['sec_gt'][b_idx,s_idx], size))

    return get_image_array(req_images, size, key_captions)
3,923
Python
.py
106
29.613208
105
0.58702
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,961
network.py
ttt-matching-based-vos_ttt_matching_vos/STCN/model/network.py
""" network.py - The core of the neural network Defines the structure and memory operations Modifed from STM: https://github.com/seoungwugoh/STM The trailing number of a variable usually denote the stride e.g. f16 -> encoded features with stride 16 """ import math import torch import torch.nn as nn import torch.nn.functional as F from model.modules import * class Decoder(nn.Module): def __init__(self): super().__init__() self.compress = ResBlock(1024, 512) self.up_16_8 = UpsampleBlock(512, 512, 256) # 1/16 -> 1/8 self.up_8_4 = UpsampleBlock(256, 256, 256) # 1/8 -> 1/4 self.pred = nn.Conv2d(256, 1, kernel_size=(3,3), padding=(1,1), stride=1) def forward(self, f16, f8, f4): x = self.compress(f16) x = self.up_16_8(f8, x) x = self.up_8_4(f4, x) x = self.pred(F.relu(x)) x = F.interpolate(x, scale_factor=4, mode='bilinear', align_corners=False) return x class MemoryReader(nn.Module): def __init__(self): super().__init__() def get_affinity(self, mk, qk): B, CK, T, H, W = mk.shape mk = mk.flatten(start_dim=2) qk = qk.flatten(start_dim=2) # See supplementary material a_sq = mk.pow(2).sum(1).unsqueeze(2) ab = mk.transpose(1, 2) @ qk affinity = (2*ab-a_sq) / math.sqrt(CK) # B, THW, HW # softmax operation; aligned the evaluation style maxes = torch.max(affinity, dim=1, keepdim=True)[0] x_exp = torch.exp(affinity - maxes) x_exp_sum = torch.sum(x_exp, dim=1, keepdim=True) affinity = x_exp / x_exp_sum return affinity def readout(self, affinity, mv, qv): B, CV, T, H, W = mv.shape mo = mv.view(B, CV, T*H*W) mem = torch.bmm(mo, affinity) # Weighted-sum B, CV, HW mem = mem.view(B, CV, H, W) mem_out = torch.cat([mem, qv], dim=1) return mem_out class STCN(nn.Module): def __init__(self, single_object): super().__init__() self.single_object = single_object self.key_encoder = KeyEncoder() if single_object: self.value_encoder = ValueEncoderSO() else: self.value_encoder = ValueEncoder() # Projection from f16 feature space to key space self.key_proj = KeyProjection(1024, keydim=64) # Compress f16 a bit to use in decoding later on self.key_comp = nn.Conv2d(1024, 512, kernel_size=3, padding=1) self.memory = MemoryReader() self.decoder = Decoder() def aggregate(self, prob): new_prob = torch.cat([ torch.prod(1-prob, dim=1, keepdim=True), prob ], 1).clamp(1e-7, 1-1e-7) logits = torch.log((new_prob /(1-new_prob))) return logits def encode_key(self, frame): # input: b*t*c*h*w b, t = frame.shape[:2] f16, f8, f4 = self.key_encoder(frame.flatten(start_dim=0, end_dim=1)) k16 = self.key_proj(f16) f16_thin = self.key_comp(f16) # B*C*T*H*W k16 = k16.view(b, t, *k16.shape[-3:]).transpose(1, 2).contiguous() # B*T*C*H*W f16_thin = f16_thin.view(b, t, *f16_thin.shape[-3:]) f16 = f16.view(b, t, *f16.shape[-3:]) f8 = f8.view(b, t, *f8.shape[-3:]) f4 = f4.view(b, t, *f4.shape[-3:]) return k16, f16_thin, f16, f8, f4 def encode_value(self, frame, kf16, mask, other_mask=None): # Extract memory key/value for a frame if self.single_object: f16 = self.value_encoder(frame, kf16, mask) else: f16 = self.value_encoder(frame, kf16, mask, other_mask) return f16.unsqueeze(2) # B*512*T*H*W def segment(self, qk16, qv16, qf8, qf4, mk16, mv16, selector=None): """ qk16, qv16, qf8, qf4 are the values corresponding to the current frame mk16, mv16 are the values in memory (past and not current frame) """ # q - query, m - memory # qv16 is f16_thin above affinity = self.memory.get_affinity(mk16, qk16) if self.single_object: logits = self.decoder(self.memory.readout(affinity, mv16, qv16), qf8, qf4) prob = torch.sigmoid(logits) else: logits = torch.cat([ 
self.decoder(self.memory.readout(affinity, mv16[:, i], qv16), qf8, qf4) for i in range(mv16.shape[1]) ], 1) prob = torch.sigmoid(logits) prob = prob * selector.unsqueeze(2).unsqueeze(2) logits = self.aggregate(prob) prob = F.softmax(logits, dim=1)[:, 1:] return logits, prob def decode(self, mv16, qv16, qf8, qf4, selector=None): B, CV, T, H, W = mv16[:, 0].shape if self.single_object: mem = mv16.view(B, CV, H, W) logits = self.decoder(torch.cat([mem, qv16], dim=1), qf8, qf4) prob = torch.sigmoid(logits) else: logits = [ self.decoder(torch.cat([mv16[:, i].view(B, CV, H, W), qv16], dim=1), qf8, qf4) for i in range(mv16.shape[1]) ] logits = torch.cat(logits, 1) prob = torch.sigmoid(logits) prob = prob * selector.unsqueeze(2).unsqueeze(2) logits = self.aggregate(prob) # add background + logits prob = F.softmax(logits, dim=1)[:, 1:] # softmax logits return logits, prob def forward(self, mode, *args, **kwargs): if mode == 'encode_key': return self.encode_key(*args, **kwargs) elif mode == 'encode_value': return self.encode_value(*args, **kwargs) elif mode == 'segment': return self.segment(*args, **kwargs) elif mode == 'decode': return self.decode(*args, **kwargs) else: raise NotImplementedError
5,934
Python
.py
142
32.507042
94
0.573576
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
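The get_affinity computation above drops the query-side norm: since the softmax is taken over the memory dimension, 2 m.q - |m|^2 differs from the full negative squared distance -|m - q|^2 only by the per-query constant -|q|^2, which cancels in the softmax. A standalone numeric check (not repository code):

import torch

torch.manual_seed(0)
THW, HW, C = 5, 4, 8
mk = torch.randn(C, THW)   # memory keys (one batch element)
qk = torch.randn(C, HW)    # query keys

# STCN-style score: 2*m.q - |m|^2 (the |q|^2 term is omitted)
score = 2 * mk.t() @ qk - mk.pow(2).sum(0, keepdim=True).t()

# Full negative squared Euclidean distance, shape (THW, HW)
full = -(mk.t().unsqueeze(1) - qk.t().unsqueeze(0)).pow(2).sum(-1)

# Both give identical softmax weights over the memory dimension (dim=0)
assert torch.allclose(torch.softmax(score, dim=0), torch.softmax(full, dim=0), atol=1e-5)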
2,285,962
losses.py
ttt-matching-based-vos_ttt_matching_vos/STCN/model/losses.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from util.tensor_util import compute_tensor_iu

from collections import defaultdict


def get_iou_hook(values):
    return 'iou/iou', (values['hide_iou/i']+1)/(values['hide_iou/u']+1)

def get_sec_iou_hook(values):
    return 'iou/sec_iou', (values['hide_iou/sec_i']+1)/(values['hide_iou/sec_u']+1)

iou_hooks_so = [
    get_iou_hook,
]

iou_hooks_mo = [
    get_iou_hook,
    get_sec_iou_hook,
]


class EntropyLoss(nn.Module):
    def __init__(self, dim):
        super(EntropyLoss, self).__init__()
        self.dim = dim

    def forward(self, x):
        b = F.softmax(x, dim=self.dim) * F.log_softmax(x, dim=self.dim)
        b = -1.0 * b.sum(self.dim)
        return b.mean()


class ConsistencyLoss(nn.Module):
    def __init__(self, ):
        super(ConsistencyLoss, self).__init__()
        self.kl_criterion = nn.KLDivLoss(reduction="mean", log_target=True)

    def forward(self, logits):
        loss = 0.
        x = [F.log_softmax(x, 1) for x in logits]
        for i, x1 in enumerate(x):
            for j, x2 in enumerate(x):
                if i != j:
                    loss += self.kl_criterion(x1, x2)
        return loss / len(x) / (len(x)-1)


# https://stackoverflow.com/questions/63735255/how-do-i-compute-bootstrapped-cross-entropy-loss-in-pytorch
class BootstrappedCE(nn.Module):
    def __init__(self, start_warm=20000, end_warm=70000, top_p=0.15):
        super().__init__()

        self.start_warm = start_warm
        self.end_warm = end_warm
        self.top_p = top_p

    def forward(self, input, target, it):
        if it < self.start_warm:
            return F.cross_entropy(input, target), 1.0

        raw_loss = F.cross_entropy(input, target, reduction='none').view(-1)
        num_pixels = raw_loss.numel()

        if it > self.end_warm:
            this_p = self.top_p
        else:
            length = max(self.end_warm-self.start_warm, 1)
            this_p = self.top_p + (1 - self.top_p) * ((self.end_warm - it) / length)
        loss, _ = torch.topk(raw_loss, int(num_pixels * this_p), sorted=False)
        print(f"raw_loss {raw_loss.mean()} loss {loss.mean()} this_p {this_p}")
        return loss.mean(), this_p


class LossComputer:
    def __init__(self, para, start_warm=20000, end_warm=70000, top_p=0.15):
        super().__init__()
        self.para = para
        self.bce = BootstrappedCE(start_warm, end_warm, top_p)

    def compute(self, data, it):
        losses = defaultdict(int)

        b, s, _, _, _ = data['gt'].shape
        selector = data.get('selector', None)

        for i in range(1, s):
            # Have to do it in a for-loop like this since not every entry has the second object
            # Well it's not a lot of iterations anyway
            for j in range(b):
                if selector is not None and selector[j][1] > 0.5:
                    loss, p = self.bce(data['logits_%d'%i][j:j+1], data['cls_gt'][j:j+1,i], it)
                else:
                    loss, p = self.bce(data['logits_%d'%i][j:j+1,:2], data['cls_gt'][j:j+1,i], it)

                losses['loss_%d'%i] += loss / b
                losses['p'] += p / b / (s-1)

            losses['total_loss'] += losses['loss_%d'%i]

            new_total_i, new_total_u = compute_tensor_iu(data['mask_%d'%i]>0.5, data['gt'][:,i]>0.5)
            losses['hide_iou/i'] += new_total_i
            losses['hide_iou/u'] += new_total_u

            if selector is not None:
                new_total_i, new_total_u = compute_tensor_iu(data['sec_mask_%d'%i]>0.5, data['sec_gt'][:,i]>0.5)
                losses['hide_iou/sec_i'] += new_total_i
                losses['hide_iou/sec_u'] += new_total_u

        return losses

    def compute_auto_encoder_first_frame(self, data, it):
        losses = defaultdict(int)

        b, s, _, _, _ = data['gt'].shape
        # selector = data.get('selector', None)

        for j in range(b):
            # print(f"data['logits'][j:j+1] {data['logits'][j:j+1].shape}")
            # print(f"data['cls_gt'][j:j+1] {data['cls_gt'][j:j + 1, 0].shape}")
            loss, p = self.bce(data['logits'][j:j+1], data['cls_gt'][j:j + 1, 0], it)

            losses['loss'] += loss / b
            losses['p'] += p / b

        losses['total_loss'] += losses['loss']
        print(f"losses {losses['total_loss']}")

        losses['hide_iou/i'] = 1
        losses['hide_iou/u'] = 1
        losses['hide_iou/sec_i'] = 1
        losses['hide_iou/sec_u'] = 1

        # new_total_i, new_total_u = compute_tensor_iu(
        #     data['masks'] > 0.5, data['gt'][:, i] > 0.5)
        # losses['hide_iou/i'] += new_total_i
        # losses['hide_iou/u'] += new_total_u
        #
        # if selector is not None:
        #     new_total_i, new_total_u = compute_tensor_iu(
        #         data['sec_mask_%d' % i] > 0.5, data['sec_gt'][:, i] > 0.5)
        #     losses['hide_iou/sec_i'] += new_total_i
        #     losses['hide_iou/sec_u'] += new_total_u

        return losses
5,014
Python
.py
111
36
112
0.546593
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
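For clarity, the kept-pixel fraction this_p in BootstrappedCE above anneals linearly: all pixels contribute before start_warm, and only the hardest top_p fraction after end_warm. A worked example of the schedule, using the defaults above:

start_warm, end_warm, top_p = 20000, 70000, 0.15

def kept_fraction(it):
    if it < start_warm:
        return 1.0
    if it > end_warm:
        return top_p
    length = max(end_warm - start_warm, 1)
    return top_p + (1 - top_p) * ((end_warm - it) / length)

for it in (0, 20000, 45000, 70000, 100000):
    print(it, round(kept_fraction(it), 3))
# -> 1.0, 1.0, 0.575, 0.15, 0.15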
2,285,963
eval_network.py
ttt-matching-based-vos_ttt_matching_vos/STCN/model/eval_network.py
""" eval_network.py - Evaluation version of the network The logic is basically the same but with top-k and some implementation optimization The trailing number of a variable usually denote the stride e.g. f16 -> encoded features with stride 16 """ import torch import torch.nn as nn import torch.nn.functional as F from model.modules import * from model.network import Decoder class STCN(nn.Module): def __init__(self): super().__init__() self.key_encoder = KeyEncoder() self.value_encoder = ValueEncoder() # Projection from f16 feature space to key space self.key_proj = KeyProjection(1024, keydim=64) # Compress f16 a bit to use in decoding later on self.key_comp = nn.Conv2d(1024, 512, kernel_size=3, padding=1) self.decoder = Decoder() def encode_value(self, frame, kf16, masks): k, _, h, w = masks.shape # Extract memory key/value for a frame with multiple masks frame = frame.view(1, 3, h, w).repeat(k, 1, 1, 1) # Compute the "others" mask if k != 1: others = torch.cat([ torch.sum( masks[[j for j in range(k) if i!=j]] , dim=0, keepdim=True) for i in range(k)], 0) else: others = torch.zeros_like(masks) f16 = self.value_encoder(frame, kf16.repeat(k,1,1,1), masks, others) return f16.unsqueeze(2) def encode_key(self, frame): f16, f8, f4 = self.key_encoder(frame) k16 = self.key_proj(f16) f16_thin = self.key_comp(f16) return k16, f16_thin, f16, f8, f4 def segment(self, mem_bank, qk16): return mem_bank.match_memory(qk16) def decode(self, readout_mem, mem_bank, qf8, qf4, qv16): k = mem_bank.num_objects return self.decode_readout(k, readout_mem, qv16, qf8, qf4) def segment_with_query(self, mem_bank, qf8, qf4, qk16, qv16): k = mem_bank.num_objects readout_mem = mem_bank.match_memory(qk16) return self.decode_readout(k, readout_mem, qv16, qf8, qf4) def decode_readout(self, k, readout_mem, qv16, qf8, qf4): qv16 = qv16.expand(k, -1, -1, -1) qv16 = torch.cat([readout_mem, qv16], 1) # decoded_mask = self.decoder(qv16, qf8, qf4) # si = torch.sigmoid(decoded_mask) # indx = torch.where((si[1, 0] > 0.5) & (si[2, 0] > 0.5)) # print(indx) # print(f"decoded_mask {decoded_mask.shape}") # print(f"decoded_mask [0, 0] {decoded_mask[:, 0, 263, 427]}") # print(f"si [0, 0] {si[:, 0, 263, 427]}") # print() # print(f"decoded_mask [100, 300] {decoded_mask[:, 0, 365, 426]}") # print(f"si [100, 300] {si[:, 0, 365, 426]}") # raise Exception("j") return torch.sigmoid(self.decoder(qv16, qf8, qf4))
2,854
Python
.py
66
35.287879
76
0.60224
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,964
model.py
ttt-matching-based-vos_ttt_matching_vos/STCN/model/model.py
""" model.py - warpper and utility functions for network training Compute loss, back-prop, update parameters, logging, etc. """ import os import time import torch import torch.nn as nn import torch.optim as optim from model.network import STCN from model.losses import LossComputer, iou_hooks_mo, iou_hooks_so from util.log_integrator import Integrator from util.image_saver import pool_pairs class STCNModel: def __init__(self, para, logger=None, save_path=None): #, local_rank=0, world_size=1): self.para = para self.single_object = para['single_object'] self.local_rank = 1 # self.STCN = nn.parallel.DistributedDataParallel( # STCN(self.single_object).cuda(), # device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False) self.STCN = STCN(self.single_object).cuda() # Setup logger when local_rank=0 self.logger = logger self.save_path = save_path if logger is not None: self.last_time = time.time() self.train_integrator = Integrator(self.logger, distributed=True) if self.single_object: self.train_integrator.add_hook(iou_hooks_so) else: self.train_integrator.add_hook(iou_hooks_mo) self.loss_computer = LossComputer(para) self.train() self.optimizer = optim.Adam(filter( lambda p: p.requires_grad, self.STCN.parameters()), lr=para['lr'], weight_decay=1e-7) self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, para['steps'], para['gamma']) if para['amp']: self.scaler = torch.cuda.amp.GradScaler() # Logging info self.report_interval = 100 self.save_im_interval = 800 self.save_model_interval = 50000 if para['debug']: self.report_interval = self.save_im_interval = 1 def do_pass(self, data, it=0): """ For the size below: T = frame count B = batch size O = object count C = feature size """ # No need to store the gradient outside training torch.set_grad_enabled(self._is_train) for k, v in data.items(): if type(v) != list and type(v) != dict and type(v) != int: data[k] = v.cuda(non_blocking=True) out = {} Fs = data['rgb'] # B, T, c, h, w Ms = data['gt'] # B, O, T, c, h, w with torch.cuda.amp.autocast(enabled=self.para['amp']): # key features never change, compute once k16, kf16_thin, kf16, kf8, kf4 = self.STCN('encode_key', Fs) # k16 after key projection head on kf16: B, C(64), T, h2, w2 # kf16_thin after value projection head on kf16 = B, T, C(512), h2, w2 # kf16 resnet50 midlevel 16 = B, T, C(1024), h2, w2 # kf8 resnet50 midlevel 8 = B, T, C(512), h2, w2 # kf4 resnet50 midlevel 4 = B, T, C(256), h2, w2 if self.single_object: ref_v = self.STCN('encode_value', Fs[:,0], kf16[:,0], Ms[:,0]) # Segment frame 1 with frame 0 prev_logits, prev_mask = self.STCN('segment', k16[:,:,1], kf16_thin[:,1], kf8[:,1], kf4[:,1], k16[:,:,0:1], ref_v) prev_v = self.STCN('encode_value', Fs[:,1], kf16[:,1], prev_mask) values = torch.cat([ref_v, prev_v], 2) del ref_v # Segment frame 2 with frame 0 and 1 this_logits, this_mask = self.STCN('segment', k16[:,:,2], kf16_thin[:,2], kf8[:,2], kf4[:,2], k16[:,:,0:2], values) out['mask_1'] = prev_mask out['mask_2'] = this_mask out['logits_1'] = prev_logits out['logits_2'] = this_logits else: sec_Ms = data['sec_gt'] # second object mask selector = data['selector'] ref_v1 = self.STCN('encode_value', Fs[:,0], kf16[:,0], Ms[:,0], sec_Ms[:,0]) ref_v2 = self.STCN('encode_value', Fs[:,0], kf16[:,0], sec_Ms[:,0], Ms[:,0]) ref_v = torch.stack([ref_v1, ref_v2], 1) # Segment frame 1 with frame 0 prev_logits, prev_mask = self.STCN('segment', k16[:,:,1], kf16_thin[:,1], kf8[:,1], kf4[:,1], k16[:,:,0:1], ref_v, selector) prev_v1 = self.STCN('encode_value', Fs[:,1], kf16[:,1], 
prev_mask[:,0:1], prev_mask[:,1:2]) prev_v2 = self.STCN('encode_value', Fs[:,1], kf16[:,1], prev_mask[:,1:2], prev_mask[:,0:1]) prev_v = torch.stack([prev_v1, prev_v2], 1) values = torch.cat([ref_v, prev_v], 3) del ref_v # Segment frame 2 with frame 0 and 1 this_logits, this_mask = self.STCN('segment', k16[:,:,2], kf16_thin[:,2], kf8[:,2], kf4[:,2], k16[:,:,0:2], values, selector) out['mask_1'] = prev_mask[:,0:1] out['mask_2'] = this_mask[:,0:1] out['sec_mask_1'] = prev_mask[:,1:2] out['sec_mask_2'] = this_mask[:,1:2] out['logits_1'] = prev_logits out['logits_2'] = this_logits if self._do_log or self._is_train: losses = self.loss_computer.compute({**data, **out}, it) # Logging if self._do_log: self.integrator.add_dict(losses) if self._is_train: if it % self.save_im_interval == 0 and it != 0: if self.logger is not None: images = {**data, **out} size = (384, 384) self.logger.log_cv2('train/pairs', pool_pairs(images, size, self.single_object), it) if self._is_train: if (it) % self.report_interval == 0 and it != 0: if self.logger is not None: self.logger.log_scalar('train/lr', self.scheduler.get_last_lr()[0], it) self.logger.log_metrics('train', 'time', (time.time()-self.last_time)/self.report_interval, it) self.last_time = time.time() self.train_integrator.finalize('train', it) self.train_integrator.reset_except_hooks() if it % self.save_model_interval == 0 and it != 0: if self.logger is not None: self.save(it) # Backward pass # This should be done outside autocast # but I trained it like this and it worked fine # so I am keeping it this way for reference self.optimizer.zero_grad(set_to_none=True) if self.para['amp']: self.scaler.scale(losses['total_loss']).backward() self.scaler.step(self.optimizer) self.scaler.update() else: losses['total_loss'].backward() self.optimizer.step() self.scheduler.step() def save(self, it): if self.save_path is None: print('Saving has been disabled.') return os.makedirs(os.path.dirname(self.save_path), exist_ok=True) model_path = self.save_path + ('_%s.pth' % it) torch.save(self.STCN.module.state_dict(), model_path) print('Model saved to %s.' % model_path) self.save_checkpoint(it) def save_checkpoint(self, it): if self.save_path is None: print('Saving has been disabled.') return os.makedirs(os.path.dirname(self.save_path), exist_ok=True) checkpoint_path = self.save_path + '_checkpoint.pth' checkpoint = { 'it': it, 'network': self.STCN.module.state_dict(), 'optimizer': self.optimizer.state_dict(), 'scheduler': self.scheduler.state_dict()} torch.save(checkpoint, checkpoint_path) print('Checkpoint saved to %s.' 
% checkpoint_path) def load_model(self, path): # This method loads everything and should be used to resume training map_location = 'cuda:%d' % self.local_rank checkpoint = torch.load(path, map_location={'cuda:0': map_location}) it = checkpoint['it'] network = checkpoint['network'] optimizer = checkpoint['optimizer'] scheduler = checkpoint['scheduler'] map_location = 'cuda:%d' % self.local_rank self.STCN.module.load_state_dict(network) self.optimizer.load_state_dict(optimizer) self.scheduler.load_state_dict(scheduler) print('Model loaded.') return it def load_network(self, path): # This method loads only the network weight and should be used to load a pretrained model map_location = 'cuda:%d' % self.local_rank src_dict = torch.load(path, map_location={'cuda:0': map_location}) # Maps SO weight (without other_mask) to MO weight (with other_mask) for k in list(src_dict.keys()): if k == 'value_encoder.conv1.weight': if src_dict[k].shape[1] == 4: pads = torch.zeros((64,1,7,7), device=src_dict[k].device) nn.init.orthogonal_(pads) src_dict[k] = torch.cat([src_dict[k], pads], 1) self.STCN.module.load_state_dict(src_dict) print('Network weight loaded:', path) def train(self): self._is_train = True self._do_log = True self.integrator = self.train_integrator # Shall be in eval() mode to freeze BN parameters self.STCN.eval() return self def val(self): self._is_train = False self._do_log = True self.STCN.eval() return self def test(self): self._is_train = False self._do_log = False self.STCN.eval() return self
10,190
Python
.py
212
35.150943
119
0.537809
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,965
cbam.py
ttt-matching-based-vos_ttt_matching_vos/STCN/model/cbam.py
# Modified from https://github.com/Jongchan/attention-module/blob/master/MODELS/cbam.py

import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicConv(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)

    def forward(self, x):
        x = self.conv(x)
        return x

class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)

class ChannelGate(nn.Module):
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
            )
        self.pool_types = pool_types

    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type=='avg':
                avg_pool = F.avg_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( avg_pool )
            elif pool_type=='max':
                max_pool = F.max_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( max_pool )

            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw

        scale = torch.sigmoid( channel_att_sum ).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale

class ChannelPool(nn.Module):
    def forward(self, x):
        return torch.cat( (torch.max(x,1)[0].unsqueeze(1), torch.mean(x,1).unsqueeze(1)), dim=1 )

class SpatialGate(nn.Module):
    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2)

    def forward(self, x):
        x_compress = self.compress(x)
        x_out = self.spatial(x_compress)
        scale = torch.sigmoid(x_out) # broadcasting
        return x * scale

class CBAM(nn.Module):
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max'], no_spatial=False):
        super(CBAM, self).__init__()
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.no_spatial = no_spatial
        if not no_spatial:
            self.SpatialGate = SpatialGate()

    def forward(self, x):
        x_out = self.ChannelGate(x)
        if not self.no_spatial:
            x_out = self.SpatialGate(x_out)
        return x_out
3,042
Python
.py
67
36.671642
154
0.610455
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,966
mod_resnet.py
ttt-matching-based-vos_ttt_matching_vos/STCN/model/mod_resnet.py
""" mod_resnet.py - A modified ResNet structure We append extra channels to the first conv by some network surgery """ from collections import OrderedDict import math import torch import torch.nn as nn from torch.utils import model_zoo def load_weights_sequential(target, source_state, extra_chan=1): new_dict = OrderedDict() for k1, v1 in target.state_dict().items(): if not 'num_batches_tracked' in k1: if k1 in source_state: tar_v = source_state[k1] if v1.shape != tar_v.shape: # Init the new segmentation channel with zeros # print(v1.shape, tar_v.shape) c, _, w, h = v1.shape pads = torch.zeros((c,extra_chan,w,h), device=tar_v.device) nn.init.orthogonal_(pads) tar_v = torch.cat([tar_v, pads], 1) new_dict[k1] = tar_v target.load_state_dict(new_dict, strict=False) model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', } def conv3x3(in_planes, out_planes, stride=1, dilation=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=dilation, padding=dilation) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers=(3, 4, 23, 3), extra_chan=1): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3+extra_chan, 64, kernel_size=7, stride=2, padding=3) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1, dilation=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride), nn.BatchNorm2d(planes * block.expansion), ) layers = [block(self.inplanes, planes, stride, downsample)] self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, dilation=dilation)) return nn.Sequential(*layers) def resnet18(pretrained=True, extra_chan=0): model = ResNet(BasicBlock, [2, 2, 2, 2], extra_chan) if pretrained: load_weights_sequential(model, model_zoo.load_url(model_urls['resnet18']), extra_chan) return model def resnet50(pretrained=True, extra_chan=0): model = ResNet(Bottleneck, [3, 4, 6, 3], extra_chan) if pretrained: load_weights_sequential(model, model_zoo.load_url(model_urls['resnet50']), extra_chan) return model
5,526
Python
.py
126
34.388889
95
0.60859
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
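The surgery performed by load_weights_sequential can be shown in isolation (a sketch under assumptions, not the repository function): a pretrained RGB conv weight of shape (64, 3, 7, 7) is extended to accept extra mask channels by concatenating orthogonally-initialized slices along the input-channel dimension.

import torch
import torch.nn as nn

extra_chan = 2                           # e.g. mask + other-objects mask
pretrained_w = torch.randn(64, 3, 7, 7)  # stands in for a pretrained conv1 weight

pads = torch.zeros(64, extra_chan, 7, 7)
nn.init.orthogonal_(pads)                # orthogonal init for the new channels
new_w = torch.cat([pretrained_w, pads], 1)  # shape (64, 5, 7, 7)

conv1 = nn.Conv2d(3 + extra_chan, 64, kernel_size=7, stride=2, padding=3)
conv1.weight.data.copy_(new_w)           # RGB filters keep their pretrained values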
2,285,967
modules.py
ttt-matching-based-vos_ttt_matching_vos/STCN/model/modules.py
""" modules.py - This file stores the rathering boring network blocks. """ import torch import torch.nn as nn import torch.nn.functional as F from torchvision import models from model import mod_resnet from model import cbam class ResBlock(nn.Module): def __init__(self, indim, outdim=None): super(ResBlock, self).__init__() if outdim == None: outdim = indim if indim == outdim: self.downsample = None else: self.downsample = nn.Conv2d(indim, outdim, kernel_size=3, padding=1) self.conv1 = nn.Conv2d(indim, outdim, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(outdim, outdim, kernel_size=3, padding=1) def forward(self, x): r = self.conv1(F.relu(x)) r = self.conv2(F.relu(r)) if self.downsample is not None: x = self.downsample(x) return x + r class FeatureFusionBlock(nn.Module): def __init__(self, indim, outdim): super().__init__() self.block1 = ResBlock(indim, outdim) self.attention = cbam.CBAM(outdim) self.block2 = ResBlock(outdim, outdim) def forward(self, x, f16): x = torch.cat([x, f16], 1) x = self.block1(x) r = self.attention(x) x = self.block2(x + r) return x # Single object version, used only in static image pretraining # This will be loaded and modified into the multiple objects version later (in stage 1/2/3) # See model.py (load_network) for the modification procedure class ValueEncoderSO(nn.Module): def __init__(self): super().__init__() resnet = mod_resnet.resnet18(pretrained=True, extra_chan=1) self.conv1 = resnet.conv1 self.bn1 = resnet.bn1 self.relu = resnet.relu # 1/2, 64 self.maxpool = resnet.maxpool self.layer1 = resnet.layer1 # 1/4, 64 self.layer2 = resnet.layer2 # 1/8, 128 self.layer3 = resnet.layer3 # 1/16, 256 self.fuser = FeatureFusionBlock(1024 + 256, 512) def forward(self, image, key_f16, mask): # key_f16 is the feature from the key encoder f = torch.cat([image, mask], 1) x = self.conv1(f) x = self.bn1(x) x = self.relu(x) # 1/2, 64 x = self.maxpool(x) # 1/4, 64 x = self.layer1(x) # 1/4, 64 x = self.layer2(x) # 1/8, 128 x = self.layer3(x) # 1/16, 256 x = self.fuser(x, key_f16) return x # Multiple objects version, used in other times class ValueEncoder(nn.Module): def __init__(self): super().__init__() resnet = mod_resnet.resnet18(pretrained=True, extra_chan=2) self.conv1 = resnet.conv1 self.bn1 = resnet.bn1 self.relu = resnet.relu # 1/2, 64 self.maxpool = resnet.maxpool self.layer1 = resnet.layer1 # 1/4, 64 self.layer2 = resnet.layer2 # 1/8, 128 self.layer3 = resnet.layer3 # 1/16, 256 self.fuser = FeatureFusionBlock(1024 + 256, 512) def forward(self, image, key_f16, mask, other_masks): """ for each object, they stack image + mask + other_masks as multiple channels, and apply resnet18 layers on top of it """ # key_f16 is the feature from the key encoder f = torch.cat([image, mask, other_masks], 1) x = self.conv1(f) x = self.bn1(x) x = self.relu(x) # 1/2, 64 x = self.maxpool(x) # 1/4, 64 x = self.layer1(x) # 1/4, 64 x = self.layer2(x) # 1/8, 128 x = self.layer3(x) # 1/16, 256 x = self.fuser(x, key_f16) return x class KeyEncoder(nn.Module): def __init__(self): super().__init__() resnet = models.resnet50(pretrained=True) self.conv1 = resnet.conv1 self.bn1 = resnet.bn1 self.relu = resnet.relu # 1/2, 64 self.maxpool = resnet.maxpool self.res2 = resnet.layer1 # 1/4, 256 self.layer2 = resnet.layer2 # 1/8, 512 self.layer3 = resnet.layer3 # 1/16, 1024 def forward(self, f): x = self.conv1(f) x = self.bn1(x) x = self.relu(x) # 1/2, 64 x = self.maxpool(x) # 1/4, 64 f4 = self.res2(x) # 1/4, 256 f8 = self.layer2(f4) # 1/8, 512 f16 = self.layer3(f8) # 1/16, 1024 return f16, f8, f4 class 
UpsampleBlock(nn.Module): def __init__(self, skip_c, up_c, out_c, scale_factor=2): super().__init__() self.skip_conv = nn.Conv2d(skip_c, up_c, kernel_size=3, padding=1) self.out_conv = ResBlock(up_c, out_c) self.scale_factor = scale_factor def forward(self, skip_f, up_f): x = self.skip_conv(skip_f) x = x + F.interpolate(up_f, scale_factor=self.scale_factor, mode='bilinear', align_corners=False) x = self.out_conv(x) return x class KeyProjection(nn.Module): def __init__(self, indim, keydim): super().__init__() self.key_proj = nn.Conv2d(indim, keydim, kernel_size=3, padding=1) nn.init.orthogonal_(self.key_proj.weight.data) nn.init.zeros_(self.key_proj.bias.data) def forward(self, x): return self.key_proj(x)
5,213
Python
.py
133
31.233083
105
0.593545
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,968
aggregate.py
ttt-matching-based-vos_ttt_matching_vos/STCN/model/aggregate.py
import torch
import torch.nn.functional as F


# Soft aggregation from STM
def aggregate(prob, keep_bg=False):
    logits = get_logits(prob)
    return get_softmax(logits, keep_bg)

def get_logits(prob):
    # add the background mask
    new_prob = torch.cat([
        torch.prod(1-prob, dim=0, keepdim=True),
        prob
    ], 0).clamp(1e-7, 1-1e-7)
    logits = torch.log((new_prob /(1-new_prob)))
    return logits

def get_softmax(logits, keep_bg=False):
    if keep_bg:
        return F.softmax(logits, dim=0)
    else:
        return F.softmax(logits, dim=0)[1:]

def get_log_softmax(logits, keep_bg=False):
    if keep_bg:
        return F.log_softmax(logits, dim=0)
    else:
        return F.log_softmax(logits, dim=0)[1:]

def get_entropy(logits, keep_bg=False):
    entropy = get_softmax(logits, keep_bg) * get_log_softmax(logits, keep_bg)
    entropy = -1.0 * entropy.sum()
    return entropy.mean()

def aggregate0(prob, keep_bg=False):
    # add the background mask
    new_prob = torch.cat([
        torch.prod(1-prob, dim=0, keepdim=True),
        prob
    ], 0).clamp(1e-7, 1-1e-7)
    logits = torch.log((new_prob /(1-new_prob)))

    if keep_bg:
        return F.softmax(logits, dim=0)
    else:
        return F.softmax(logits, dim=0)[1:]
1,265
Python
.py
39
27.025641
77
0.643328
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
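A worked numeric example (standalone) of the soft aggregation above: per-object probabilities are joined with a derived background probability prod(1 - p_i), converted to logits via log(p / (1 - p)), and renormalized with a softmax.

import torch
import torch.nn.functional as F

# Two objects at a single pixel: object 1 at 0.7, object 2 at 0.2
prob = torch.tensor([[0.7], [0.2]])

bg = torch.prod(1 - prob, dim=0, keepdim=True)     # (1-0.7)*(1-0.2) = 0.24
new_prob = torch.cat([bg, prob], 0).clamp(1e-7, 1-1e-7)
logits = torch.log(new_prob / (1 - new_prob))      # per-channel logit transform

out = F.softmax(logits, dim=0)
print(out.squeeze())                               # ~[0.109, 0.805, 0.086], sums to 1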
2,285,969
davisc_create.py
ttt-matching-based-vos_ttt_matching_vos/DAVIS-C/davisc_create.py
import os, glob, cv2, subprocess, numpy as np
import tensorflow_hub as hub
import tensorflow as tf
from PIL import Image
import skimage.color as cl
from skimage.filters import gaussian
from imgaug.augmenters.artistic import stylize_cartoon

DAVIS_VIDEOS = ['bike-packing', 'blackswan', 'bmx-trees', 'breakdance', 'camel',
                'car-roundabout', 'car-shadow', 'cows', 'dance-twirl', 'dog',
                'dogs-jump', 'drift-chicane', 'drift-straight', 'goat', 'gold-fish',
                'horsejump-high', 'india', 'judo', 'kite-surf', 'lab-coat',
                'libby', 'loading', 'mbike-trick', 'motocross-jump', 'paragliding-launch',
                'parkour', 'pigs', 'scooter-black', 'shooting', 'soapbox']


def checkdir(dr):
    if not os.path.exists(dr):
        os.mkdir(dr)


##### ------------------------------------------------------------------------------------------
##### functions from the creation of the ImageNet-C dataset
##### https://github.com/hendrycks/robustness/blob/master/ImageNet-C/create_c/make_imagenet_c.py
##### ------------------------------------------------------------------------------------------

def glass_blur(y, severity=1):
    # sigma, max_delta, iterations
    c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1]

    x = np.uint8(gaussian(np.array(y) / 255., sigma=c[0], multichannel=True) * 255)

    # locally shuffle pixels
    for i in range(c[2]):
        for h in range(y.size[1] - c[1], c[1], -1):
            for w in range(y.size[0] - c[1], c[1], -1):
                dx, dy = np.random.randint(-c[1], c[1], size=(2,))
                h_prime, w_prime = h + dy, w + dx
                # swap
                x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]

    return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255


def defocus_blur(y, severity=1):
    c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]

    x = np.array(y) / 255.
    kernel = disk(radius=c[0], alias_blur=c[1])

    channels = []
    for d in range(3):
        channels.append(cv2.filter2D(x[:, :, d], -1, kernel))
    channels = np.array(channels).transpose((1, 2, 0))  # 3x224x224 -> 224x224x3

    return np.clip(channels, 0, 1) * 255


def gaussian_noise(y, severity=1):
    c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]

    x = np.array(y) / 255.
    return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255


def contrast(y, severity=1):
    c = [0.4, .3, .2, .1, .05][severity - 1]

    x = np.array(y) / 255.
    means = np.mean(x, axis=(0, 1), keepdims=True)
    return np.clip((x - means) * c + means, 0, 1) * 255


def brightness(y, severity=1):
    c = [.1, .2, .3, .4, .5][severity - 1]

    x = np.array(y) / 255.
    x = cl.rgb2hsv(x)
    x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
    x = cl.hsv2rgb(x)

    return np.clip(x, 0, 1) * 255


def saturate(y, severity=1):
    c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]

    x = np.array(y) / 255.
    x = cl.rgb2hsv(x)
    x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)
    x = cl.hsv2rgb(x)

    return np.clip(x, 0, 1) * 255


def pixelate(y, severity=1):
    c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]

    x = y.resize((int(y.size[0] * c), int(y.size[1] * c)), Image.BOX)
    x = x.resize((y.size[0], y.size[1]), Image.BOX)

    return x


def disk(radius, alias_blur=0.1, dtype=np.float32):
    if radius <= 8:
        L = np.arange(-8, 8 + 1)
        ksize = (3, 3)
    else:
        L = np.arange(-radius, radius + 1)
        ksize = (5, 5)
    X, Y = np.meshgrid(L, L)
    aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
    aliased_disk /= np.sum(aliased_disk)

    # supersample disk to antialias
    return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)


##### ------------------------------------------------------------------------------------------
##### code borrowed and modified from
##### Neural Style Transfer Transition Video Processing
##### By Brycen Westgarth and Tristan Jogminas
##### https://github.com/westgarthb/style-transfer-video-processor
##### ------------------------------------------------------------------------------------------

class Style:
    def __init__(self, style_files):
        os.environ['TFHUB_CACHE_DIR'] = f'./tensorflow_cache'
        self.hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
        self.style_files = style_files
        self.get_style_info()

    def get_style_info(self):
        self.style = []
        for fn in self.style_files:
            style = cv2.imread(fn)
            style = cv2.cvtColor(style, cv2.COLOR_BGR2RGB)
            style = style / 255.0
            style = tf.cast(tf.convert_to_tensor(style), tf.float32)
            self.style.append(tf.constant(tf.expand_dims(style, axis=0)))

    def _trim_img(self, img, frame_width, frame_height):
        return img[:frame_height, :frame_width]

    def stylize(self, content, style, frame_width, frame_height, preserve_colors=True):
        content = cv2.cvtColor(content, cv2.COLOR_BGR2RGB) / 255.0
        content = tf.cast(tf.convert_to_tensor(content), tf.float32)
        expanded_content = tf.constant(tf.expand_dims(content, axis=0))

        # Apply style transfer
        stylized_img = self.hub_module(expanded_content, style).pop()
        stylized_img = tf.squeeze(stylized_img)
        stylized_img = np.asarray(self._trim_img(stylized_img, frame_width, frame_height))

        if preserve_colors:
            stylized_img = self._color_correct_to_input(content, stylized_img, frame_width, frame_height)

        stylized_img = cv2.cvtColor(stylized_img, cv2.COLOR_RGB2BGR) * 255.0
        return stylized_img

    def _color_correct_to_input(self, content, generated, frame_width, frame_height):
        # image manipulations for compatibility with opencv
        content = np.array((content * 255.0), dtype=np.float32)
        content = cv2.cvtColor(content, cv2.COLOR_BGR2YCR_CB)
        generated = np.array((generated * 255.0), dtype=np.float32)
        generated = cv2.cvtColor(generated, cv2.COLOR_BGR2YCR_CB)
        generated = self._trim_img(generated, frame_width, frame_height)
        # extract channels, merge intensity and color spaces
        color_corrected = np.zeros(generated.shape, dtype=np.float32)
        color_corrected[:, :, 0] = generated[:, :, 0]
        color_corrected[:, :, 1] = content[:, :, 1]
        color_corrected[:, :, 2] = content[:, :, 2]
        return cv2.cvtColor(color_corrected, cv2.COLOR_YCrCb2BGR) / 255.0


##### ------------------------------------------------------------------------------------------
##### load DAVIS and create DAVIS-C
#####
##### ------------------------------------------------------------------------------------------

def process_frames(input_folder, output_folder, severity=1):
    trans_names = ['gaussian_noise', 'contrast', 'brightness', 'saturate', 'glass_blur',
                   'defocus_blur', 'pixelate', 'cartoon', 'motion_blur', 'crf_compression',
                   'style1', 'style2', 'style3', 'style4']

    st = Style(['styles/1.png', 'styles/2.png', 'styles/3.png', 'styles/4.png'])

    print(input_folder)
    input_folder_files = glob.glob(f'{input_folder}/*')
    if len(input_folder_files):
        # Retrieve an image in the input frame dir to get the width
        img = cv2.imread(input_folder_files[0])
        frame_width = img.shape[1]
        frame_height = img.shape[0]

    checkdir(output_folder)
    for t in trans_names:
        checkdir(output_folder + t + "/")
    for t in trans_names:
        checkdir(output_folder + t + "/" + input_folder.split('/')[-2])

    motionblur_size = [0, 0, 2, 3, 4][severity - 1]
    framelist = []

    for count, filename in enumerate(sorted(input_folder_files)):
        content_img = cv2.imread(filename)
        img_pil = Image.fromarray(content_img)

        # from IMAGENET-C
        x = gaussian_noise(img_pil, severity=severity)
        cv2.imwrite(output_folder + "gaussian_noise/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), x)

        x = contrast(img_pil, severity=severity)
        cv2.imwrite(output_folder + "contrast/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), x)

        x = brightness(img_pil, severity=severity)
        cv2.imwrite(output_folder + "brightness/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), x)

        x = saturate(img_pil, severity=severity)
        cv2.imwrite(output_folder + "saturate/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), x)

        x = glass_blur(img_pil, severity=severity)
        cv2.imwrite(output_folder + "glass_blur/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), x)

        x = defocus_blur(img_pil, severity=severity)
        cv2.imwrite(output_folder + "defocus_blur/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), x)

        x = pixelate(img_pil, severity=severity)
        cv2.imwrite(output_folder + "pixelate/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), np.array(x))

        # not from IMAGENET-C
        # cartoon
        x = stylize_cartoon(content_img,
                            blur_ksize=[1, 1, 1, 3, 5][severity - 1],
                            segmentation_size=[1.0, 1.0, 1.0, 1.2, 1.5][severity - 1],
                            saturation=[1.0, 1.0, 1.0, 1.5, 2.0][severity - 1],
                            edge_prevalence=.8, suppress_edges=True)
        cv2.imwrite(output_folder + "cartoon/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), x)

        # stylize
        for j in range(len(st.style)):
            x = st.stylize(content_img, st.style[j],
                           frame_width=frame_width, frame_height=frame_height,
                           preserve_colors=severity <= 4)
            cv2.imwrite(output_folder + "style{}".format(j + 1) + "/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), x)

        # motion blur
        framelist.append(img_pil)
        if len(framelist) > motionblur_size:
            framelist.pop(0)
        w = 1
        totalw = w
        x = np.array(framelist[0]).astype(np.float32)
        for j in range(1, len(framelist)):
            w += 1
            x += w * np.array(framelist[j]).astype(np.float32)
            totalw += w
        x /= totalw
        cv2.imwrite(output_folder + "motion_blur/" + input_folder.split('/')[-2] + '/' + filename.replace(input_folder, ''), np.array(x))

    # crf-compression - for the whole video with ffmpeg
    subprocess.run("rm out.mp4 out2.mp4", shell=True)
    cmd_str = "ffmpeg -y -framerate 24 -pattern_type glob -i '{}/*.jpg' out.mp4".format(input_folder)
    subprocess.run(cmd_str, shell=True)
    crf_value = [0, 0, 40, 45, 50][severity - 1]
    cmd_str = "ffmpeg -y -i out.mp4 -vcodec libx265 -crf {} out2.mp4".format(crf_value)
    subprocess.run(cmd_str, shell=True)
    cmd_str = "ffmpeg -y -i out2.mp4 -r 24 -qscale:v 2 -start_number 0 {}/%05d.jpg".format(output_folder + "crf_compression/" + input_folder.split('/')[-2] + '/')
    subprocess.run(cmd_str, shell=True)
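
# Usage sketch added for illustration (not part of the original pipeline):
# corrupt a single frame with one ImageNet-C style transform. The frame path is
# an assumption; process_frames() above is the real entry point.
def _example_corrupt_single_frame():
    bgr = cv2.imread('DAVIS/DAVIS17/JPEGImages/480p/dog/00000.jpg')
    frame = Image.fromarray(bgr)                # same PIL wrapping as process_frames
    noisy = gaussian_noise(frame, severity=3)   # float array with values in [0, 255]
    cv2.imwrite('00000_gaussian_noise.jpg', noisy)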
if __name__ == "__main__": dir_davis = 'DAVIS/DAVIS17/JPEGImages/480p/' dir_out_main = 'DAVISC/' checkdir(dir_out_main) for severity in [3, 4, 5]: print("severity {}".format(severity), flush=True) if severity == 3: print("low") dir_out = dir_out_main+'low/' elif severity == 4: print("med") dir_out = dir_out_main+'med/' elif severity == 5: print("high") dir_out = dir_out_main+'high/' checkdir(dir_out) for v in DAVIS_VIDEOS: process_frames(dir_davis+v+'/', dir_out, severity = severity)
12,364
Python
.py
246
40.861789
156
0.548933
ttt-matching-based-vos/ttt_matching_vos
8
0
0
GPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,970
setup.py
NioTheFirst_ScType/setup.py
from setuptools import setup, find_packages

with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="slither-analyzer",
    description="Slither is a Solidity static analysis framework written in Python 3.",
    url="https://github.com/crytic/slither",
    author="Trail of Bits",
    version="0.9.3",
    packages=find_packages(),
    python_requires=">=3.8",
    install_requires=[
        "packaging",
        "prettytable>=3.3.0",
        "pycryptodome>=3.4.6",
        # "crytic-compile>=0.3.1,<0.4.0",
        "crytic-compile@git+https://github.com/crytic/crytic-compile.git@dev#egg=crytic-compile",
        "web3>=6.0.0",
        "eth-abi>=4.0.0",
        "eth-typing>=3.0.0",
        "eth-utils>=2.1.0",
    ],
    extras_require={
        "lint": [
            "black==22.3.0",
            "pylint==2.13.4",
        ],
        "test": [
            "pytest",
            "pytest-cov",
            "pytest-xdist",
            "deepdiff",
            "numpy",
            "coverage[toml]",
            "filelock",
            "pytest-insta",
            "solc-select@git+https://github.com/crytic/solc-select.git@query-artifact-path#egg=solc-select",
        ],
        "doc": [
            "pdoc",
        ],
        "dev": [
            "slither-analyzer[lint,test,doc]",
            "openai",
        ],
    },
    license="AGPL-3.0",
    long_description=long_description,
    long_description_content_type="text/markdown",
    entry_points={
        "console_scripts": [
            "slither = slither.__main__:main",
            "slither-check-upgradeability = slither.tools.upgradeability.__main__:main",
            "slither-find-paths = slither.tools.possible_paths.__main__:main",
            "slither-simil = slither.tools.similarity.__main__:main",
            "slither-flat = slither.tools.flattening.__main__:main",
            "slither-format = slither.tools.slither_format.__main__:main",
            "slither-check-erc = slither.tools.erc_conformance.__main__:main",
            "slither-check-kspec = slither.tools.kspec_coverage.__main__:main",
            "slither-prop = slither.tools.properties.__main__:main",
            "slither-mutate = slither.tools.mutator.__main__:main",
            "slither-read-storage = slither.tools.read_storage.__main__:main",
            "slither-doctor = slither.tools.doctor.__main__:main",
            "slither-documentation = slither.tools.documentation.__main__:main",
            "slither-interface = slither.tools.interface.__main__:main",
        ]
    },
)
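
# Illustrative install commands (standard pip usage; not part of this file):
#   pip install .            # core slither-analyzer package
#   pip install ".[dev]"     # plus the lint/test/doc extras defined above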
2,573
Python
.py
68
28.632353
108
0.565721
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,971
financial_type_keys.py
NioTheFirst_ScType/financial_type_keys.py
'''
Copy of the table within `tcheck_parser.py`.
This table stores the key associated with each financial type.
'''

f_type_num = {
    -1: "undef",
    0: "raw balance",
    1: "net balance",
    2: "accrued balance",
    3: "final balance",
    10: "compound fee ratio (t)",
    11: "transaction fee",
    12: "simple fee ratio",
    13: "transaction fee (n)",
    14: "transaction fee (d)",
    20: "simple interest ratio",
    21: "compound interest ratio",
    22: "simple interest",
    23: "compound interest",
    30: "reserve",
    40: "price/exchange rate",
    50: "debt",
    60: "dividend",
}
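
# Illustrative helper (not in the original module): map a numeric key from the
# type checker to its human-readable financial-type name, falling back to
# "undef" for unknown keys.
def describe_f_type(key):
    return f_type_num.get(key, f_type_num[-1])

# e.g. describe_f_type(11) -> "transaction fee"; describe_f_type(99) -> "undef"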
604
Python
.py
24
21.083333
62
0.612069
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,972
json_diff.py
NioTheFirst_ScType/scripts/json_diff.py
import sys
import json
from pprint import pprint
from deepdiff import DeepDiff  # pip install deepdiff

if len(sys.argv) != 3:
    print("Usage: python json_diff.py 1.json 2.json")
    sys.exit(-1)

with open(sys.argv[1], encoding="utf8") as f:
    d1 = json.load(f)
with open(sys.argv[2], encoding="utf8") as f:
    d2 = json.load(f)

# Remove description field to allow non-deterministic print
for elem in d1:
    if "description" in elem:
        del elem["description"]
for elem in d2:
    if "description" in elem:
        del elem["description"]

pprint(DeepDiff(d1, d2, ignore_order=True, verbose_level=2))
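
# Example invocation (from the usage string above):
#   python json_diff.py 1.json 2.json
# DeepDiff returns an empty diff ({}) when the two result files only differ in
# the stripped "description" fields.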
618
Python
.py
19
29
60
0.707276
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,973
__about__.py
NioTheFirst_ScType/slither/__about__.py
# SPDX-FileCopyrightText: 2023-present NioTheFirst <[email protected]>
#
# SPDX-License-Identifier: MIT
__version__ = "0.1.0"
136
Python
.py
4
33
80
0.772727
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,974
tcheck_module.py
NioTheFirst_ScType/slither/tcheck_module.py
total_compilations = -1


def update_total_compilations(total):
    global total_compilations
    total_compilations = total


def get_total_compilations():
    global total_compilations
    return total_compilations
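
# Usage sketch (mirrors how slither/__main__.py drives this module): the driver
# records how many compilations exist before analysis, and later passes query it.
#   tcheck_module.update_total_compilations(len(compilations))
#   n = tcheck_module.get_total_compilations()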
217
Python
.py
7
27.285714
37
0.78744
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,975
__main__.py
NioTheFirst_ScType/slither/__main__.py
#!/usr/bin/env python3

import argparse
import cProfile
import glob
import inspect
import json
import logging
import os
import pstats
import sys
import traceback
from typing import Tuple, Optional, List, Dict, Type, Union, Any, Sequence

from pkg_resources import iter_entry_points, require

from crytic_compile import cryticparser, CryticCompile
from crytic_compile.platform.standard import generate_standard_export
from crytic_compile.platform.etherscan import SUPPORTED_NETWORK
from crytic_compile import compile_all, is_supported

from slither.detectors import all_detectors
from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
from slither import tcheck_module
from slither.printers import all_printers
from slither.printers.abstract_printer import AbstractPrinter
from slither.slither import Slither
from slither.utils import codex
from slither.utils.output import output_to_json, output_to_zip, output_to_sarif, ZIP_TYPES_ACCEPTED
from slither.utils.output_capture import StandardOutputCapture
from slither.utils.colors import red, set_colorization_enabled
from slither.utils.command_line import (
    output_detectors,
    output_results_to_markdown,
    output_detectors_json,
    output_printers,
    output_printers_json,
    output_to_markdown,
    output_wiki,
    defaults_flag_in_config,
    read_config_file,
    JSON_OUTPUT_TYPES,
    DEFAULT_JSON_OUTPUT_TYPES,
    check_and_sanitize_markdown_root,
)
from slither.exceptions import SlitherException
from slither.sctype_cf_pairs import add_cf_pair, add_cont_with_state_var

logging.basicConfig()
logger = logging.getLogger("Slither")


###################################################################################
###################################################################################
# region Process functions
###################################################################################
###################################################################################

def process_single(
    target: Union[str, CryticCompile],
    args: argparse.Namespace,
    detector_classes: List[Type[AbstractDetector]],
    printer_classes: List[Type[AbstractPrinter]],
) -> Tuple[Slither, List[Dict], List[Dict], int]:
    """
    The core high-level code for running Slither static analysis.

    Returns:
        list(result), int: Result list and number of contracts analyzed
    """
    ast = "--ast-compact-json"
    if args.legacy_ast:
        ast = "--ast-json"
    if args.checklist:
        args.show_ignored_findings = True

    slither = Slither(target, ast_format=ast, **vars(args))
    return _process(slither, detector_classes, printer_classes)


def preprocess_single(
    target: Union[str, CryticCompile],
    args: argparse.Namespace,
) -> None:
    """
    Pre-parse a compilation: register every contract that has state variables,
    and every (contract, function) pair, before the main analysis pass.
    """
    ast = "--ast-compact-json"
    if args.legacy_ast:
        ast = "--ast-json"
    if args.checklist:
        args.show_ignored_findings = True

    slither = Slither(target, ast_format=ast, **vars(args))
    for contract in slither.contracts:
        # print(contract.name)
        add_cont_with_state_var(contract.name, contract)
        for function in contract.functions_declared:
            # print(f"{function.name}: {function.entry_point}")
            add_cf_pair(contract.name, function.name, function)
        # print("--------")


def process_all(
    target: str,
    args: argparse.Namespace,
    detector_classes: List[Type[AbstractDetector]],
    printer_classes: List[Type[AbstractPrinter]],
) -> Tuple[List[Slither], List[Dict], List[Dict], int]:
    compilations = compile_all(target, **vars(args))
    slither_instances = []
    results_detectors = []
    results_printers = []
    analyzed_contracts_count = 0
    tcheck_module.update_total_compilations(len(compilations))
    for compilation in compilations:
        preprocess_single(compilation, args)
    for compilation in compilations:
        (
            slither,
            current_results_detectors,
            current_results_printers,
            current_analyzed_count,
        ) = process_single(compilation, args, detector_classes, printer_classes)
        results_detectors.extend(current_results_detectors)
        results_printers.extend(current_results_printers)
        slither_instances.append(slither)
        analyzed_contracts_count += current_analyzed_count
    return (
        slither_instances,
        results_detectors,
        results_printers,
        analyzed_contracts_count,
    )


def _process(
    slither: Slither,
    detector_classes: List[Type[AbstractDetector]],
    printer_classes: List[Type[AbstractPrinter]],
) -> Tuple[Slither, List[Dict], List[Dict], int]:
    for detector_cls in detector_classes:
        slither.register_detector(detector_cls)
    for printer_cls in printer_classes:
        slither.register_printer(printer_cls)

    analyzed_contracts_count = len(slither.contracts)
    # print(f"Analyzed_contracts_count: {analyzed_contracts_count}")
    results_detectors = []
    results_printers = []

    if not printer_classes:
        detector_results = slither.run_detectors()
        detector_results = [x for x in detector_results if x]  # remove empty results
        detector_results = [item for sublist in detector_results for item in sublist]  # flatten
        results_detectors.extend(detector_results)
    else:
        printer_results = slither.run_printers()
        printer_results = [x for x in printer_results if x]  # remove empty results
        results_printers.extend(printer_results)

    return slither, results_detectors, results_printers, analyzed_contracts_count


# TODO: delete me?
def process_from_asts(
    filenames: List[str],
    args: argparse.Namespace,
    detector_classes: List[Type[AbstractDetector]],
    printer_classes: List[Type[AbstractPrinter]],
) -> Tuple[Slither, List[Dict], List[Dict], int]:
    all_contracts: List[str] = []

    for filename in filenames:
        with open(filename, encoding="utf8") as file_open:
            contract_loaded = json.load(file_open)
            all_contracts.append(contract_loaded["ast"])

    return process_single(all_contracts, args, detector_classes, printer_classes)


# endregion
###################################################################################
###################################################################################
# region Detectors and printers
###################################################################################
###################################################################################

def get_detectors_and_printers() -> Tuple[
    List[Type[AbstractDetector]], List[Type[AbstractPrinter]]
]:
    detectors_ = [getattr(all_detectors, name) for name in dir(all_detectors)]
    detectors = [d for d in detectors_ if inspect.isclass(d) and issubclass(d, AbstractDetector)]
    printers_ = [getattr(all_printers, name) for name in dir(all_printers)]
    printers = [p for p in printers_ if inspect.isclass(p) and issubclass(p, AbstractPrinter)]

    # Handle plugins!
    for entry_point in iter_entry_points(group="slither_analyzer.plugin", name=None):
        make_plugin = entry_point.load()
        plugin_detectors, plugin_printers = make_plugin()
        detector = None
        if not all(issubclass(detector, AbstractDetector) for detector in plugin_detectors):
            raise Exception(
                f"Error when loading plugin {entry_point}, {detector} is not a detector"
            )
        printer = None
        if not all(issubclass(printer, AbstractPrinter) for printer in plugin_printers):
            raise Exception(f"Error when loading plugin {entry_point}, {printer} is not a printer")

        # We convert those to lists in case someone returns a tuple
        detectors += list(plugin_detectors)
        printers += list(plugin_printers)

    return detectors, printers


# pylint: disable=too-many-branches
def choose_detectors(
    args: argparse.Namespace, all_detector_classes: List[Type[AbstractDetector]]
) -> List[Type[AbstractDetector]]:
    # If detectors are specified, run only these ones
    detectors_to_run = []
    detectors = {d.ARGUMENT: d for d in all_detector_classes}

    if args.detectors_to_run == "all":
        detectors_to_run = all_detector_classes
        if args.detectors_to_exclude:
            detectors_excluded = args.detectors_to_exclude.split(",")
            for detector in detectors:
                if detector in detectors_excluded:
                    detectors_to_run.remove(detectors[detector])
    else:
        for detector in args.detectors_to_run.split(","):
            if detector in detectors:
                detectors_to_run.append(detectors[detector])
            else:
                raise Exception(f"Error: {detector} is not a detector")
        detectors_to_run = sorted(detectors_to_run, key=lambda x: x.IMPACT)
        return detectors_to_run

    if args.exclude_optimization and not args.fail_pedantic:
        detectors_to_run = [
            d for d in detectors_to_run if d.IMPACT != DetectorClassification.OPTIMIZATION
        ]
    if args.exclude_informational and not args.fail_pedantic:
        detectors_to_run = [
            d for d in detectors_to_run if d.IMPACT != DetectorClassification.INFORMATIONAL
        ]
    if args.exclude_low and not args.fail_low:
        detectors_to_run = [d for d in detectors_to_run if d.IMPACT != DetectorClassification.LOW]
    if args.exclude_medium and not args.fail_medium:
        detectors_to_run = [
            d for d in detectors_to_run if d.IMPACT != DetectorClassification.MEDIUM
        ]
    if args.exclude_high and not args.fail_high:
        detectors_to_run = [d for d in detectors_to_run if d.IMPACT != DetectorClassification.HIGH]
    if args.detectors_to_exclude:
        detectors_to_run = [
            d for d in detectors_to_run if d.ARGUMENT not in args.detectors_to_exclude
        ]

    detectors_to_run = sorted(detectors_to_run, key=lambda x: x.IMPACT)
    return detectors_to_run


def choose_printers(
    args: argparse.Namespace, all_printer_classes: List[Type[AbstractPrinter]]
) -> List[Type[AbstractPrinter]]:
    printers_to_run = []

    # disable default printer
    if args.printers_to_run is None:
        return []

    if args.printers_to_run == "all":
        return all_printer_classes

    printers = {p.ARGUMENT: p for p in all_printer_classes}
    for printer in args.printers_to_run.split(","):
        if printer in printers:
            printers_to_run.append(printers[printer])
        else:
            raise Exception(f"Error: {printer} is not a printer")
    return printers_to_run


# endregion
###################################################################################
###################################################################################
# region Command line parsing
###################################################################################
###################################################################################

def parse_filter_paths(args: argparse.Namespace) -> List[str]:
    if args.filter_paths:
        return args.filter_paths.split(",")
    return []


# pylint: disable=too-many-statements
def parse_args(
    detector_classes: List[Type[AbstractDetector]], printer_classes: List[Type[AbstractPrinter]]
) -> argparse.Namespace:
    usage = "slither target [flag]\n"
    usage += "\ntarget can be:\n"
    usage += "\t- file.sol // a Solidity file\n"
    usage += "\t- project_directory // a project directory. See https://github.com/crytic/crytic-compile/#crytic-compile for the supported platforms\n"
    usage += "\t- 0x.. // a contract on mainnet\n"
    usage += f"\t- NETWORK:0x.. // a contract on a different network. Supported networks: {','.join(x[:-1] for x in SUPPORTED_NETWORK)}\n"

    parser = argparse.ArgumentParser(
        description="For usage information, see https://github.com/crytic/slither/wiki/Usage",
        usage=usage,
    )

    parser.add_argument("filename", help=argparse.SUPPRESS)

    cryticparser.init(parser)

    parser.add_argument(
        "--version",
        help="displays the current version",
        version=require("slither-analyzer")[0].version,
        action="version",
    )

    group_detector = parser.add_argument_group("Detectors")
    group_printer = parser.add_argument_group("Printers")
    group_checklist = parser.add_argument_group(
        "Checklist (consider using https://github.com/crytic/slither-action)"
    )
    group_misc = parser.add_argument_group("Additional options")

    group_detector.add_argument(
        "--detect",
        help="Comma-separated list of detectors, defaults to all, "
        f"available detectors: {', '.join(d.ARGUMENT for d in detector_classes)}",
        action="store",
        dest="detectors_to_run",
        default=defaults_flag_in_config["detectors_to_run"],
    )
    group_printer.add_argument(
        "--print",
        help="Comma-separated list of contract information printers, "
        f"available printers: {', '.join(d.ARGUMENT for d in printer_classes)}",
        action="store",
        dest="printers_to_run",
        default=defaults_flag_in_config["printers_to_run"],
    )
    group_detector.add_argument(
        "--list-detectors",
        help="List available detectors",
        action=ListDetectors,
        nargs=0,
        default=False,
    )
    group_printer.add_argument(
        "--list-printers",
        help="List available printers",
        action=ListPrinters,
        nargs=0,
        default=False,
    )
    group_detector.add_argument(
        "--exclude",
        help="Comma-separated list of detectors that should be excluded",
        action="store",
        dest="detectors_to_exclude",
        default=defaults_flag_in_config["detectors_to_exclude"],
    )
    group_detector.add_argument(
        "--exclude-dependencies",
        help="Exclude results that are only related to dependencies",
        action="store_true",
        default=defaults_flag_in_config["exclude_dependencies"],
    )
    group_detector.add_argument(
        "--exclude-optimization",
        help="Exclude optimization analyses",
        action="store_true",
        default=defaults_flag_in_config["exclude_optimization"],
    )
    group_detector.add_argument(
        "--exclude-informational",
        help="Exclude informational impact analyses",
        action="store_true",
        default=defaults_flag_in_config["exclude_informational"],
    )
    group_detector.add_argument(
        "--exclude-low",
        help="Exclude low impact analyses",
        action="store_true",
        default=defaults_flag_in_config["exclude_low"],
    )
    group_detector.add_argument(
        "--exclude-medium",
        help="Exclude medium impact analyses",
        action="store_true",
        default=defaults_flag_in_config["exclude_medium"],
    )
    group_detector.add_argument(
        "--exclude-high",
        help="Exclude high impact analyses",
        action="store_true",
        default=defaults_flag_in_config["exclude_high"],
    )
    group_detector.add_argument(
        "--fail-pedantic",
        help="Return the number of findings in the exit code",
        action="store_true",
        default=defaults_flag_in_config["fail_pedantic"],
    )
    group_detector.add_argument(
        "--no-fail-pedantic",
        help="Do not return the number of findings in the exit code. Opposite of --fail-pedantic",
        dest="fail_pedantic",
        action="store_false",
        required=False,
    )
    group_detector.add_argument(
        "--fail-low",
        help="Fail if low or greater impact finding is detected",
        action="store_true",
        default=defaults_flag_in_config["fail_low"],
    )
    group_detector.add_argument(
        "--fail-medium",
        help="Fail if medium or greater impact finding is detected",
        action="store_true",
        default=defaults_flag_in_config["fail_medium"],
    )
    group_detector.add_argument(
        "--fail-high",
        help="Fail if high impact finding is detected",
        action="store_true",
        default=defaults_flag_in_config["fail_high"],
    )
    group_detector.add_argument(
        "--show-ignored-findings",
        help="Show all the findings",
        action="store_true",
        default=defaults_flag_in_config["show_ignored_findings"],
    )
    group_checklist.add_argument(
        "--checklist",
        help="Generate a markdown page with the detector results",
        action="store_true",
        default=False,
    )
    group_checklist.add_argument(
        "--checklist-limit",
        help="Limit the number of results per detector in the markdown file",
        action="store",
        default="",
    )
    group_checklist.add_argument(
        "--markdown-root",
        type=check_and_sanitize_markdown_root,
        help="URL for markdown generation",
        action="store",
        default="",
    )
    group_misc.add_argument(
        "--json",
        help='Export the results as a JSON file ("--json -" to export to stdout)',
        action="store",
        default=defaults_flag_in_config["json"],
    )
    group_misc.add_argument(
        "--sarif",
        help='Export the results as a SARIF JSON file ("--sarif -" to export to stdout)',
        action="store",
        default=defaults_flag_in_config["sarif"],
    )
    group_misc.add_argument(
        "--json-types",
        help="Comma-separated list of result types to output to JSON, defaults to "
        + f'{",".join(output_type for output_type in DEFAULT_JSON_OUTPUT_TYPES)}. '
        + f'Available types: {",".join(output_type for output_type in JSON_OUTPUT_TYPES)}',
        action="store",
        default=defaults_flag_in_config["json-types"],
    )
    group_misc.add_argument(
        "--zip",
        help="Export the results as a zipped JSON file",
        action="store",
        default=defaults_flag_in_config["zip"],
    )
    group_misc.add_argument(
        "--zip-type",
        help=f'Zip compression type. One of {",".join(ZIP_TYPES_ACCEPTED.keys())}. Default lzma',
        action="store",
        default=defaults_flag_in_config["zip_type"],
    )
    group_misc.add_argument(
        "--disable-color",
        help="Disable output colorization",
        action="store_true",
        default=defaults_flag_in_config["disable_color"],
    )
    group_misc.add_argument(
        "--filter-paths",
        help="Comma-separated list of paths for which results will be excluded",
        action="store",
        dest="filter_paths",
        default=defaults_flag_in_config["filter_paths"],
    )
    group_misc.add_argument(
        "--triage-mode",
        help="Run triage mode (save results in slither.db.json)",
        action="store_true",
        dest="triage_mode",
        default=False,
    )
    group_misc.add_argument(
        "--config-file",
        help="Provide a config file (default: slither.config.json)",
        action="store",
        dest="config_file",
        default=None,
    )
    group_misc.add_argument(
        "--change-line-prefix",
        help="Change the line prefix (default #) for the displayed source codes (i.e. file.sol#1).",
        action="store",
        dest="change_line_prefix",
        default="#",
    )
    group_misc.add_argument(
        "--solc-ast",
        help="Provide the contract as a json AST",
        action="store_true",
        default=False,
    )
    group_misc.add_argument(
        "--generate-patches",
        help="Generate patches (json output only)",
        action="store_true",
        default=False,
    )
    group_misc.add_argument(
        "--no-fail",
        help="Do not fail in case of parsing (echidna mode only)",
        action="store_true",
        default=defaults_flag_in_config["no_fail"],
    )

    codex.init_parser(parser)

    # debugger command
    parser.add_argument("--debug", help=argparse.SUPPRESS, action="store_true", default=False)
    parser.add_argument("--markdown", help=argparse.SUPPRESS, action=OutputMarkdown, default=False)
    parser.add_argument(
        "--wiki-detectors", help=argparse.SUPPRESS, action=OutputWiki, default=False
    )
    parser.add_argument(
        "--list-detectors-json",
        help=argparse.SUPPRESS,
        action=ListDetectorsJson,
        nargs=0,
        default=False,
    )
    parser.add_argument(
        "--legacy-ast",
        help=argparse.SUPPRESS,
        action="store_true",
        default=defaults_flag_in_config["legacy_ast"],
    )
    parser.add_argument(
        "--skip-assembly",
        help=argparse.SUPPRESS,
        action="store_true",
        default=defaults_flag_in_config["skip_assembly"],
    )
    parser.add_argument(
        "--perf",
        help=argparse.SUPPRESS,
        action="store_true",
        default=False,
    )

    # if the json is split in different files
    parser.add_argument("--splitted", help=argparse.SUPPRESS, action="store_true", default=False)

    # Disable the throw/catch on partial analyses
    parser.add_argument(
        "--disallow-partial", help=argparse.SUPPRESS, action="store_true", default=False
    )

    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    args = parser.parse_args()
    read_config_file(args)

    args.filter_paths = parse_filter_paths(args)

    # Verify our json-type output is valid
    args.json_types = set(args.json_types.split(","))
    for json_type in args.json_types:
        if json_type not in JSON_OUTPUT_TYPES:
            raise Exception(f'Error: "{json_type}" is not a valid JSON result output type.')

    return args


class ListDetectors(argparse.Action):  # pylint: disable=too-few-public-methods
    def __call__(self, parser, *args, **kwargs):  # pylint: disable=signature-differs
        detectors, _ = get_detectors_and_printers()
        output_detectors(detectors)
        parser.exit()


class ListDetectorsJson(argparse.Action):  # pylint: disable=too-few-public-methods
    def __call__(
        self, parser: Any, *args: Any, **kwargs: Any
    ) -> None:  # pylint: disable=signature-differs
        detectors, _ = get_detectors_and_printers()
        detector_types_json = output_detectors_json(detectors)
        print(json.dumps(detector_types_json))
        parser.exit()


class ListPrinters(argparse.Action):  # pylint: disable=too-few-public-methods
    def __call__(
        self, parser: Any, *args: Any, **kwargs: Any
    ) -> None:  # pylint: disable=signature-differs
        _, printers = get_detectors_and_printers()
        output_printers(printers)
        parser.exit()


class OutputMarkdown(argparse.Action):  # pylint: disable=too-few-public-methods
    def __call__(
        self,
        parser: Any,
        args: Any,
        values: Optional[Union[str, Sequence[Any]]],
        option_string: Any = None,
    ) -> None:
        detectors, printers = get_detectors_and_printers()
        assert isinstance(values, str)
        output_to_markdown(detectors, printers, values)
        parser.exit()


class OutputWiki(argparse.Action):  # pylint: disable=too-few-public-methods
    def __call__(
        self,
        parser: Any,
        args: Any,
        values: Optional[Union[str, Sequence[Any]]],
        option_string: Any = None,
    ) -> None:
        detectors, _ = get_detectors_and_printers()
        assert isinstance(values, str)
        output_wiki(detectors, values)
        parser.exit()


# endregion
###################################################################################
###################################################################################
# region CustomFormatter
###################################################################################
###################################################################################

class FormatterCryticCompile(logging.Formatter):
    def format(self, record):
        # for i, msg in enumerate(record.msg):
        if record.msg.startswith("Compilation warnings/errors on "):
            txt = record.args[1]
            txt = txt.split("\n")
            txt = [red(x) if "Error" in x else x for x in txt]
            txt = "\n".join(txt)
            record.args = (record.args[0], txt)
        return super().format(record)


# endregion
###################################################################################
###################################################################################
# region Main
###################################################################################
###################################################################################

def main() -> None:
    # Codebase with complex dominators can lead to a lot of SSA recursive call
    sys.setrecursionlimit(1500)

    detectors, printers = get_detectors_and_printers()
    main_impl(all_detector_classes=detectors, all_printer_classes=printers)


# pylint: disable=too-many-statements,too-many-branches,too-many-locals
def main_impl(
    all_detector_classes: List[Type[AbstractDetector]],
    all_printer_classes: List[Type[AbstractPrinter]],
) -> None:
    """
    :param all_detector_classes: A list of all detectors that can be included/excluded.
    :param all_printer_classes: A list of all printers that can be included.
    """
    # Set logger of Slither to info, to catch warnings related to the arg parsing
    logger.setLevel(logging.INFO)
    args = parse_args(all_detector_classes, all_printer_classes)

    cp: Optional[cProfile.Profile] = None
    if args.perf:
        cp = cProfile.Profile()
        cp.enable()

    # Set colorization option
    set_colorization_enabled(False if args.disable_color else sys.stdout.isatty())

    # Define some variables for potential JSON output
    json_results = {}
    output_error = None
    outputting_json = args.json is not None
    outputting_json_stdout = args.json == "-"
    outputting_sarif = args.sarif is not None
    outputting_sarif_stdout = args.sarif == "-"
    outputting_zip = args.zip is not None
    if args.zip_type not in ZIP_TYPES_ACCEPTED:
        to_log = f'Zip type not accepted, it must be one of {",".join(ZIP_TYPES_ACCEPTED.keys())}'
        logger.error(to_log)

    # If we are outputting JSON, capture all standard output. If we are
    # outputting to stdout, we block typical stdout output.
    if outputting_json or outputting_sarif:
        StandardOutputCapture.enable(outputting_json_stdout or outputting_sarif_stdout)

    printer_classes = choose_printers(args, all_printer_classes)
    detector_classes = choose_detectors(args, all_detector_classes)

    default_log = logging.INFO if not args.debug else logging.DEBUG
    for (l_name, l_level) in [
        ("Slither", default_log),
        ("Contract", default_log),
        ("Function", default_log),
        ("Node", default_log),
        ("Parsing", default_log),
        ("Detectors", default_log),
        ("FunctionSolc", default_log),
        ("ExpressionParsing", default_log),
        ("TypeParsing", default_log),
        ("SSA_Conversion", default_log),
        ("Printers", default_log),
        # ('CryticCompile', default_log)
    ]:
        logger_level = logging.getLogger(l_name)
        logger_level.setLevel(l_level)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(FormatterCryticCompile())

    crytic_compile_error = logging.getLogger("CryticCompile")
    crytic_compile_error.addHandler(console_handler)
    crytic_compile_error.propagate = False
    crytic_compile_error.setLevel(logging.INFO)

    results_detectors: List[Dict] = []
    results_printers: List[Dict] = []
    try:
        filename = args.filename

        # Determine if we are handling ast from solc
        if args.solc_ast or (filename.endswith(".json") and not is_supported(filename)):
            globbed_filenames = glob.glob(filename, recursive=True)
            filenames = glob.glob(os.path.join(filename, "*.json"))
            if not filenames:
                filenames = globbed_filenames
            number_contracts = 0

            slither_instances = []
            if args.splitted:
                (
                    slither_instance,
                    results_detectors,
                    results_printers,
                    number_contracts,
                ) = process_from_asts(filenames, args, detector_classes, printer_classes)
                slither_instances.append(slither_instance)
            else:
                for filename in filenames:
                    (
                        slither_instance,
                        results_detectors_tmp,
                        results_printers_tmp,
                        number_contracts_tmp,
                    ) = process_single(filename, args, detector_classes, printer_classes)
                    number_contracts += number_contracts_tmp
                    results_detectors += results_detectors_tmp
                    results_printers += results_printers_tmp
                    slither_instances.append(slither_instance)

        # Rely on CryticCompile to discern the underlying type of compilations.
        else:
            (
                slither_instances,
                results_detectors,
                results_printers,
                number_contracts,
            ) = process_all(filename, args, detector_classes, printer_classes)

        # Determine if we are outputting JSON
        if outputting_json or outputting_zip or outputting_sarif:
            # Add our compilation information to JSON
            if "compilations" in args.json_types:
                compilation_results = []
                for slither_instance in slither_instances:
                    assert slither_instance.crytic_compile
                    compilation_results.append(
                        generate_standard_export(slither_instance.crytic_compile)
                    )
                json_results["compilations"] = compilation_results

            # Add our detector results to JSON if desired.
            if results_detectors and "detectors" in args.json_types:
                json_results["detectors"] = results_detectors

            # Add our printer results to JSON if desired.
            if results_printers and "printers" in args.json_types:
                json_results["printers"] = results_printers

            # Add our detector types to JSON
            if "list-detectors" in args.json_types:
                detectors, _ = get_detectors_and_printers()
                json_results["list-detectors"] = output_detectors_json(detectors)

            # Add our printer types to JSON
            if "list-printers" in args.json_types:
                _, printers = get_detectors_and_printers()
                json_results["list-printers"] = output_printers_json(printers)

        # Output our results to markdown if we wish to compile a checklist.
        if args.checklist:
            output_results_to_markdown(results_detectors, args.checklist_limit)

        # Don't print the number of result for printers
        if number_contracts == 0:
            logger.warning(red("No contract was analyzed"))
        if printer_classes:
            logger.info("%s analyzed (%d contracts)", filename, number_contracts)
        else:
            logger.info(
                "%s analyzed (%d contracts with %d detectors), %d result(s) found",
                filename,
                number_contracts,
                len(detector_classes),
                len(results_detectors),
            )
    except SlitherException as slither_exception:
        output_error = str(slither_exception)
        traceback.print_exc()
        logging.error(red("Error:"))
        logging.error(red(output_error))
        logging.error("Please report an issue to https://github.com/crytic/slither/issues")
    except Exception:  # pylint: disable=broad-except
        output_error = traceback.format_exc()
        traceback.print_exc()
        logging.error(f"Error in {args.filename}")  # pylint: disable=logging-fstring-interpolation
        logging.error(output_error)

    # If we are outputting JSON, capture the redirected output and disable the
    # redirect to output the final JSON.
    if outputting_json:
        if "console" in args.json_types:
            json_results["console"] = {
                "stdout": StandardOutputCapture.get_stdout_output(),
                "stderr": StandardOutputCapture.get_stderr_output(),
            }
        StandardOutputCapture.disable()
        output_to_json(None if outputting_json_stdout else args.json, output_error, json_results)

    if outputting_sarif:
        StandardOutputCapture.disable()
        output_to_sarif(
            None if outputting_sarif_stdout else args.sarif, json_results, detector_classes
        )

    if outputting_zip:
        output_to_zip(args.zip, output_error, json_results, args.zip_type)

    if args.perf and cp:
        cp.disable()
        stats = pstats.Stats(cp).sort_stats("cumtime")
        stats.print_stats()

    if args.fail_high:
        fail_on_detection = any(result["impact"] == "High" for result in results_detectors)
    elif args.fail_medium:
        fail_on_detection = any(
            result["impact"] in ["Medium", "High"] for result in results_detectors
        )
    elif args.fail_low:
        fail_on_detection = any(
            result["impact"] in ["Low", "Medium", "High"] for result in results_detectors
        )
    elif args.fail_pedantic:
        fail_on_detection = bool(results_detectors)
    else:
        fail_on_detection = False

    # Exit with the appropriate status code
    if output_error or fail_on_detection:
        sys.exit(-1)
    else:
        sys.exit(0)


if __name__ == "__main__":
    main()

# endregion
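
# Example command lines (illustrative; the target path is an assumption):
#   slither contracts/Token.sol                        # run all detectors
#   slither contracts/Token.sol --exclude-informational --json results.json
#   slither contracts/Token.sol --print human-summary  # run a printer instead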
34,129
Python
.py
811
34.355117
151
0.62083
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,976
all_exceptions.py
NioTheFirst_ScType/slither/all_exceptions.py
""" This module import all slither exceptions """ # pylint: disable=unused-import from slither.slithir.exceptions import SlithIRError from slither.solc_parsing.exceptions import ParsingError, VariableNotFound from slither.core.exceptions import SlitherCoreError from slither.exceptions import SlitherException
310
Python
.py
8
37.75
74
0.870861
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,977
slither.py
NioTheFirst_ScType/slither/slither.py
import logging
from typing import Union, List, ValuesView, Type, Dict

from crytic_compile import CryticCompile, InvalidCompilation

# pylint: disable= no-name-in-module
from slither.core.compilation_unit import SlitherCompilationUnit
from slither.core.scope.scope import FileScope
from slither.core.slither_core import SlitherCore
from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
from slither.exceptions import SlitherError
from slither.printers.abstract_printer import AbstractPrinter
from slither.solc_parsing.slither_compilation_unit_solc import SlitherCompilationUnitSolc

logger = logging.getLogger("Slither")
logging.basicConfig()

logger_detector = logging.getLogger("Detectors")
logger_printer = logging.getLogger("Printers")


def _check_common_things(
    thing_name: str, cls: Type, base_cls: Type, instances_list: List[Type[AbstractDetector]]
) -> None:
    if not issubclass(cls, base_cls) or cls is base_cls:
        raise Exception(
            f"You can't register {cls!r} as a {thing_name}. You need to pass a class that inherits from {base_cls.__name__}"
        )

    if any(type(obj) == cls for obj in instances_list):  # pylint: disable=unidiomatic-typecheck
        raise Exception(f"You can't register {cls!r} twice.")


def _update_file_scopes(candidates: ValuesView[FileScope]):
    """
    Because solc's import allows cycle in the import,
    we iterate until we aren't adding new information to the scope.
    """
    learned_something = False
    while True:
        for candidate in candidates:
            learned_something |= candidate.add_accesible_scopes()
        if not learned_something:
            break
        learned_something = False


class Slither(SlitherCore):  # pylint: disable=too-many-instance-attributes
    def __init__(self, target: Union[str, CryticCompile], **kwargs):
        """
        Args:
            target (str | CryticCompile)
        Keyword Args:
            solc (str): solc binary location (default 'solc')
            disable_solc_warnings (bool): True to disable solc warnings (default false)
            solc_args (str): solc arguments (default '')
            ast_format (str): ast format (default '--ast-compact-json')
            filter_paths (list(str)): list of path to filter (default [])
            triage_mode (bool): if true, switch to triage mode (default false)
            exclude_dependencies (bool): if true, exclude results that are only related to dependencies
            generate_patches (bool): if true, patches are generated (json output only)
            truffle_ignore (bool): ignore truffle.js presence (default false)
            truffle_build_directory (str): build truffle directory (default 'build/contracts')
            truffle_ignore_compile (bool): do not run truffle compile (default False)
            truffle_version (str): use a specific truffle version (default None)
            embark_ignore (bool): ignore embark.js presence (default false)
            embark_ignore_compile (bool): do not run embark build (default False)
            embark_overwrite_config (bool): overwrite original config file (default false)
            change_line_prefix (str): Change the line prefix (default #) for the displayed source codes (i.e. file.sol#1).
        """
        super().__init__()

        self._disallow_partial: bool = kwargs.get("disallow_partial", False)
        self._skip_assembly: bool = kwargs.get("skip_assembly", False)
        self._show_ignored_findings: bool = kwargs.get("show_ignored_findings", False)

        self.line_prefix = kwargs.get("change_line_prefix", "#")

        # Indicate if Codex related features should be used
        self.codex_enabled = kwargs.get("codex", False)
        self.codex_contracts = kwargs.get("codex_contracts", "all")
        self.codex_model = kwargs.get("codex_model", "text-davinci-003")
        self.codex_temperature = kwargs.get("codex_temperature", 0)
        self.codex_max_tokens = kwargs.get("codex_max_tokens", 300)
        self.codex_log = kwargs.get("codex_log", False)

        self.no_fail = kwargs.get("no_fail", False)

        self._parsers: List[SlitherCompilationUnitSolc] = []
        try:
            if isinstance(target, CryticCompile):
                crytic_compile = target
            else:
                crytic_compile = CryticCompile(target, **kwargs)
            self._crytic_compile = crytic_compile
        except InvalidCompilation as e:
            # pylint: disable=raise-missing-from
            raise SlitherError(f"Invalid compilation: \n{str(e)}")

        for compilation_unit in crytic_compile.compilation_units.values():
            compilation_unit_slither = SlitherCompilationUnit(self, compilation_unit)
            self._compilation_units.append(compilation_unit_slither)
            parser = SlitherCompilationUnitSolc(compilation_unit_slither)
            self._parsers.append(parser)
            for path, ast in compilation_unit.asts.items():
                parser.parse_top_level_from_loaded_json(ast, path)
                self.add_source_code(path)
            _update_file_scopes(compilation_unit_slither.scopes.values())

        if kwargs.get("generate_patches", False):
            self.generate_patches = True

        self._markdown_root = kwargs.get("markdown_root", "")

        self._detectors = []
        self._printers = []

        filter_paths = kwargs.get("filter_paths", [])
        for p in filter_paths:
            self.add_path_to_filter(p)

        self._exclude_dependencies = kwargs.get("exclude_dependencies", False)

        triage_mode = kwargs.get("triage_mode", False)
        self._triage_mode = triage_mode

        self._init_parsing_and_analyses(kwargs.get("skip_analyze", False))

    def _init_parsing_and_analyses(self, skip_analyze: bool) -> None:
        for parser in self._parsers:
            try:
                parser.parse_contracts()
            except Exception as e:
                if self.no_fail:
                    continue
                raise e

        # skip_analyze is only used for testing
        if not skip_analyze:
            for parser in self._parsers:
                try:
                    parser.analyze_contracts()
                except Exception as e:
                    if self.no_fail:
                        continue
                    raise e

    @property
    def detectors(self):
        return self._detectors

    @property
    def detectors_high(self):
        return [d for d in self.detectors if d.IMPACT == DetectorClassification.HIGH]

    @property
    def detectors_medium(self):
        return [d for d in self.detectors if d.IMPACT == DetectorClassification.MEDIUM]

    @property
    def detectors_low(self):
        return [d for d in self.detectors if d.IMPACT == DetectorClassification.LOW]

    @property
    def detectors_informational(self):
        return [d for d in self.detectors if d.IMPACT == DetectorClassification.INFORMATIONAL]

    @property
    def detectors_optimization(self):
        return [d for d in self.detectors if d.IMPACT == DetectorClassification.OPTIMIZATION]

    def register_detector(self, detector_class: Type[AbstractDetector]) -> None:
        """
        :param detector_class: Class inheriting from `AbstractDetector`.
        """
        _check_common_things("detector", detector_class, AbstractDetector, self._detectors)

        for compilation_unit in self.compilation_units:
            instance = detector_class(compilation_unit, self, logger_detector)
            self._detectors.append(instance)

    def register_printer(self, printer_class: Type[AbstractPrinter]) -> None:
        """
        :param printer_class: Class inheriting from `AbstractPrinter`.
        """
        _check_common_things("printer", printer_class, AbstractPrinter, self._printers)

        instance = printer_class(self, logger_printer)
        self._printers.append(instance)

    def run_detectors(self) -> List[Dict]:
        """
        :return: List of registered detectors results.
        """
        self.load_previous_results()
        results = [d.detect() for d in self._detectors]

        self.write_results_to_hide()
        return results

    def run_printers(self):
        """
        :return: List of registered printers outputs.
        """
        return [p.output(self._crytic_compile.target).data for p in self._printers]

    @property
    def triage_mode(self):
        return self._triage_mode
8,516
Python
.py
170
40.647059
124
0.658231
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,978
sctype_cf_pairs.py
NioTheFirst_ScType/slither/sctype_cf_pairs.py
func_ptr_hash = {}
cont_ptr_hash = {}


def add_cont_with_state_var(contract_name, contract):
    global cont_ptr_hash
    if len(_read_state_variables(contract)) == 0:
        return
    else:
        cont_ptr_hash[contract_name] = contract


def get_cont_with_state_var(contract_name):
    global cont_ptr_hash
    if contract_name in cont_ptr_hash:
        return cont_ptr_hash[contract_name]
    return None


def _read_state_variables(contract):
    ret = []
    for f in contract.all_functions_called + contract.modifiers:
        ret += f.state_variables_read
    return ret


def add_cf_pair(contract_name, function_name, function):
    global func_ptr_hash
    if contract_name is None or function_name is None or function.entry_point is None:
        return False
    key = contract_name + '_' + function_name
    if key in func_ptr_hash:
        return False
    func_ptr_hash[key] = function
    return True


def get_cf_pairh(contract_name, function_name):
    global func_ptr_hash
    key = contract_name + '_' + function_name
    # print(key)
    # print(func_ptr_hash)
    if key in func_ptr_hash:
        # print("Found")
        return func_ptr_hash[key]
    # print("Not found")
    return None


def view_all_cf_pairs():
    print(func_ptr_hash)
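
# Usage sketch (illustrative; "Token"/"transfer" are made-up names): keys are
# "<contract>_<function>" strings, so lookups must use the names registered by
# preprocess_single in slither/__main__.py.
#   add_cf_pair("Token", "transfer", transfer_fn)
#   fn = get_cf_pairh("Token", "transfer")   # the Function object, or None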
1,262
Python
.py
39
27.179487
87
0.667763
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,979
exceptions.py
NioTheFirst_ScType/slither/formatters/exceptions.py
from slither.exceptions import SlitherException


class FormatImpossible(SlitherException):
    pass


class FormatError(SlitherException):
    pass
149
Python
.py
5
26.4
47
0.857143
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,980
patches.py
NioTheFirst_ScType/slither/formatters/utils/patches.py
import os
import difflib
from typing import Dict, Tuple, Union
from collections import defaultdict

from slither.core.compilation_unit import SlitherCompilationUnit


# pylint: disable=too-many-arguments
def create_patch(
    result: Dict,
    file: str,
    start: int,
    end: int,
    old_str: Union[str, bytes],
    new_str: Union[str, bytes],
) -> None:
    if isinstance(old_str, bytes):
        old_str = old_str.decode("utf8")
    if isinstance(new_str, bytes):
        new_str = new_str.decode("utf8")
    p = {"start": start, "end": end, "old_string": old_str, "new_string": new_str}
    if "patches" not in result:
        result["patches"] = defaultdict(list)
    if p not in result["patches"][file]:
        result["patches"][file].append(p)


def apply_patch(original_txt: bytes, patch: Dict, offset: int) -> Tuple[bytes, int]:
    patched_txt = original_txt[: int(patch["start"] + offset)]
    patched_txt += patch["new_string"].encode("utf8")
    patched_txt += original_txt[int(patch["end"] + offset):]

    # Keep the diff of text added or sub, in case of multiple patches
    patch_length_diff = len(patch["new_string"]) - (patch["end"] - patch["start"])
    return patched_txt, patch_length_diff + offset


def create_diff(
    compilation_unit: SlitherCompilationUnit, original_txt: bytes, patched_txt: bytes, filename: str
) -> str:
    if compilation_unit.crytic_compile:
        relative_path = compilation_unit.crytic_compile.filename_lookup(filename).relative
        relative_path = os.path.join(".", relative_path)
    else:
        relative_path = filename
    diff = difflib.unified_diff(
        original_txt.decode("utf8").splitlines(False),
        patched_txt.decode("utf8").splitlines(False),
        fromfile=relative_path,
        tofile=relative_path,
        lineterm="",
    )
    return "\n".join(list(diff)) + "\n"
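
# Round-trip sketch (illustrative; byte offsets and strings are made up):
# create_patch records a patch in `result`, and apply_patch replays it against
# the original source bytes, returning the new offset delta for chained patches.
def _example_patch_roundtrip():
    result = {}
    original = b"uint a;"
    create_patch(result, "C.sol", start=5, end=6, old_str="a", new_str="amount")
    patched, offset = apply_patch(original, result["patches"]["C.sol"][0], 0)
    assert patched == b"uint amount;"   # offset is now 5 (len("amount") - 1)
    return patched, offset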
1,859
Python
.py
46
35.152174
100
0.671468
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,981
naming_convention.py
NioTheFirst_ScType/slither/formatters/naming_convention/naming_convention.py
import re
import logging
from typing import List

from slither.core.compilation_unit import SlitherCompilationUnit
from slither.slithir.operations import (
    Send,
    Transfer,
    OperationWithLValue,
    HighLevelCall,
    LowLevelCall,
    InternalCall,
    InternalDynamicCall,
    Operation,
)
from slither.core.declarations import Modifier
from slither.core.solidity_types import UserDefinedType, MappingType
from slither.core.declarations import Enum, Contract, Structure, Function
from slither.core.solidity_types.elementary_type import ElementaryTypeName
from slither.core.variables.local_variable import LocalVariable
from slither.formatters.exceptions import FormatError, FormatImpossible
from slither.formatters.utils.patches import create_patch

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("Slither.Format")

# pylint: disable=anomalous-backslash-in-string


def custom_format(compilation_unit: SlitherCompilationUnit, result):
    elements = result["elements"]
    for element in elements:
        target = element["additional_fields"]["target"]
        convention = element["additional_fields"]["convention"]
        if convention == "l_O_I_should_not_be_used":
            # l_O_I_should_not_be_used cannot be automatically patched
            logger.info(
                f'The following naming convention cannot be patched: \n{result["description"]}'
            )
            continue
        _patch(compilation_unit, result, element, target)


# endregion
###################################################################################
###################################################################################
# region Conventions
###################################################################################
###################################################################################

KEY = "ALL_NAMES_USED"

# https://solidity.readthedocs.io/en/v0.5.11/miscellaneous.html#reserved-keywords
SOLIDITY_KEYWORDS = [
    "abstract", "after", "alias", "apply", "auto", "case", "catch", "copyof",
    "default", "define", "final", "immutable", "implements", "in", "inline",
    "let", "macro", "match", "mutable", "null", "of", "override", "partial",
    "promise", "reference", "relocatable", "sealed", "sizeof", "static",
    "supports", "switch", "try", "typedef", "typeof", "unchecked",
]

# https://solidity.readthedocs.io/en/v0.5.11/miscellaneous.html#language-grammar
SOLIDITY_KEYWORDS += [
    "pragma", "import", "contract", "library", "contract", "function", "using",
    "struct", "enum", "public", "private", "internal", "external", "calldata",
    "memory", "modifier", "view", "pure", "constant", "storage", "for", "if",
    "while", "break", "return", "throw", "else", "type",
]

SOLIDITY_KEYWORDS += ElementaryTypeName


def _name_already_use(slither, name):
    # Do not convert to a name used somewhere else
    if not KEY in slither.context:
        all_names = set()
        for contract in slither.contracts_derived:
            all_names = all_names.union({st.name for st in contract.structures})
            all_names = all_names.union({f.name for f in contract.functions_and_modifiers})
            all_names = all_names.union({e.name for e in contract.enums})
            all_names = all_names.union({s.name for s in contract.state_variables})
            for function in contract.functions:
                all_names = all_names.union({v.name for v in function.variables})
        slither.context[KEY] = all_names
    return name in slither.context[KEY]


def _convert_CapWords(original_name, slither):
    name = original_name.capitalize()

    while "_" in name:
        offset = name.find("_")
        if len(name) > offset:
            name = name[0:offset] + name[offset + 1].upper() + name[offset + 2:]

    if _name_already_use(slither, name):
        raise FormatImpossible(f"{original_name} cannot be converted to {name} (already used)")

    if name in SOLIDITY_KEYWORDS:
        raise FormatImpossible(f"{original_name} cannot be converted to {name} (Solidity keyword)")
    return name


def _convert_mixedCase(original_name, compilation_unit: SlitherCompilationUnit):
    name = original_name
    if isinstance(name, bytes):
        name = name.decode("utf8")

    while "_" in name:
        offset = name.find("_")
        if len(name) > offset:
            name = name[0:offset] + name[offset + 1].upper() + name[offset + 2:]

    name = name[0].lower() + name[1:]
    if _name_already_use(compilation_unit, name):
        raise FormatImpossible(f"{original_name} cannot be converted to {name} (already used)")
    if name in SOLIDITY_KEYWORDS:
        raise FormatImpossible(f"{original_name} cannot be converted to {name} (Solidity keyword)")
    return name


def _convert_UPPER_CASE_WITH_UNDERSCORES(name, compilation_unit: SlitherCompilationUnit):
    if _name_already_use(compilation_unit, name.upper()):
        raise FormatImpossible(f"{name} cannot be converted to {name.upper()} (already used)")
    if name.upper() in SOLIDITY_KEYWORDS:
        raise FormatImpossible(f"{name} cannot be converted to {name.upper()} (Solidity keyword)")
    return name.upper()


conventions = {
    "CapWords": _convert_CapWords,
    "mixedCase": _convert_mixedCase,
    "UPPER_CASE_WITH_UNDERSCORES": _convert_UPPER_CASE_WITH_UNDERSCORES,
}


# endregion
###################################################################################
###################################################################################
# region Helpers
###################################################################################
###################################################################################

def _get_from_contract(compilation_unit: SlitherCompilationUnit, element, name, getter):
    scope = compilation_unit.get_scope(element["source_mapping"]["filename_absolute"])
    contract_name = element["type_specific_fields"]["parent"]["name"]
    contract = scope.get_contract_from_name(contract_name)
    return getattr(contract, getter)(name)


# endregion
###################################################################################
###################################################################################
# region Patch dispatcher
###################################################################################
###################################################################################

def _patch(compilation_unit: SlitherCompilationUnit, result, element, _target):
    scope = compilation_unit.get_scope(element["source_mapping"]["filename_absolute"])

    if _target == "contract":
        target = scope.get_contract_from_name(element["name"])

    elif _target == "structure":
        target = _get_from_contract(
            compilation_unit, element, element["name"], "get_structure_from_name"
        )

    elif _target == "event":
        target = _get_from_contract(
            compilation_unit, element, element["name"], "get_event_from_name"
        )

    elif _target == "function":
        # Avoid constructor (FP?)
        if element["name"] != element["type_specific_fields"]["parent"]["name"]:
            function_sig = element["type_specific_fields"]["signature"]
            target = _get_from_contract(
                compilation_unit, element, function_sig, "get_function_from_signature"
            )

    elif _target == "modifier":
        modifier_sig = element["type_specific_fields"]["signature"]
        target = _get_from_contract(
            compilation_unit, element, modifier_sig, "get_modifier_from_signature"
        )

    elif _target == "parameter":
        contract_name = element["type_specific_fields"]["parent"]["type_specific_fields"]["parent"][
            "name"
        ]
        function_sig = element["type_specific_fields"]["parent"]["type_specific_fields"][
            "signature"
        ]
        param_name = element["name"]
        contract = scope.get_contract_from_name(contract_name)
        function = contract.get_function_from_full_name(function_sig)
        target = function.get_local_variable_from_name(param_name)

    elif _target in ["variable", "variable_constant"]:
        # Local variable
        if element["type_specific_fields"]["parent"] == "function":
            contract_name = element["type_specific_fields"]["parent"]["type_specific_fields"][
                "parent"
            ]["name"]
            function_sig = element["type_specific_fields"]["parent"]["type_specific_fields"][
                "signature"
            ]
            var_name = element["name"]
            contract = scope.get_contract_from_name(contract_name)
            function = contract.get_function_from_full_name(function_sig)
            target = function.get_local_variable_from_name(var_name)
        # State variable
        else:
            target = _get_from_contract(
                compilation_unit, element, element["name"], "get_state_variable_from_name"
            )

    elif _target == "enum":
        target = _get_from_contract(
            compilation_unit, element, element["name"], "get_enum_from_canonical_name"
        )

    else:
        raise FormatError("Unknown naming convention! " + _target)

    _explore(
        compilation_unit, result, target, conventions[element["additional_fields"]["convention"]]
    )


# endregion
###################################################################################
###################################################################################
# region Explore functions
###################################################################################
###################################################################################

# group 1: beginning of the from type
# group 2: beginning of the to type
# nested mapping are within the group 1
# RE_MAPPING = '[ ]*mapping[ ]*\([ ]*([\=\>\(\) a-zA-Z0-9\._\[\]]*)[ ]*=>[ ]*([a-zA-Z0-9\._\[\]]*)\)'
RE_MAPPING_FROM = rb"([a-zA-Z0-9\._\[\]]*)"
RE_MAPPING_TO = rb"([\=\>\(\) a-zA-Z0-9\._\[\]\ ]*)"
RE_MAPPING = (
    rb"[ ]*mapping[ ]*\([ ]*" + RE_MAPPING_FROM + b"[ ]*" + b"=>" + b"[ ]*" + RE_MAPPING_TO + rb"\)"
)


def _is_var_declaration(slither, filename, start):
    """
    Detect usage of 'var ' for Solidity < 0.5
    :param slither:
    :param filename:
    :param start:
    :return:
    """
    v = "var "
    return slither.source_code[filename][start: start + len(v)] == v


def _explore_type(  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches
    slither, result, target, convert, custom_type, filename_source_code, start, end
):
    if isinstance(custom_type, UserDefinedType):
        # Patch type based on contract/enum
        if isinstance(custom_type.type, (Enum, Contract)):
            if custom_type.type == target:
                old_str = custom_type.type.name
                new_str = convert(old_str, slither)

                loc_start = start
                if _is_var_declaration(slither, filename_source_code, start):
                    loc_end = loc_start + len("var")
                else:
                    loc_end = loc_start + len(old_str)

                create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str)

        else:
            # Patch type based on structure
            assert isinstance(custom_type.type, Structure)
            if custom_type.type == target:
                old_str = custom_type.type.name
                new_str = convert(old_str, slither)

                loc_start = start
                if _is_var_declaration(slither, filename_source_code, start):
                    loc_end = loc_start + len("var")
                else:
                    loc_end = loc_start + len(old_str)

                create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str)

            # Structure contain a list of elements, that might need patching
            # .elems return a list of VariableStructure
            _explore_variables_declaration(
                slither, custom_type.type.elems.values(), result, target, convert
            )

    if isinstance(custom_type, MappingType):
        # Mapping has three steps:
        # Convert the "from" type
        # Convert the "to" type
        # Convert nested type in the "to"
        # Ex: mapping (mapping (badName => uint) => uint)
        # Do the comparison twice, so we can factor together the re matching
        # mapping can only have elementary type in type_from
        if isinstance(custom_type.type_to, (UserDefinedType, MappingType)) or target in [
            custom_type.type_from,
            custom_type.type_to,
        ]:
            full_txt_start = start
            full_txt_end = end
            full_txt = slither.source_code[filename_source_code].encode("utf8")[
                full_txt_start:full_txt_end
            ]
            re_match = re.match(RE_MAPPING, full_txt)
            assert re_match

            if custom_type.type_from == target:
                old_str = custom_type.type_from.name
                new_str = convert(old_str, slither)

                loc_start = start + re_match.start(1)
                loc_end = loc_start + len(old_str)

                create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str)

            if custom_type.type_to == target:
                old_str = custom_type.type_to.name
                new_str = convert(old_str, slither)

                loc_start = start + re_match.start(2)
                loc_end = loc_start + len(old_str)

                create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str)

            if isinstance(custom_type.type_to, (UserDefinedType, MappingType)):
                loc_start = start + re_match.start(2)
                loc_end = start + re_match.end(2)
                _explore_type(
                    slither,
                    result,
                    target,
                    convert,
                    custom_type.type_to,
                    filename_source_code,
                    loc_start,
                    loc_end,
                )


def _explore_variables_declaration(  # pylint: disable=too-many-arguments,too-many-locals,too-many-nested-blocks
    slither, variables, result, target, convert, patch_comment=False
):
    for variable in variables:
        # First explore the type of the variable
        filename_source_code = variable.source_mapping.filename.absolute
        full_txt_start = variable.source_mapping.start
        full_txt_end = full_txt_start + variable.source_mapping.length
        full_txt = slither.source_code[filename_source_code].encode("utf8")[
            full_txt_start:full_txt_end
        ]

        _explore_type(
            slither,
            result,
            target,
            convert,
            variable.type,
            filename_source_code,
            full_txt_start,
            variable.source_mapping.start + variable.source_mapping.length,
        )

        # If the variable is the target
        if variable == target:
            old_str = variable.name
            new_str = convert(old_str, slither)

            loc_start = full_txt_start + full_txt.find(old_str.encode("utf8"))
            loc_end = loc_start + len(old_str)

            create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str)

            # Patch comment only makes sense for local variable declaration in the parameter list
            if patch_comment and isinstance(variable, LocalVariable):
                if variable.source_mapping.lines:
                    func = variable.function
                    end_line = func.source_mapping.lines[0]
                    if variable in func.parameters:
                        idx = len(func.parameters) - func.parameters.index(variable) + 1
                        first_line = end_line - idx - 2

                        potential_comments = slither.source_code[filename_source_code].encode(
                            "utf8"
                        )
                        potential_comments = potential_comments.splitlines(keepends=True)[
                            first_line: end_line - 1
                        ]

                        idx_beginning = func.source_mapping.start
                        idx_beginning += -func.source_mapping.starting_column + 1
                        idx_beginning += -sum([len(c) for c in potential_comments])

                        old_comment = f"@param {old_str}".encode("utf8")

                        for line in potential_comments:
                            idx = line.find(old_comment)
                            if idx >= 0:
                                loc_start = idx + idx_beginning
                                loc_end = loc_start + len(old_comment)
                                new_comment = f"@param {new_str}".encode("utf8")

                                create_patch(
                                    result,
                                    filename_source_code,
                                    loc_start,
                                    loc_end,
                                    old_comment,
                                    new_comment,
                                )
                                break
                            idx_beginning += len(line)


def _explore_structures_declaration(slither, structures, result, target, convert):
    for st in structures:
        # Explore the variable declared within the structure (VariableStructure)
        _explore_variables_declaration(slither, st.elems.values(), result, target, convert)

        # If the structure is the target
        if st == target:
            old_str = st.name
            new_str = convert(old_str, slither)

            filename_source_code = st.source_mapping.filename.absolute
            full_txt_start = st.source_mapping.start
            full_txt_end = full_txt_start + st.source_mapping.length
            full_txt = slither.source_code[filename_source_code].encode("utf8")[
                full_txt_start:full_txt_end
            ]

            # The name is after the space
            matches = re.finditer(b"struct[ ]*", full_txt)
            # Look for the end offset of the largest list of ' '
            loc_start = full_txt_start + max(matches, key=lambda x: len(x.group())).end()
            loc_end = loc_start + len(old_str)

            create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str)


def _explore_events_declaration(slither, events, result, target, convert):
    for event in events:
        # Explore the parameters
        _explore_variables_declaration(slither, event.elems, result, target, convert)

        # If the event is the target
        if event == target:
            filename_source_code = event.source_mapping.filename.absolute
            old_str = event.name
            new_str = convert(old_str, slither)

            loc_start = event.source_mapping.start
            loc_end = loc_start + len(old_str)

            create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str)


def get_ir_variables(ir):
    all_vars = ir.read

    if isinstance(ir, (InternalCall, InternalDynamicCall, HighLevelCall)):
        all_vars += [ir.function]

    if isinstance(ir, (HighLevelCall, Send, LowLevelCall, Transfer)):
        all_vars += [ir.call_value]

    if isinstance(ir, (HighLevelCall, LowLevelCall)):
        all_vars += [ir.call_gas]

    if isinstance(ir, OperationWithLValue):
        all_vars += [ir.lvalue]

    return [v for v in all_vars if v]


def _explore_irs(slither, irs: List[Operation], result, target, convert):
    # pylint: disable=too-many-locals
    if irs is None:
        return
    for ir in irs:
        for v in get_ir_variables(ir):
            if target == v or (
                isinstance(target, Function)
                and isinstance(v, Function)
                and v.canonical_name == target.canonical_name
            ):
                source_mapping = ir.expression.source_mapping
                filename_source_code = source_mapping.filename.absolute
                full_txt_start = source_mapping.start
                full_txt_end = full_txt_start + source_mapping.length
                full_txt = slither.source_code[filename_source_code].encode("utf8")[
                    full_txt_start:full_txt_end
                ]

                if not target.name.encode("utf8") in full_txt:
                    raise FormatError(f"{target} not found in {full_txt} ({source_mapping}")

                old_str = target.name.encode("utf8")
                new_str = convert(old_str, slither)

                counter = 0
                # Can be found multiple time on the same IR
                # We patch one by one
                while old_str in full_txt:
                    target_found_at = full_txt.find(old_str)
                    full_txt = full_txt[target_found_at + 1:]
                    counter += target_found_at

                    loc_start = full_txt_start + counter
                    loc_end = loc_start + len(old_str)

                    create_patch(
                        result,
                        filename_source_code,
                        loc_start,
                        loc_end,
old_str, new_str, ) def _explore_functions(slither, functions, result, target, convert): for function in functions: _explore_variables_declaration(slither, function.variables, result, target, convert, True) _explore_irs(slither, function.all_slithir_operations(), result, target, convert) if isinstance(target, Function) and function.canonical_name == target.canonical_name: old_str = function.name new_str = convert(old_str, slither) filename_source_code = function.source_mapping.filename.absolute full_txt_start = function.source_mapping.start full_txt_end = full_txt_start + function.source_mapping.length full_txt = slither.source_code[filename_source_code].encode("utf8")[ full_txt_start:full_txt_end ] # The name is after the space if isinstance(target, Modifier): matches = re.finditer(b"modifier([ ]*)", full_txt) else: matches = re.finditer(b"function([ ]*)", full_txt) # Look for the end offset of the largest list of ' ' loc_start = full_txt_start + max(matches, key=lambda x: len(x.group())).end() loc_end = loc_start + len(old_str) create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str) def _explore_enums(slither, enums, result, target, convert): for enum in enums: if enum == target: old_str = enum.name new_str = convert(old_str, slither) filename_source_code = enum.source_mapping.filename.absolute full_txt_start = enum.source_mapping.start full_txt_end = full_txt_start + enum.source_mapping.length full_txt = slither.source_code[filename_source_code].encode("utf8")[ full_txt_start:full_txt_end ] # The name is after the space matches = re.finditer(b"enum([ ]*)", full_txt) # Look for the end offset of the largest list of ' ' loc_start = full_txt_start + max(matches, key=lambda x: len(x.group())).end() loc_end = loc_start + len(old_str) create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str) def _explore_contract(slither, contract, result, target, convert): _explore_variables_declaration(slither, contract.state_variables, result, target, convert) _explore_structures_declaration(slither, contract.structures, result, target, convert) _explore_functions(slither, contract.functions_and_modifiers, result, target, convert) _explore_enums(slither, contract.enums, result, target, convert) if contract == target: filename_source_code = contract.source_mapping.filename.absolute full_txt_start = contract.source_mapping.start full_txt_end = full_txt_start + contract.source_mapping.length full_txt = slither.source_code[filename_source_code].encode("utf8")[ full_txt_start:full_txt_end ] old_str = contract.name new_str = convert(old_str, slither) # The name is after the space matches = re.finditer(b"contract[ ]*", full_txt) # Look for the end offset of the largest list of ' ' loc_start = full_txt_start + max(matches, key=lambda x: len(x.group())).end() loc_end = loc_start + len(old_str) create_patch(result, filename_source_code, loc_start, loc_end, old_str, new_str) def _explore(compilation_unit: SlitherCompilationUnit, result, target, convert): for contract in compilation_unit.contracts_derived: _explore_contract(compilation_unit, contract, result, target, convert) # endregion
25,464
Python
.py
549
35.918033
112
0.566125
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
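A quick way to sanity-check the mixedCase conversion in the file above is to run its underscore-folding loop in isolation. This is a minimal standalone sketch (hypothetical helper name, no Slither dependency; the real _convert_mixedCase also checks name collisions and Solidity keywords):

import re  # not needed here; kept out on purpose -- the loop is pure string slicing

def to_mixed_case(name: str) -> str:
    # Same folding as _convert_mixedCase: drop each "_" and capitalize the next char.
    while "_" in name:
        offset = name.find("_")
        if len(name) > offset:
            name = name[0:offset] + name[offset + 1].upper() + name[offset + 2:]
    # Lowercase the first character, as the original does after the loop.
    return name[0].lower() + name[1:]

assert to_mixed_case("my_state_var") == "myStateVar"
assert to_mixed_case("AlreadyCaps") == "alreadyCaps"
# Note: a trailing underscore (e.g. "foo_") would raise IndexError at name[offset + 1],
# mirroring the behavior of the original loop.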
2,285,982
constant_pragma.py
NioTheFirst_ScType/slither/formatters/attributes/constant_pragma.py
import re

from slither.formatters.exceptions import FormatImpossible
from slither.formatters.utils.patches import create_patch


# Indicates the recommended versions for replacement
REPLACEMENT_VERSIONS = ["^0.4.25", "^0.5.3"]

# pylint: disable=anomalous-backslash-in-string
# group:
# 0: ^ > >= < <= (optional)
# 1: ' ' (optional)
# 2: version number
# 3: version number
# 4: version number
PATTERN = re.compile(r"(\^|>|>=|<|<=)?([ ]+)?(\d+)\.(\d+)\.(\d+)")


def custom_format(slither, result):
    elements = result["elements"]
    versions_used = []
    for element in elements:
        versions_used.append("".join(element["type_specific_fields"]["directive"][1:]))
    solc_version_replace = _analyse_versions(versions_used)
    for element in elements:
        _patch(
            slither,
            result,
            element["source_mapping"]["filename_absolute"],
            solc_version_replace,
            element["source_mapping"]["start"],
            element["source_mapping"]["start"] + element["source_mapping"]["length"],
        )


def _analyse_versions(used_solc_versions):
    replace_solc_versions = []
    for version in used_solc_versions:
        replace_solc_versions.append(_determine_solc_version_replacement(version))
    if not all(version == replace_solc_versions[0] for version in replace_solc_versions):
        raise FormatImpossible("Multiple incompatible versions!")
    return replace_solc_versions[0]


def _determine_solc_version_replacement(used_solc_version):
    versions = PATTERN.findall(used_solc_version)
    if len(versions) == 1:
        version = versions[0]
        minor_version = ".".join(version[2:])[2]
        if minor_version == "4":
            return "pragma solidity " + REPLACEMENT_VERSIONS[0] + ";"
        if minor_version == "5":
            return "pragma solidity " + REPLACEMENT_VERSIONS[1] + ";"
        raise FormatImpossible("Unknown version!")
    if len(versions) == 2:
        version_right = versions[1]
        minor_version_right = ".".join(version_right[2:])[2]
        if minor_version_right == "4":
            # Replace with 0.4.25
            return "pragma solidity " + REPLACEMENT_VERSIONS[0] + ";"
        if minor_version_right in ["5", "6"]:
            # Replace with 0.5.3
            return "pragma solidity " + REPLACEMENT_VERSIONS[1] + ";"
        raise FormatImpossible("Unknown version!")


def _patch(
    slither, result, in_file, pragma, modify_loc_start, modify_loc_end
):  # pylint: disable=too-many-arguments
    in_file_str = slither.source_code[in_file].encode("utf8")
    old_str_of_interest = in_file_str[modify_loc_start:modify_loc_end]
    create_patch(
        result,
        in_file,
        int(modify_loc_start),
        int(modify_loc_end),
        old_str_of_interest,
        pragma,
    )
2,805
Python
.py
68
34.558824
89
0.639765
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
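The version-matching PATTERN above (shared with incorrect_solc.py below) drives both pragma formatters; a minimal probe of what findall actually returns, using only the standard re module:

import re

PATTERN = re.compile(r"(\^|>|>=|<|<=)?([ ]+)?(\d+)\.(\d+)\.(\d+)")

# One constraint -> one 5-tuple per version; the minor digit that the formatter
# extracts via ".".join(version[2:])[2] is simply tuple index 3.
print(PATTERN.findall("^0.4.24"))
# [('^', '', '0', '4', '24')]

# A range -> two tuples; _determine_solc_version_replacement keys off the
# right-hand (second) one when len(versions) == 2.
print(PATTERN.findall(">=0.4.0 <0.6.0"))
# [('>=', '', '0', '4', '0'), ('<', '', '0', '6', '0')]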
2,285,983
incorrect_solc.py
NioTheFirst_ScType/slither/formatters/attributes/incorrect_solc.py
import re

from slither.formatters.exceptions import FormatImpossible
from slither.formatters.utils.patches import create_patch


# Indicates the recommended versions for replacement
REPLACEMENT_VERSIONS = ["^0.4.25", "^0.5.3"]

# group:
# 0: ^ > >= < <= (optional)
# 1: ' ' (optional)
# 2: version number
# 3: version number
# 4: version number
PATTERN = re.compile(r"(\^|>|>=|<|<=)?([ ]+)?(\d+)\.(\d+)\.(\d+)")


def custom_format(slither, result):
    elements = result["elements"]
    for element in elements:
        solc_version_replace = _determine_solc_version_replacement(
            "".join(element["type_specific_fields"]["directive"][1:])
        )
        _patch(
            slither,
            result,
            element["source_mapping"]["filename_absolute"],
            solc_version_replace,
            element["source_mapping"]["start"],
            element["source_mapping"]["start"] + element["source_mapping"]["length"],
        )


def _determine_solc_version_replacement(used_solc_version):
    versions = PATTERN.findall(used_solc_version)
    if len(versions) == 1:
        version = versions[0]
        minor_version = ".".join(version[2:])[2]
        if minor_version == "4":
            # Replace with 0.4.25
            return "pragma solidity " + REPLACEMENT_VERSIONS[0] + ";"
        if minor_version == "5":
            # Replace with 0.5.3
            return "pragma solidity " + REPLACEMENT_VERSIONS[1] + ";"
        raise FormatImpossible(f"Unknown version {versions}")
    if len(versions) == 2:
        version_right = versions[1]
        minor_version_right = ".".join(version_right[2:])[2]
        if minor_version_right == "4":
            # Replace with 0.4.25
            return "pragma solidity " + REPLACEMENT_VERSIONS[0] + ";"
        if minor_version_right in ["5", "6"]:
            # Replace with 0.5.3
            return "pragma solidity " + REPLACEMENT_VERSIONS[1] + ";"
    return None


# pylint: disable=too-many-arguments
def _patch(slither, result, in_file, solc_version, modify_loc_start, modify_loc_end):
    in_file_str = slither.source_code[in_file].encode("utf8")
    old_str_of_interest = in_file_str[modify_loc_start:modify_loc_end]
    create_patch(
        result,
        in_file,
        int(modify_loc_start),
        int(modify_loc_end),
        old_str_of_interest,
        solc_version,
    )
2,369
Python
.py
60
32.1
85
0.60792
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,984
const_functions.py
NioTheFirst_ScType/slither/formatters/attributes/const_functions.py
import re

from slither.core.compilation_unit import SlitherCompilationUnit
from slither.formatters.exceptions import FormatError
from slither.formatters.utils.patches import create_patch


def custom_format(compilation_unit: SlitherCompilationUnit, result):
    for file_scope in compilation_unit.scopes.values():
        elements = result["elements"]
        for element in elements:
            if element["type"] != "function":
                # Skip variable elements
                continue
            target_contract = file_scope.get_contract_from_name(
                element["type_specific_fields"]["parent"]["name"]
            )
            if target_contract:
                function = target_contract.get_function_from_full_name(
                    element["type_specific_fields"]["signature"]
                )
                if function:
                    _patch(
                        compilation_unit,
                        result,
                        element["source_mapping"]["filename_absolute"],
                        int(
                            function.parameters_src().source_mapping.start
                            + function.parameters_src().source_mapping.length
                        ),
                        int(function.returns_src().source_mapping.start),
                    )


def _patch(
    compilation_unit: SlitherCompilationUnit, result, in_file, modify_loc_start, modify_loc_end
):
    in_file_str = compilation_unit.core.source_code[in_file].encode("utf8")
    old_str_of_interest = in_file_str[modify_loc_start:modify_loc_end]
    # Find the keywords view|pure|constant and remove them
    m = re.search("(view|pure|constant)", old_str_of_interest.decode("utf-8"))
    if m:
        create_patch(
            result,
            in_file,
            modify_loc_start + m.span()[0],
            modify_loc_start + m.span()[1],
            m.groups(0)[0],  # this is view|pure|constant
            "",
        )
    else:
        raise FormatError(
            "No view/pure/constant specifier exists. Regex failed to remove specifier!"
        )
2,116
Python
.py
49
30.489796
95
0.57226
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
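The patch offsets in _patch above come straight from re.search spans; a minimal sketch of the same removal applied to a plain string (hypothetical sample signature, no create_patch involved):

import re

header = "function f() public view returns (uint256)"
m = re.search("(view|pure|constant)", header)
# m.span() is relative to `header`; the formatter rebases it with
# modify_loc_start to get absolute file offsets before calling create_patch.
start, end = m.span()
patched = header[:start] + header[end:]
print(patched)  # "function f() public  returns (uint256)"
# Note the leftover double space: the real patch also replaces only the
# keyword itself with "", leaving the surrounding whitespace intact.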
2,285,985
external_function.py
NioTheFirst_ScType/slither/formatters/functions/external_function.py
import re

from slither.core.compilation_unit import SlitherCompilationUnit
from slither.formatters.utils.patches import create_patch


def custom_format(compilation_unit: SlitherCompilationUnit, result):
    for file_scope in compilation_unit.scopes.values():
        elements = result["elements"]
        for element in elements:
            target_contract = file_scope.get_contract_from_name(
                element["type_specific_fields"]["parent"]["name"]
            )
            if target_contract:
                function = target_contract.get_function_from_full_name(
                    element["type_specific_fields"]["signature"]
                )
                if function:
                    _patch(
                        file_scope,
                        result,
                        element["source_mapping"]["filename_absolute"],
                        int(function.parameters_src().source_mapping.start),
                        int(function.returns_src().source_mapping.start),
                    )


def _patch(
    compilation_unit: SlitherCompilationUnit, result, in_file, modify_loc_start, modify_loc_end
):
    in_file_str = compilation_unit.core.source_code[in_file].encode("utf8")
    old_str_of_interest = in_file_str[modify_loc_start:modify_loc_end]
    # Search for the 'public' keyword, which is in-between the function name and modifier name (if present)
    # regex: 'public' could have spaces around it or be at the end of the line
    m = re.search(r"((\spublic)\s+)|(\spublic)$|(\)public)$", old_str_of_interest.decode("utf-8"))
    if m is None:
        # No visibility specifier exists; public by default.
        create_patch(
            result,
            in_file,
            # start after the function definition's closing parenthesis
            modify_loc_start + len(old_str_of_interest.decode("utf-8").split(")")[0]) + 1,
            # end is the same as start because we insert the keyword `external` at that location
            modify_loc_start + len(old_str_of_interest.decode("utf-8").split(")")[0]) + 1,
            "",
            " external",
        )  # replace_text is ` external`
    else:
        create_patch(
            result,
            in_file,
            # start at the keyword `public`
            modify_loc_start + m.span()[0] + 1,
            # end after the keyword `public` = start + len('public')
            modify_loc_start + m.span()[0] + 1 + len("public"),
            "public",
            "external",
        )
2,497
Python
.py
53
35.377358
102
0.582206
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
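The visibility regex in _patch above has three arms; a quick probe of which arm fires on which shape of function header tail (pure re, hypothetical sample strings):

import re

VIS = r"((\spublic)\s+)|(\spublic)$|(\)public)$"

print(re.search(VIS, " public returns").span())  # (0, 8): first arm, ` public ` mid-string
print(re.search(VIS, " public"))                 # second arm: trailing ` public`
print(re.search(VIS, ")public"))                 # third arm: `)public` with no space
print(re.search(VIS, " external returns"))       # None -> formatter inserts ` external` instead
# In the match case, the formatter patches starting at m.span()[0] + 1,
# i.e. it skips the leading space or `)` and rewrites exactly `public`.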
2,285,986
unchanged_state_variables.py
NioTheFirst_ScType/slither/formatters/variables/unchanged_state_variables.py
import re

from slither.core.compilation_unit import SlitherCompilationUnit
from slither.formatters.exceptions import FormatError, FormatImpossible
from slither.formatters.utils.patches import create_patch


def custom_format(compilation_unit: SlitherCompilationUnit, result, attribute: str) -> None:
    elements = result["elements"]
    for element in elements:
        # TODO: decide if this should be changed in the constant detector
        contract_name = element["type_specific_fields"]["parent"]["name"]
        scope = compilation_unit.get_scope(element["source_mapping"]["filename_absolute"])
        contract = scope.get_contract_from_name(contract_name)
        var = contract.get_state_variable_from_name(element["name"])
        if not var.expression:
            raise FormatImpossible(f"{var.name} is uninitialized and cannot become {attribute}.")
        _patch(
            compilation_unit,
            result,
            element["source_mapping"]["filename_absolute"],
            element["name"],
            f"{attribute} " + element["name"],
            element["source_mapping"]["start"],
            element["source_mapping"]["start"] + element["source_mapping"]["length"],
        )


def _patch(  # pylint: disable=too-many-arguments
    compilation_unit: SlitherCompilationUnit,
    result,
    in_file,
    match_text,
    replace_text,
    modify_loc_start,
    modify_loc_end,
):
    in_file_str = compilation_unit.core.source_code[in_file].encode("utf8")
    old_str_of_interest = in_file_str[modify_loc_start:modify_loc_end]
    # Add keyword `constant` before the variable name
    (new_str_of_interest, num_repl) = re.subn(
        match_text, replace_text, old_str_of_interest.decode("utf-8"), 1
    )
    if num_repl != 0:
        create_patch(
            result,
            in_file,
            modify_loc_start,
            modify_loc_end,
            old_str_of_interest,
            new_str_of_interest,
        )
    else:
        raise FormatError("State variable not found?!")
2,027
Python
.py
49
33.510204
97
0.654315
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
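re.subn does the actual insertion in the _patch above; a standalone check of the count-limited substitution on a hypothetical declaration string:

import re

decl = "uint256 myVar = 42;"
new_decl, num_repl = re.subn("myVar", "constant myVar", decl, 1)
print(new_decl)   # "uint256 constant myVar = 42;"
print(num_repl)   # 1 -- a count of 0 is what triggers the FormatError above
# Caveat, as in the formatter itself: the variable name is used as a regex
# pattern, so a name containing regex metacharacters would need escaping.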
2,285,987
unused_state_variables.py
NioTheFirst_ScType/slither/formatters/variables/unused_state_variables.py
from slither.core.compilation_unit import SlitherCompilationUnit
from slither.formatters.utils.patches import create_patch


def custom_format(compilation_unit: SlitherCompilationUnit, result):
    elements = result["elements"]
    for element in elements:
        if element["type"] == "variable":
            _patch(
                compilation_unit,
                result,
                element["source_mapping"]["filename_absolute"],
                element["source_mapping"]["start"],
            )


def _patch(compilation_unit: SlitherCompilationUnit, result, in_file, modify_loc_start):
    in_file_str = compilation_unit.core.source_code[in_file].encode("utf8")
    old_str_of_interest = in_file_str[modify_loc_start:]
    old_str = (
        old_str_of_interest.decode("utf-8").partition(";")[0]
        + old_str_of_interest.decode("utf-8").partition(";")[1]
    )

    create_patch(
        result,
        in_file,
        int(modify_loc_start),
        # Remove the entire declaration until the semicolon
        int(modify_loc_start + len(old_str_of_interest.decode("utf-8").partition(";")[0]) + 1),
        old_str,
        "",
    )
1,153
Python
.py
28
32.857143
95
0.628571
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
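The declaration-removal logic above leans on str.partition; the same offset arithmetic in isolation (hypothetical source snippet, no Slither objects):

source_tail = "uint256 unusedVar; // next declaration follows"
head, sep, _rest = source_tail.partition(";")
old_str = head + sep          # "uint256 unusedVar;" -- declaration plus semicolon
end_offset = len(head) + 1    # same end-of-patch arithmetic as the create_patch call
print(source_tail[:end_offset])  # "uint256 unusedVar;"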
2,285,988
all_printers.py
NioTheFirst_ScType/slither/printers/all_printers.py
# pylint: disable=unused-import,relative-beyond-top-level
from .summary.function import FunctionSummary
from .summary.contract import ContractSummary
from .inheritance.inheritance import PrinterInheritance
from .inheritance.inheritance_graph import PrinterInheritanceGraph
from .call.call_graph import PrinterCallGraph
from .functions.authorization import PrinterWrittenVariablesAndAuthorization
from .summary.slithir import PrinterSlithIR
from .summary.slithir_ssa import PrinterSlithIRSSA
from .summary.human_summary import PrinterHumanSummary
from .functions.cfg import CFG
from .summary.function_ids import FunctionIds
from .summary.variable_order import VariableOrder
from .summary.data_depenency import DataDependency
from .summary.modifier_calls import Modifiers
from .summary.require_calls import RequireOrAssert
from .summary.constructor_calls import ConstructorPrinter
from .guidance.echidna import Echidna
from .summary.evm import PrinterEVM
from .summary.when_not_paused import PrinterWhenNotPaused
from .summary.declaration import Declaration
from .functions.dominator import Dominator
1,099
Python
.py
22
48.954545
76
0.88208
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,989
abstract_printer.py
NioTheFirst_ScType/slither/printers/abstract_printer.py
import abc
from logging import Logger
from typing import TYPE_CHECKING, Union, List, Optional, Dict

from slither.utils import output
from slither.utils.output import SupportedOutput

if TYPE_CHECKING:
    from slither import Slither


class IncorrectPrinterInitialization(Exception):
    pass


class AbstractPrinter(metaclass=abc.ABCMeta):
    ARGUMENT = ""  # run the printer with slither.py --ARGUMENT
    HELP = ""  # help information
    WIKI = ""

    def __init__(self, slither: "Slither", logger: Logger) -> None:
        self.slither = slither
        self.contracts = slither.contracts
        self.filename = slither.filename
        self.logger = logger

        if not self.HELP:
            raise IncorrectPrinterInitialization(
                f"HELP is not initialized {self.__class__.__name__}"
            )

        if not self.ARGUMENT:
            raise IncorrectPrinterInitialization(
                f"ARGUMENT is not initialized {self.__class__.__name__}"
            )

        if not self.WIKI:
            raise IncorrectPrinterInitialization(
                f"WIKI is not initialized {self.__class__.__name__}"
            )

    def info(self, info: str) -> None:
        if self.logger:
            self.logger.info(info)

    def generate_output(
        self,
        info: Union[str, List[Union[str, SupportedOutput]]],
        additional_fields: Optional[Dict] = None,
    ) -> output.Output:
        if additional_fields is None:
            additional_fields = {}
        printer_output = output.Output(info, additional_fields)
        printer_output.data["printer"] = self.ARGUMENT

        return printer_output

    @abc.abstractmethod
    def output(self, filename: str) -> output.Output:
        pass
1,746
Python
.py
46
29.73913
72
0.64133
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
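AbstractPrinter above enforces its three class attributes at construction time, so a conforming subclass only needs ARGUMENT, HELP, WIKI, and an output method. A minimal sketch (hypothetical printer, not part of this repo; assumes a Slither instance and logger are available to construct it):

from slither.printers.abstract_printer import AbstractPrinter


class ContractCounter(AbstractPrinter):  # hypothetical example printer
    ARGUMENT = "contract-count"          # would run as: slither . --print contract-count
    HELP = "Print the number of contracts"
    WIKI = "https://example.com/wiki"    # placeholder; any non-empty string passes the check

    def output(self, _filename):
        # self.contracts is populated by AbstractPrinter.__init__
        txt = f"{len(self.contracts)} contracts"
        self.info(txt)
        return self.generate_output(txt)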
2,285,990
modifier_calls.py
NioTheFirst_ScType/slither/printers/summary/modifier_calls.py
""" Module printing summary of the contract """ from slither.core.declarations import Function from slither.printers.abstract_printer import AbstractPrinter from slither.utils.myprettytable import MyPrettyTable class Modifiers(AbstractPrinter): ARGUMENT = "modifiers" HELP = "Print the modifiers called by each function" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#modifiers" def output(self, _filename): """ _filename is not used Args: _filename(string) """ all_txt = "" all_tables = [] for contract in self.slither.contracts_derived: txt = f"\nContract {contract.name}" table = MyPrettyTable(["Function", "Modifiers"]) for function in contract.functions: modifiers = function.modifiers for call in function.all_internal_calls(): if isinstance(call, Function): modifiers += call.modifiers for (_, call) in function.all_library_calls(): if isinstance(call, Function): modifiers += call.modifiers table.add_row([function.name, sorted([m.name for m in set(modifiers)])]) txt += "\n" + str(table) self.info(txt) all_txt += txt all_tables.append((contract.name, table)) res = self.generate_output(all_txt) for name, table in all_tables: res.add_pretty_table(table, name) return res
1,578
Python
.py
38
30.789474
88
0.600654
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,991
function.py
NioTheFirst_ScType/slither/printers/summary/function.py
""" Module printing summary of the contract """ from slither.printers.abstract_printer import AbstractPrinter from slither.utils.myprettytable import MyPrettyTable class FunctionSummary(AbstractPrinter): ARGUMENT = "function-summary" HELP = "Print a summary of the functions" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#function-summary" @staticmethod def _convert(l): if l: n = 2 l = [l[i : i + n] for i in range(0, len(l), n)] l = [str(x) for x in l] return "\n".join(l) return str(l) def output(self, _filename): # pylint: disable=too-many-locals """ _filename is not used Args: _filename(string) """ all_tables = [] all_txt = "" for c in self.contracts: if c.is_top_level: continue (name, inheritance, var, func_summaries, modif_summaries) = c.get_summary() txt = f"\nContract {name}" txt += "\nContract vars: " + str(var) txt += "\nInheritance:: " + str(inheritance) table = MyPrettyTable( [ "Function", "Visibility", "Modifiers", "Read", "Write", "Internal Calls", "External Calls", ] ) for ( _c_name, f_name, visi, modifiers, read, write, internal_calls, external_calls, ) in func_summaries: read = self._convert(sorted(read)) write = self._convert(sorted(write)) internal_calls = self._convert(sorted(internal_calls)) external_calls = self._convert(sorted(external_calls)) table.add_row( [ f_name, visi, sorted(modifiers), read, write, internal_calls, external_calls, ] ) txt += "\n \n" + str(table) table = MyPrettyTable( [ "Modifiers", "Visibility", "Read", "Write", "Internal Calls", "External Calls", ] ) for ( _c_name, f_name, visi, _, read, write, internal_calls, external_calls, ) in modif_summaries: read = self._convert(sorted(read)) write = self._convert(sorted(write)) internal_calls = self._convert(sorted(internal_calls)) external_calls = self._convert(sorted(external_calls)) table.add_row([f_name, visi, read, write, internal_calls, external_calls]) txt += "\n\n" + str(table) txt += "\n" self.info(txt) all_tables.append((name, table)) all_txt += txt res = self.generate_output(all_txt) for name, table in all_tables: res.add_pretty_table(table, name) return res
3,534
Python
.py
103
19.563107
95
0.43931
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,992
human_summary.py
NioTheFirst_ScType/slither/printers/summary/human_summary.py
""" Module printing summary of the contract """ import logging from pathlib import Path from typing import Tuple, List, Dict from slither.core.declarations import SolidityFunction, Function from slither.core.variables.state_variable import StateVariable from slither.printers.abstract_printer import AbstractPrinter from slither.slithir.operations import ( LowLevelCall, HighLevelCall, Transfer, Send, SolidityCall, ) from slither.utils import output from slither.utils.code_complexity import compute_cyclomatic_complexity from slither.utils.colors import green, red, yellow from slither.utils.myprettytable import MyPrettyTable from slither.utils.standard_libraries import is_standard_library from slither.core.cfg.node import NodeType from slither.utils.tests_pattern import is_test_file class PrinterHumanSummary(AbstractPrinter): ARGUMENT = "human-summary" HELP = "Print a human-readable summary of the contracts" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#human-summary" @staticmethod def _get_summary_erc20(contract): functions_name = [f.name for f in contract.functions] state_variables = [v.name for v in contract.state_variables] pause = "pause" in functions_name if "mint" in functions_name: if "mintingFinished" in state_variables: mint_unlimited = False else: mint_unlimited = True else: mint_unlimited = None # no minting race_condition_mitigated = ( "increaseApproval" in functions_name or "safeIncreaseAllowance" in functions_name ) return pause, mint_unlimited, race_condition_mitigated def get_summary_erc20(self, contract): txt = "" pause, mint_unlimited, race_condition_mitigated = self._get_summary_erc20(contract) if pause: txt += yellow("Pausable") + "\n" if mint_unlimited is None: txt += green("No Minting") + "\n" else: if mint_unlimited: txt += red("∞ Minting") + "\n" else: txt += yellow("Minting") + "\n" if not race_condition_mitigated: txt += red("Approve Race Cond.") + "\n" return txt def _get_detectors_result(self) -> Tuple[List[Dict], int, int, int, int, int]: # disable detectors logger logger = logging.getLogger("Detectors") logger.setLevel(logging.ERROR) checks_optimization = self.slither.detectors_optimization checks_informational = self.slither.detectors_informational checks_low = self.slither.detectors_low checks_medium = self.slither.detectors_medium checks_high = self.slither.detectors_high issues_optimization = [c.detect() for c in checks_optimization] issues_optimization = [c for c in issues_optimization if c] issues_optimization = [item for sublist in issues_optimization for item in sublist] issues_informational = [c.detect() for c in checks_informational] issues_informational = [c for c in issues_informational if c] issues_informational = [item for sublist in issues_informational for item in sublist] issues_low = [c.detect() for c in checks_low] issues_low = [c for c in issues_low if c] issues_low = [item for sublist in issues_low for item in sublist] issues_medium = (c.detect() for c in checks_medium) issues_medium = [c for c in issues_medium if c] issues_medium = [item for sublist in issues_medium for item in sublist] issues_high = [c.detect() for c in checks_high] issues_high = [c for c in issues_high if c] issues_high = [item for sublist in issues_high for item in sublist] all_results = ( issues_optimization + issues_informational + issues_low + issues_medium + issues_high ) return ( all_results, len(issues_optimization), len(issues_informational), len(issues_low), len(issues_medium), len(issues_high), ) def get_detectors_result(self) -> 
Tuple[str, List[Dict], int, int, int, int, int]: ( all_results, optimization, informational, low, medium, high, ) = self._get_detectors_result() txt = f"Number of optimization issues: {green(optimization)}\n" txt += f"Number of informational issues: {green(informational)}\n" txt += f"Number of low issues: {green(low)}\n" if medium > 0: txt += f"Number of medium issues: {yellow(medium)}\n" else: txt += f"Number of medium issues: {green(medium)}\n" if high > 0: txt += f"Number of high issues: {red(high)}\n" else: txt += f"Number of high issues: {green(high)}\n\n" return txt, all_results, optimization, informational, low, medium, high @staticmethod def _is_complex_code(contract): for f in contract.functions: if compute_cyclomatic_complexity(f) > 7: return True return False def is_complex_code(self, contract): """ Check if the code is complex Heuristic, the code is complex if: - One function has a cyclomatic complexity > 7 Args: contract """ is_complex = self._is_complex_code(contract) result = red("Yes") if is_complex else green("No") return result @staticmethod def _number_functions(contract): return len(contract.functions) def _lines_number(self): if not self.slither.source_code: return None total_dep_lines = 0 total_lines = 0 total_tests_lines = 0 for filename, source_code in self.slither.source_code.items(): lines = len(source_code.splitlines()) is_dep = False if self.slither.crytic_compile: is_dep = self.slither.crytic_compile.is_dependency(filename) if is_dep: total_dep_lines += lines else: if is_test_file(Path(filename)): total_tests_lines += lines else: total_lines += lines return total_lines, total_dep_lines, total_tests_lines def _get_number_of_assembly_lines(self): total_asm_lines = 0 for contract in self.contracts: for function in contract.functions_declared: for node in function.nodes: if node.type == NodeType.ASSEMBLY: inline_asm = node.inline_asm if inline_asm: total_asm_lines += len(inline_asm.splitlines()) return total_asm_lines def _compilation_type(self): if self.slither.crytic_compile is None: return "Compilation non standard\n" return f"Compiled with {str(self.slither.crytic_compile.type)}\n" def _number_contracts(self): if self.slither.crytic_compile is None: return len(self.slither.contracts), 0, 0 contracts = [c for c in self.slither.contracts if not c.is_top_level] deps = [c for c in contracts if c.is_from_dependency()] tests = [c for c in contracts if c.is_test] return len(contracts) - len(deps) - len(tests), len(deps), len(tests) def _standard_libraries(self): libraries = [] for contract in self.contracts: lib = is_standard_library(contract) if lib: libraries.append(lib) return libraries def _ercs(self): ercs = [] for contract in self.contracts: ercs += contract.ercs() return list(set(ercs)) def _get_features(self, contract): # pylint: disable=too-many-branches has_payable = False can_send_eth = False can_selfdestruct = False has_ecrecover = False can_delegatecall = False has_token_interaction = False has_assembly = False use_abi_encoder = False for compilation_unit in self.slither.compilation_units: for pragma in compilation_unit.pragma_directives: if ( pragma.source_mapping.filename.absolute == contract.source_mapping.filename.absolute ): if pragma.is_abi_encoder_v2: use_abi_encoder = True for function in contract.functions: if function.payable: has_payable = True if function.contains_assembly: has_assembly = True for ir in function.slithir_operations: if isinstance(ir, (LowLevelCall, HighLevelCall, Send, Transfer)) and ir.call_value: can_send_eth = True if 
isinstance(ir, SolidityCall) and ir.function in [ SolidityFunction("suicide(address)"), SolidityFunction("selfdestruct(address)"), ]: can_selfdestruct = True if isinstance(ir, SolidityCall) and ir.function == SolidityFunction( "ecrecover(bytes32,uint8,bytes32,bytes32)" ): has_ecrecover = True if isinstance(ir, LowLevelCall) and ir.function_name in [ "delegatecall", "callcode", ]: can_delegatecall = True if isinstance(ir, HighLevelCall): if ( isinstance(ir.function, (Function, StateVariable)) and ir.function.contract.is_possible_token ): has_token_interaction = True return { "Receive ETH": has_payable, "Send ETH": can_send_eth, "Selfdestruct": can_selfdestruct, "Ecrecover": has_ecrecover, "Delegatecall": can_delegatecall, "Tokens interaction": has_token_interaction, "AbiEncoderV2": use_abi_encoder, "Assembly": has_assembly, "Upgradeable": contract.is_upgradeable, "Proxy": contract.is_upgradeable_proxy, } def output(self, _filename): # pylint: disable=too-many-locals,too-many-statements """ _filename is not used Args: _filename(string) """ txt = "\n" txt += self._compilation_type() results = { "contracts": {"elements": []}, "number_lines": 0, "number_lines_in_dependencies": 0, "number_lines_assembly": 0, "standard_libraries": [], "ercs": [], "number_findings": {}, "detectors": [], } lines_number = self._lines_number() if lines_number: total_lines, total_dep_lines, total_tests_lines = lines_number txt += f"Number of lines: {total_lines} (+ {total_dep_lines} in dependencies, + {total_tests_lines} in tests)\n" results["number_lines"] = total_lines results["number_lines__dependencies"] = total_dep_lines total_asm_lines = self._get_number_of_assembly_lines() txt += f"Number of assembly lines: {total_asm_lines}\n" results["number_lines_assembly"] = total_asm_lines ( number_contracts, number_contracts_deps, number_contracts_tests, ) = self._number_contracts() txt += f"Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies, + {number_contracts_tests} tests) \n\n" ( txt_detectors, detectors_results, optimization, info, low, medium, high, ) = self.get_detectors_result() txt += txt_detectors results["number_findings"] = { "optimization_issues": optimization, "informational_issues": info, "low_issues": low, "medium_issues": medium, "high_issues": high, } results["detectors"] = detectors_results libs = self._standard_libraries() if libs: txt += f'\nUse: {", ".join(libs)}\n' results["standard_libraries"] = [str(l) for l in libs] ercs = self._ercs() if ercs: txt += f'ERCs: {", ".join(ercs)}\n' results["ercs"] = [str(e) for e in ercs] table = MyPrettyTable( ["Name", "# functions", "ERCS", "ERC20 info", "Complex code", "Features"] ) for contract in self.slither.contracts_derived: if contract.is_from_dependency() or contract.is_test: continue is_complex = self.is_complex_code(contract) number_functions = self._number_functions(contract) ercs = ",".join(contract.ercs()) is_erc20 = contract.is_erc20() erc20_info = "" if is_erc20: erc20_info += self.get_summary_erc20(contract) features = "\n".join( [name for name, to_print in self._get_features(contract).items() if to_print] ) table.add_row( [ contract.name, number_functions, ercs, erc20_info, is_complex, features, ] ) self.info(txt + "\n" + str(table)) results_contract = output.Output("") for contract in self.slither.contracts_derived: if contract.is_test or contract.is_from_dependency(): continue contract_d = { "contract_name": contract.name, "is_complex_code": self._is_complex_code(contract), "is_erc20": contract.is_erc20(), 
"number_functions": self._number_functions(contract), "features": [ name for name, to_print in self._get_features(contract).items() if to_print ], } if contract_d["is_erc20"]: pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract) contract_d["erc20_pause"] = pause if mint_limited is not None: contract_d["erc20_can_mint"] = True contract_d["erc20_mint_limited"] = mint_limited else: contract_d["erc20_can_mint"] = False contract_d["erc20_race_condition_mitigated"] = race_condition_mitigated results_contract.add_contract(contract, additional_fields=contract_d) results["contracts"]["elements"] = results_contract.elements json = self.generate_output(txt, additional_fields=results) return json
15,234
Python
.py
355
31.03662
140
0.575962
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,993
require_calls.py
NioTheFirst_ScType/slither/printers/summary/require_calls.py
""" Module printing summary of the contract """ from slither.core.declarations import SolidityFunction from slither.printers.abstract_printer import AbstractPrinter from slither.slithir.operations import SolidityCall from slither.utils.myprettytable import MyPrettyTable require_or_assert = [ SolidityFunction("assert(bool)"), SolidityFunction("require(bool)"), SolidityFunction("require(bool,string)"), ] class RequireOrAssert(AbstractPrinter): ARGUMENT = "require" HELP = "Print the require and assert calls of each function" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#require" @staticmethod def _convert(l): return "\n".join(l) def output(self, _filename): """ _filename is not used Args: _filename(string) """ all_tables = [] all_txt = "" for contract in self.slither.contracts_derived: txt = f"\nContract {contract.name}" table = MyPrettyTable(["Function", "require or assert"]) for function in contract.functions: require = function.all_slithir_operations() require = [ ir for ir in require if isinstance(ir, SolidityCall) and ir.function in require_or_assert ] require = [ir.node for ir in require] table.add_row( [ function.name, self._convert(sorted([str(m.expression) for m in set(require)])), ] ) txt += "\n" + str(table) self.info(txt) all_tables.append((contract.name, table)) all_txt += txt res = self.generate_output(all_txt) for name, table in all_tables: res.add_pretty_table(table, name) return res
1,939
Python
.py
52
26.846154
89
0.585821
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,994
evm.py
NioTheFirst_ScType/slither/printers/summary/evm.py
""" Module printing evm mapping of the contract """ from slither.printers.abstract_printer import AbstractPrinter from slither.analyses.evm import ( generate_source_to_evm_ins_mapping, load_evm_cfg_builder, ) from slither.utils.colors import blue, green, magenta, red def _extract_evm_info(slither): """ Extract evm information for all derived contracts using evm_cfg_builder Returns: evm CFG and Solidity source to Program Counter (pc) mapping """ evm_info = {} CFG = load_evm_cfg_builder() for contract in slither.contracts_derived: contract_bytecode_runtime = contract.scope.bytecode_runtime(contract.name) contract_srcmap_runtime = contract.scope.srcmap_runtime(contract.name) cfg = CFG(contract_bytecode_runtime) evm_info["cfg", contract.name] = cfg evm_info["mapping", contract.name] = generate_source_to_evm_ins_mapping( cfg.instructions, contract_srcmap_runtime, slither, contract.source_mapping.filename.absolute, ) contract_bytecode_init = contract.scope.bytecode_init(contract.name) contract_srcmap_init = contract.scope.srcmap_init(contract.name) cfg_init = CFG(contract_bytecode_init) evm_info["cfg_init", contract.name] = cfg_init evm_info["mapping_init", contract.name] = generate_source_to_evm_ins_mapping( cfg_init.instructions, contract_srcmap_init, slither, contract.source_mapping.filename.absolute, ) return evm_info # pylint: disable=too-many-locals class PrinterEVM(AbstractPrinter): ARGUMENT = "evm" HELP = "Print the evm instructions of nodes in functions" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#evm" def output(self, _filename): """ _filename is not used Args: _filename(string) """ txt = "" if not self.slither.crytic_compile: txt = "The EVM printer requires to compile with crytic-compile" self.info(red(txt)) res = self.generate_output(txt) return res evm_info = _extract_evm_info(self.slither) for contract in self.slither.contracts_derived: txt += blue(f"Contract {contract.name}\n") contract_file = self.slither.source_code[ contract.source_mapping.filename.absolute ].encode("utf-8") with open(contract.source_mapping.filename.absolute, "r", encoding="utf8") as f: contract_file_lines = f.readlines() contract_pcs = {} contract_cfg = {} for function in contract.functions: txt += blue(f"\tFunction {function.canonical_name}\n") # CFG and source mapping depend on function being constructor or not if function.is_constructor: contract_cfg = evm_info["cfg_init", contract.name] contract_pcs = evm_info["mapping_init", contract.name] else: contract_cfg = evm_info["cfg", contract.name] contract_pcs = evm_info["mapping", contract.name] for node in function.nodes: txt += green("\t\tNode: " + str(node) + "\n") node_source_line = ( contract_file[0 : node.source_mapping.start].count("\n".encode("utf-8")) + 1 ) txt += green( f"\t\tSource line {node_source_line}: {contract_file_lines[node_source_line - 1].rstrip()}\n" ) txt += magenta("\t\tEVM Instructions:\n") node_pcs = contract_pcs.get(node_source_line, []) for pc in node_pcs: txt += magenta(f"\t\t\t{hex(pc)}: {contract_cfg.get_instruction_at(pc)}\n") for modifier in contract.modifiers: txt += blue(f"\tModifier {modifier.canonical_name}\n") for node in modifier.nodes: txt += green("\t\tNode: " + str(node) + "\n") node_source_line = ( contract_file[0 : node.source_mapping.start].count("\n".encode("utf-8")) + 1 ) txt += green( f"\t\tSource line {node_source_line}: {contract_file_lines[node_source_line - 1].rstrip()}\n" ) txt += magenta("\t\tEVM Instructions:\n") node_pcs = contract_pcs.get(node_source_line, []) for pc in 
node_pcs: txt += magenta(f"\t\t\t{hex(pc)}: {contract_cfg.get_instruction_at(pc)}\n") self.info(txt) res = self.generate_output(txt) return res
4,900
Python
.py
103
34.864078
117
0.573194
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,995
slithir.py
NioTheFirst_ScType/slither/printers/summary/slithir.py
""" Module printing summary of the contract """ from slither.core.declarations import Function from slither.printers.abstract_printer import AbstractPrinter def _print_function(function: Function) -> str: txt = "" for node in function.nodes: if node.expression: txt += f"\t\tExpression: {node.expression}\n" txt += "\t\tIRs:\n" for ir in node.irs: txt += f"\t\t\t{ir}\n" elif node.irs: txt += "\t\tIRs:\n" for ir in node.irs: txt += f"\t\t\t{ir}\n" return txt class PrinterSlithIR(AbstractPrinter): ARGUMENT = "slithir" HELP = "Print the slithIR representation of the functions" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#slithir" def output(self, _filename): """ _filename is not used Args: _filename(string) """ txt = "" for compilation_unit in self.slither.compilation_units: for contract in compilation_unit.contracts: if contract.is_top_level: continue txt += f"Contract {contract.name}\n" for function in contract.functions: txt += f'\tFunction {function.canonical_name} {"" if function.is_shadowed else "(*)"}\n' txt += _print_function(function) for modifier in contract.modifiers: txt += f"\tModifier {modifier.canonical_name}\n" txt += _print_function(modifier) if compilation_unit.functions_top_level: txt += "Top level functions" for function in compilation_unit.functions_top_level: txt += f"\tFunction {function.canonical_name}\n" txt += _print_function(function) self.info(txt) res = self.generate_output(txt) return res
1,945
Python
.py
48
29.541667
108
0.574603
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,996
declaration.py
NioTheFirst_ScType/slither/printers/summary/declaration.py
from slither.printers.abstract_printer import AbstractPrinter
from slither.utils.source_mapping import get_definition, get_implementation, get_references


class Declaration(AbstractPrinter):
    ARGUMENT = "declaration"
    HELP = "Prototype showing the source code declaration, implementation and references of the contracts objects"
    WIKI = "TODO"

    def output(self, _filename):
        """
        _filename is not used
        Args:
            _filename(string)
        """
        txt = ""
        for compilation_unit in self.slither.compilation_units:
            txt += "\n# Contracts\n"
            for contract in compilation_unit.contracts:
                txt += f"# {contract.name}\n"
                txt += f"\t- Declaration: {get_definition(contract, compilation_unit.core.crytic_compile).to_detailled_str()}\n"
                txt += f"\t- Implementation: {get_implementation(contract).to_detailled_str()}\n"
                txt += (
                    f"\t- References: {[x.to_detailled_str() for x in get_references(contract)]}\n"
                )

                txt += "\n\t## Function\n"
                for func in contract.functions:
                    txt += f"\t\t- {func.canonical_name}\n"
                    txt += f"\t\t\t- Declaration: {get_definition(func, compilation_unit.core.crytic_compile).to_detailled_str()}\n"
                    txt += (
                        f"\t\t\t- Implementation: {get_implementation(func).to_detailled_str()}\n"
                    )
                    txt += f"\t\t\t- References: {[x.to_detailled_str() for x in get_references(func)]}\n"

                txt += "\n\t## State variables\n"
                for var in contract.state_variables:
                    txt += f"\t\t- {var.name}\n"
                    txt += f"\t\t\t- Declaration: {get_definition(var, compilation_unit.core.crytic_compile).to_detailled_str()}\n"
                    txt += f"\t\t\t- Implementation: {get_implementation(var).to_detailled_str()}\n"
                    txt += f"\t\t\t- References: {[x.to_detailled_str() for x in get_references(var)]}\n"

                txt += "\n\t## Structures\n"
                for st in contract.structures:
                    txt += f"\t\t- {st.name}\n"
                    txt += f"\t\t\t- Declaration: {get_definition(st, compilation_unit.core.crytic_compile).to_detailled_str()}\n"
                    txt += f"\t\t\t- Implementation: {get_implementation(st).to_detailled_str()}\n"
                    txt += f"\t\t\t- References: {[x.to_detailled_str() for x in get_references(st)]}\n"

        self.info(txt)
        res = self.generate_output(txt)
        return res
2,675
Python
.py
45
44.755556
132
0.557678
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,997
variable_order.py
NioTheFirst_ScType/slither/printers/summary/variable_order.py
""" Module printing summary of the contract """ from slither.printers.abstract_printer import AbstractPrinter from slither.utils.myprettytable import MyPrettyTable class VariableOrder(AbstractPrinter): ARGUMENT = "variable-order" HELP = "Print the storage order of the state variables" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#variable-order" def output(self, _filename): """ _filename is not used Args: _filename(string) """ txt = "" all_tables = [] for contract in self.slither.contracts_derived: txt += f"\n{contract.name}:\n" table = MyPrettyTable(["Name", "Type", "Slot", "Offset"]) for variable in contract.state_variables_ordered: if not variable.is_constant and not variable.is_immutable: slot, offset = contract.compilation_unit.storage_layout_of(contract, variable) table.add_row([variable.canonical_name, str(variable.type), slot, offset]) all_tables.append((contract.name, table)) txt += str(table) + "\n" self.info(txt) res = self.generate_output(txt) for name, table in all_tables: res.add_pretty_table(table, name) return res
1,334
Python
.py
31
33.645161
98
0.632843
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,998
data_depenency.py
NioTheFirst_ScType/slither/printers/summary/data_depenency.py
""" Module printing summary of the contract """ from slither.printers.abstract_printer import AbstractPrinter from slither.analyses.data_dependency.data_dependency import get_dependencies from slither.slithir.variables import TemporaryVariable, ReferenceVariable from slither.utils.myprettytable import MyPrettyTable def _get(v, c): return list( { d.name for d in get_dependencies(v, c) if not isinstance(d, (TemporaryVariable, ReferenceVariable)) } ) class DataDependency(AbstractPrinter): ARGUMENT = "data-dependency" HELP = "Print the data dependencies of the variables" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#data-dependencies" def output(self, _filename): """ _filename is not used Args: _filename(string) """ all_tables = [] all_txt = "" txt = "" for c in self.contracts: if c.is_top_level: continue txt += f"\nContract {c.name}\n" table = MyPrettyTable(["Variable", "Dependencies"]) for v in c.state_variables: table.add_row([v.name, sorted(_get(v, c))]) txt += str(table) txt += "\n" for f in c.functions_and_modifiers_declared: txt += f"\nFunction {f.full_name}\n" table = MyPrettyTable(["Variable", "Dependencies"]) for v in f.variables: table.add_row([v.name, sorted(_get(v, f))]) for v in c.state_variables: table.add_row([v.canonical_name, sorted(_get(v, f))]) txt += str(table) self.info(txt) all_txt += txt all_tables.append((c.name, table)) res = self.generate_output(all_txt) for name, table in all_tables: res.add_pretty_table(table, name) return res
1,983
Python
.py
52
27.923077
96
0.58142
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
2,285,999
when_not_paused.py
NioTheFirst_ScType/slither/printers/summary/when_not_paused.py
""" Module printing summary of the contract """ from slither.core.declarations import Function from slither.core.declarations.function import SolidityFunction from slither.printers.abstract_printer import AbstractPrinter from slither.utils import output from slither.utils.myprettytable import MyPrettyTable def _use_modifier(function: Function, modifier_name: str = "whenNotPaused") -> bool: if function.is_constructor or function.view or function.pure: return False for internal_call in function.all_internal_calls(): if isinstance(internal_call, SolidityFunction): continue if any(modifier.name == modifier_name for modifier in function.modifiers): return True return False class PrinterWhenNotPaused(AbstractPrinter): ARGUMENT = "pausable" HELP = "Print functions that do not use whenNotPaused" WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#when-not-paused" def output(self, _filename: str) -> output.Output: """ _filename is not used Args: _filename(string) """ modifier_name: str = "whenNotPaused" txt = "" txt += "Constructor and pure/view functions are not displayed\n" all_tables = [] for contract in self.slither.contracts: txt += f"\n{contract.name}:\n" table = MyPrettyTable(["Name", "Use whenNotPaused"]) for function in contract.functions_entry_points: status = "X" if _use_modifier(function, modifier_name) else "" table.add_row([function.solidity_signature, status]) txt += str(table) + "\n" all_tables.append((contract.name, table)) self.info(txt) res = self.generate_output(txt) for name, table in all_tables: res.add_pretty_table(table, name) return res
1,918
Python
.py
44
35.386364
94
0.668821
NioTheFirst/ScType
8
4
1
AGPL-3.0
9/5/2024, 10:48:01 PM (Europe/Amsterdam)
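The _use_modifier check above ultimately reduces to a name comparison over function.modifiers; with small stand-in objects (no Slither needed, purely hypothetical stubs) the core of that check behaves like this:

from types import SimpleNamespace

# Hypothetical stand-ins for slither Modifier objects; only .name matters here.
pausable = SimpleNamespace(name="whenNotPaused")
only_owner = SimpleNamespace(name="onlyOwner")

def has_modifier(modifiers, modifier_name="whenNotPaused"):
    return any(modifier.name == modifier_name for modifier in modifiers)

print(has_modifier([only_owner, pausable]))  # True  -> "X" in the printer's table
print(has_modifier([only_owner]))            # False -> empty cell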